Subversion Repositories: Kolibri OS

Compare Revisions: Rev 6935 → Rev 6937

/drivers/video/drm/drm_atomic.c
65,8 → 65,6
*/
state->allow_modeset = true;
 
state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
 
state->crtcs = kcalloc(dev->mode_config.num_crtc,
sizeof(*state->crtcs), GFP_KERNEL);
if (!state->crtcs)
83,16 → 81,6
sizeof(*state->plane_states), GFP_KERNEL);
if (!state->plane_states)
goto fail;
state->connectors = kcalloc(state->num_connector,
sizeof(*state->connectors),
GFP_KERNEL);
if (!state->connectors)
goto fail;
state->connector_states = kcalloc(state->num_connector,
sizeof(*state->connector_states),
GFP_KERNEL);
if (!state->connector_states)
goto fail;
 
state->dev = dev;
 
288,8 → 276,8
state->crtcs[index] = crtc;
crtc_state->state = state;
 
DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
crtc->base.id, crtc_state, state);
DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
crtc->base.id, crtc->name, crtc_state, state);
 
return crtc_state;
}
316,7 → 304,6
if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
return 0;
 
if (state->mode_blob)
drm_property_unreference_blob(state->mode_blob);
state->mode_blob = NULL;
 
363,12 → 350,9
if (blob == state->mode_blob)
return 0;
 
if (state->mode_blob)
drm_property_unreference_blob(state->mode_blob);
state->mode_blob = NULL;
 
memset(&state->mode, 0, sizeof(state->mode));
 
if (blob) {
if (blob->length != sizeof(struct drm_mode_modeinfo) ||
drm_mode_convert_umode(&state->mode,
381,6 → 365,7
DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
state->mode.name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
state);
420,7 → 405,6
struct drm_property_blob *mode =
drm_property_lookup_blob(dev, val);
ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
if (mode)
drm_property_unreference_blob(mode);
return ret;
}
433,11 → 417,20
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);
 
/*
/**
* drm_atomic_crtc_get_property - get property value from CRTC state
* @crtc: the drm CRTC to get the property value for
* @state: the state object to get the property value from
* @property: the property to look up
* @val: return location for the property value
*
* This function handles generic/core properties and calls out to the
* driver's ->atomic_get_property() hook for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*
* RETURNS:
* Zero on success, error code on failure
*/
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
481,8 → 474,8
*/
 
if (state->active && !state->enable) {
DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
 
491,14 → 484,30
* be able to trigger. */
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(state->enable && !state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d] enabled without mode blob\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
 
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(!state->enable && state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d] disabled with mode blob\n",
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
 
/*
* Reject event generation when a CRTC is off and stays off. It
* wouldn't be hard to implement this, but userspace has a track record
* of happily burning through 100% cpu (or worse, crashing) when the
* display pipe is suspended. To avoid all that fun, just reject updates
* that ask for events, since that likely indicates a bug in the
* compositor's drawing loop. This is consistent with the vblank IOCTL
* and legacy page_flip IOCTL, which also reject service on a disabled
* pipe.
*/
if (state->event && !state->active && !crtc->state->active) {
DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n",
crtc->base.id);
return -EINVAL;
}
544,8 → 553,8
state->planes[index] = plane;
plane_state->state = state;
 
DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
plane->base.id, plane_state, state);
DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
plane->base.id, plane->name, plane_state, state);
 
if (plane_state->crtc) {
struct drm_crtc_state *crtc_state;
620,11 → 629,20
}
EXPORT_SYMBOL(drm_atomic_plane_set_property);
 
/*
/**
* drm_atomic_plane_get_property - get property value from plane state
* @plane: the drm plane to get the property value for
* @state: the state object to get the property value from
* @property: the property to look up
* @val: return location for the property value
*
* This function handles generic/core properties and calls out to the
* driver's ->atomic_get_property() hook for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*
* RETURNS:
* Zero on success, error code on failure
*/
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
756,8 → 774,8
}
 
if (plane_switching_crtc(state->state, plane, state)) {
DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n",
plane->base.id);
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
plane->base.id, plane->name);
return -EINVAL;
}
 
793,19 → 811,27
 
index = drm_connector_index(connector);
 
/*
* Construction of atomic state updates can race with a connector
* hot-add which might overflow. In this case flip the table and just
* restart the entire ioctl - no one is fast enough to livelock a cpu
* with physical hotplug events anyway.
*
* Note that we only grab the indexes once we have the right lock to
* prevent hotplug/unplugging of connectors. So removal is no problem,
* at most the array is a bit too large.
*/
if (index >= state->num_connector) {
DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
return ERR_PTR(-EAGAIN);
struct drm_connector **c;
struct drm_connector_state **cs;
int alloc = max(index + 1, config->num_connector);
 
c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
 
state->connectors = c;
memset(&state->connectors[state->num_connector], 0,
sizeof(*state->connectors) * (alloc - state->num_connector));
 
cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
 
state->connector_states = cs;
memset(&state->connector_states[state->num_connector], 0,
sizeof(*state->connector_states) * (alloc - state->num_connector));
state->num_connector = alloc;
}
 
if (state->connector_states[index])
876,11 → 902,20
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
 
/*
/**
* drm_atomic_connector_get_property - get property value from connector state
* @connector: the drm connector to get the property value for
* @state: the state object to get the property value from
* @property: the property to look up
* @val: return location for the property value
*
* This function handles generic/core properties and calls out to the
* driver's ->atomic_get_property() hook for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*
* RETURNS:
* Zero on success, error code on failure
*/
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
981,8 → 1016,8
}
 
if (crtc)
DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
plane_state, crtc->base.id);
DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
plane_state, crtc->base.id, crtc->name);
else
DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
plane_state);
1040,17 → 1075,28
{
struct drm_crtc_state *crtc_state;
 
if (conn_state->crtc && conn_state->crtc != crtc) {
crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
conn_state->crtc);
 
crtc_state->connector_mask &=
~(1 << drm_connector_index(conn_state->connector));
}
 
if (crtc) {
crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
 
crtc_state->connector_mask |=
1 << drm_connector_index(conn_state->connector);
}
 
conn_state->crtc = crtc;
 
if (crtc)
DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
conn_state, crtc->base.id);
DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
conn_state, crtc->base.id, crtc->name);
else
DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
conn_state);
1089,8 → 1135,8
if (ret)
return ret;
 
DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
crtc->base.id, state);
DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
crtc->base.id, crtc->name, state);
 
/*
* Changed connectors are already in @state, so only need to look at the
1149,35 → 1195,6
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
 
/**
* drm_atomic_connectors_for_crtc - count number of connected outputs
* @state: atomic state
* @crtc: DRM crtc
*
* This function counts all connectors which will be connected to @crtc
* according to @state. Useful to recompute the enable state for @crtc.
*/
int
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
struct drm_connector *connector;
struct drm_connector_state *conn_state;
 
int i, num_connected_connectors = 0;
 
for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->crtc == crtc)
num_connected_connectors++;
}
 
DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
state, num_connected_connectors, crtc->base.id);
 
return num_connected_connectors;
}
EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
 
/**
* drm_atomic_legacy_backoff - locking backoff for legacy ioctls
* @state: atomic state
*
1192,14 → 1209,9
retry:
drm_modeset_backoff(state->acquire_ctx);
 
ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
state->acquire_ctx);
ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
if (ret)
goto retry;
ret = drm_modeset_lock_all_crtcs(state->dev,
state->acquire_ctx);
if (ret)
goto retry;
}
EXPORT_SYMBOL(drm_atomic_legacy_backoff);
 
1229,8 → 1241,8
for_each_plane_in_state(state, plane, plane_state, i) {
ret = drm_atomic_plane_check(plane, plane_state);
if (ret) {
DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
plane->base.id);
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
plane->base.id, plane->name);
return ret;
}
}
1238,8 → 1250,8
for_each_crtc_in_state(state, crtc, crtc_state, i) {
ret = drm_atomic_crtc_check(crtc, crtc_state);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
crtc->base.id, crtc->name);
return ret;
}
}
1250,8 → 1262,8
if (!state->allow_modeset) {
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
}
1434,7 → 1446,7
}
 
/**
* drm_atomic_update_old_fb -- Unset old_fb pointers and set plane->fb pointers.
* drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
*
* @dev: drm device to check.
* @plane_mask: plane mask for planes that were updated.
/drivers/video/drm/drm_atomic_helper.c
52,6 → 52,12
* drm_atomic_helper_disable_plane() and the
* various functions to implement set_property callbacks. New drivers must not
* implement these functions themselves but must use the provided helpers.
*
* The atomic helper uses the same function table structures as all other
* modesetting helpers. See the documentation for struct &drm_crtc_helper_funcs,
* struct &drm_encoder_helper_funcs and struct &drm_connector_helper_funcs. It
* also shares the struct &drm_plane_helper_funcs function table with the plane
* helpers.
*/
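 
(Illustrative aside, not part of the diff: the shared helper function tables mentioned above might be wired up as in this hedged C sketch; every foo_* name is hypothetical.)

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.mode_set_nofb = foo_crtc_mode_set_nofb,  /* program CRTC timings */
	.atomic_begin  = foo_crtc_atomic_begin,   /* prepare double-buffered regs */
	.atomic_flush  = foo_crtc_atomic_flush,   /* arm the update */
};

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.atomic_check  = foo_plane_atomic_check,
	.atomic_update = foo_plane_atomic_update,
};

/* registered once at driver init: */
drm_crtc_helper_add(&foo->crtc, &foo_crtc_helper_funcs);
drm_plane_helper_add(&foo->plane, &foo_plane_helper_funcs);
 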
static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
80,6 → 86,26
}
}
 
static bool
check_pending_encoder_assignment(struct drm_atomic_state *state,
struct drm_encoder *new_encoder)
{
struct drm_connector *connector;
struct drm_connector_state *conn_state;
int i;
 
for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->best_encoder != new_encoder)
continue;
 
/* encoder already assigned and we're trying to re-steal it! */
if (connector->state->best_encoder != conn_state->best_encoder)
return false;
}
 
return true;
}
 
static struct drm_crtc *
get_current_crtc_for_encoder(struct drm_device *dev,
struct drm_encoder *encoder)
108,6 → 134,7
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int ret;
 
/*
* We can only steal an encoder coming from a connector, which means we
115,9 → 142,9
*/
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
encoder->base.id, encoder->name,
encoder_crtc->base.id);
encoder_crtc->base.id, encoder_crtc->name);
 
crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
if (IS_ERR(crtc_state))
138,6 → 165,9
if (IS_ERR(connector_state))
return PTR_ERR(connector_state);
 
ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
if (ret)
return ret;
connector_state->best_encoder = NULL;
}
 
215,16 → 245,24
}
 
if (new_encoder == connector_state->best_encoder) {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
connector->base.id,
connector->name,
new_encoder->base.id,
new_encoder->name,
connector_state->crtc->base.id);
connector_state->crtc->base.id,
connector_state->crtc->name);
 
return 0;
}
 
if (!check_pending_encoder_assignment(state, new_encoder)) {
DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
connector->base.id,
connector->name);
return -EINVAL;
}
 
encoder_crtc = get_current_crtc_for_encoder(state->dev,
new_encoder);
 
247,12 → 285,13
crtc_state = state->crtc_states[idx];
crtc_state->connectors_changed = true;
 
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
connector->base.id,
connector->name,
new_encoder->base.id,
new_encoder->name,
connector_state->crtc->base.id);
connector_state->crtc->base.id,
connector_state->crtc->name);
 
return 0;
}
265,7 → 304,7
struct drm_connector *connector;
struct drm_connector_state *conn_state;
int i;
int ret;
bool ret;
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->mode_changed &&
336,8 → 375,8
ret = funcs->mode_fixup(crtc, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
}
384,14 → 423,14
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
crtc->base.id, crtc->name);
crtc_state->mode_changed = true;
}
 
if (crtc->state->enable != crtc_state->enable) {
DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
crtc->base.id, crtc->name);
 
/*
* For clarity this assignment is done here, but
424,7 → 463,8
* crtc only changed its mode but has the same set of connectors.
*/
for_each_crtc_in_state(state, crtc, crtc_state, i) {
int num_connectors;
bool has_connectors =
!!crtc_state->connector_mask;
 
/*
* We must set ->active_changed after walking connectors for
432,8 → 472,8
* a full modeset because update_connector_routing force that.
*/
if (crtc->state->active != crtc_state->active) {
DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
crtc->base.id, crtc->name);
crtc_state->active_changed = true;
}
 
440,8 → 480,8
if (!drm_atomic_crtc_needs_modeset(crtc_state))
continue;
 
DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
crtc->base.id,
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
crtc->base.id, crtc->name,
crtc_state->enable ? 'y' : 'n',
crtc_state->active ? 'y' : 'n');
 
453,13 → 493,10
if (ret != 0)
return ret;
 
num_connectors = drm_atomic_connectors_for_crtc(state,
crtc);
if (crtc_state->enable != has_connectors) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
crtc->base.id, crtc->name);
 
if (crtc_state->enable != !!num_connectors) {
DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
crtc->base.id);
 
return -EINVAL;
}
}
505,8 → 542,8
 
ret = funcs->atomic_check(plane, plane_state);
if (ret) {
DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n",
plane->base.id);
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
plane->base.id, plane->name);
return ret;
}
}
521,8 → 558,8
 
ret = funcs->atomic_check(crtc, state->crtc_states[i]);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
crtc->base.id, crtc->name);
return ret;
}
}
635,8 → 672,8
 
funcs = crtc->helper_private;
 
DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
 
 
/* Right function depends upon target state. */
747,8 → 784,8
funcs = crtc->helper_private;
 
if (crtc->state->enable && funcs->mode_set_nofb) {
DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
 
funcs->mode_set_nofb(crtc);
}
847,8 → 884,8
funcs = crtc->helper_private;
 
if (crtc->state->enable) {
DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
crtc->base.id);
DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
 
if (funcs->enable)
funcs->enable(crtc);
909,7 → 946,21
}
}
 
static bool framebuffer_changed(struct drm_device *dev,
/**
* drm_atomic_helper_framebuffer_changed - check if framebuffer has changed
* @dev: DRM device
* @old_state: atomic state object with old state structures
* @crtc: DRM crtc
*
* Checks whether the framebuffer used for this CRTC changes as a result of
* the atomic update. This is useful for drivers which cannot use
* drm_atomic_helper_wait_for_vblanks() and need to reimplement its
* functionality.
*
* Returns:
* true if the framebuffer changed.
*/
bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
struct drm_atomic_state *old_state,
struct drm_crtc *crtc)
{
928,6 → 979,7
 
return false;
}
EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed);
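 
(Illustrative aside: the reimplementation case named in the kerneldoc could look like this sketch, with foo_wait_for_flip_done() a hypothetical driver primitive standing in for drm_atomic_helper_wait_for_vblanks().)

struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;

for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
	/* no new framebuffer on this CRTC, nothing to wait for */
	if (!drm_atomic_helper_framebuffer_changed(dev, old_state, crtc))
		continue;

	foo_wait_for_flip_done(crtc);
}
 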
 
/**
* drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
962,7 → 1014,8
if (old_state->legacy_cursor_update)
continue;
 
if (!framebuffer_changed(dev, old_state, crtc))
if (!drm_atomic_helper_framebuffer_changed(dev,
old_state, crtc))
continue;
 
ret = drm_crtc_vblank_get(crtc);
1338,6 → 1391,49
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
 
/**
* drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
* @crtc: CRTC
* @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
*
* Disables all planes associated with the given CRTC. This can be
* used for instance in the CRTC helper disable callback to disable
* all planes before shutting down the display pipeline.
*
* If the @atomic parameter is set, the function calls the CRTC's
* atomic_begin hook before and atomic_flush hook after disabling the
* planes.
*
* It is a bug to call this function without having implemented the
* ->atomic_disable() plane hook.
*/
void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
bool atomic)
{
const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
struct drm_plane *plane;
 
if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
crtc_funcs->atomic_begin(crtc, NULL);
 
drm_for_each_plane(plane, crtc->dev) {
const struct drm_plane_helper_funcs *plane_funcs =
plane->helper_private;
 
if (plane->state->crtc != crtc || !plane_funcs)
continue;
 
WARN_ON(!plane_funcs->atomic_disable);
if (plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, NULL);
}
 
if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
crtc_funcs->atomic_flush(crtc, NULL);
}
EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
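 
(Illustrative aside: the disable-callback use case from the kerneldoc, as a hedged sketch; foo_crtc_disable() and foo_power_down_pipe() are hypothetical.)

static void foo_crtc_disable(struct drm_crtc *crtc)
{
	/* atomic=true brackets the plane disables with atomic_begin/flush */
	drm_atomic_helper_disable_planes_on_crtc(crtc, true);

	foo_power_down_pipe(crtc);
}
 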
 
/**
* drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
* @dev: DRM device
* @old_state: atomic state object with old state structures
1397,7 → 1493,7
{
int i;
 
for (i = 0; i < dev->mode_config.num_connector; i++) {
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i];
 
if (!connector)
1481,12 → 1577,12
drm_atomic_set_fb_for_plane(plane_state, fb);
plane_state->crtc_x = crtc_x;
plane_state->crtc_y = crtc_y;
plane_state->crtc_w = crtc_w;
plane_state->crtc_h = crtc_h;
plane_state->crtc_w = crtc_w;
plane_state->src_x = src_x;
plane_state->src_y = src_y;
plane_state->src_w = src_w;
plane_state->src_h = src_h;
plane_state->src_w = src_w;
 
if (plane == crtc->cursor)
state->legacy_cursor_update = true;
1605,12 → 1701,12
drm_atomic_set_fb_for_plane(plane_state, NULL);
plane_state->crtc_x = 0;
plane_state->crtc_y = 0;
plane_state->crtc_w = 0;
plane_state->crtc_h = 0;
plane_state->crtc_w = 0;
plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_w = 0;
plane_state->src_h = 0;
plane_state->src_w = 0;
 
return 0;
}
1672,7 → 1768,7
if (crtc == set->crtc)
continue;
 
if (!drm_atomic_connectors_for_crtc(state, crtc)) {
if (!crtc_state->connector_mask) {
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
NULL);
if (ret < 0)
1793,16 → 1889,16
drm_atomic_set_fb_for_plane(primary_state, set->fb);
primary_state->crtc_x = 0;
primary_state->crtc_y = 0;
primary_state->crtc_w = hdisplay;
primary_state->crtc_h = vdisplay;
primary_state->crtc_w = hdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
primary_state->src_w = vdisplay << 16;
primary_state->src_h = hdisplay << 16;
primary_state->src_w = vdisplay << 16;
} else {
primary_state->src_w = hdisplay << 16;
primary_state->src_h = vdisplay << 16;
primary_state->src_w = hdisplay << 16;
}
 
commit:
1814,6 → 1910,161
}
 
/**
* drm_atomic_helper_disable_all - disable all currently active outputs
* @dev: DRM device
* @ctx: lock acquisition context
*
* Loops through all connectors, finding those that aren't turned off and then
* turns them off by setting their DPMS mode to OFF and deactivating the CRTC
* that they are connected to.
*
* This is used for example in suspend/resume to disable all currently active
* outputs when suspending.
*
* Note that if callers haven't already acquired all modeset locks this might
* return -EDEADLK, which must be handled by calling drm_modeset_backoff().
*
* Returns:
* 0 on success or a negative error code on failure.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_connector *conn;
int err;
 
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
 
state->acquire_ctx = ctx;
 
drm_for_each_connector(conn, dev) {
struct drm_crtc *crtc = conn->state->crtc;
struct drm_crtc_state *crtc_state;
 
if (!crtc || conn->dpms != DRM_MODE_DPMS_ON)
continue;
 
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
err = PTR_ERR(crtc_state);
goto free;
}
 
crtc_state->active = false;
}
 
err = drm_atomic_commit(state);
 
free:
if (err < 0)
drm_atomic_state_free(state);
 
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
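 
(Illustrative aside: the -EDEADLK handling the kerneldoc asks for, sketched for a caller that owns the acquire context; drm_atomic_helper_suspend() below follows the same pattern.)

struct drm_modeset_acquire_ctx ctx;
int err;

drm_modeset_acquire_init(&ctx, 0);
retry:
	err = drm_atomic_helper_disable_all(dev, &ctx);
	if (err == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
 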
 
/**
* drm_atomic_helper_suspend - subsystem-level suspend helper
* @dev: DRM device
*
* Duplicates the current atomic state, disables all active outputs and then
* returns a pointer to the original atomic state to the caller. Drivers can
* pass this pointer to the drm_atomic_helper_resume() helper upon resume to
* restore the output configuration that was active at the time the system
* entered suspend.
*
* Note that it is potentially unsafe to use this. The atomic state object
* returned by this function is assumed to be persistent. Drivers must ensure
* that this holds true. Before calling this function, drivers must make sure
* to suspend fbdev emulation so that nothing can be using the device.
*
* Returns:
* A pointer to a copy of the state before suspend on success or an ERR_PTR()-
* encoded error code on failure. Drivers should store the returned atomic
* state object and pass it to the drm_atomic_helper_resume() helper upon
* resume.
*
* See also:
* drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
* drm_atomic_helper_resume()
*/
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
int err;
 
drm_modeset_acquire_init(&ctx, 0);
 
retry:
err = drm_modeset_lock_all_ctx(dev, &ctx);
if (err < 0) {
state = ERR_PTR(err);
goto unlock;
}
 
state = drm_atomic_helper_duplicate_state(dev, &ctx);
if (IS_ERR(state))
goto unlock;
 
err = drm_atomic_helper_disable_all(dev, &ctx);
if (err < 0) {
drm_atomic_state_free(state);
state = ERR_PTR(err);
goto unlock;
}
 
unlock:
if (PTR_ERR(state) == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
 
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
 
/**
* drm_atomic_helper_resume - subsystem-level resume helper
* @dev: DRM device
* @state: atomic state to resume to
*
* Calls drm_mode_config_reset() to synchronize hardware and software states,
* grabs all modeset locks and commits the atomic state object. This can be
* used in conjunction with the drm_atomic_helper_suspend() helper to
* implement suspend/resume for drivers that support atomic mode-setting.
*
* Returns:
* 0 on success or a negative error code on failure.
*
* See also:
* drm_atomic_helper_suspend()
*/
int drm_atomic_helper_resume(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_mode_config *config = &dev->mode_config;
int err;
 
drm_mode_config_reset(dev);
drm_modeset_lock_all(dev);
state->acquire_ctx = config->acquire_ctx;
err = drm_atomic_commit(state);
drm_modeset_unlock_all(dev);
 
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
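 
(Illustrative aside: how the suspend/resume pair composes in a driver's PM hooks; struct foo_device, foo->drm and foo->saved_state are hypothetical.)

static int foo_pm_suspend(struct foo_device *foo)
{
	foo->saved_state = drm_atomic_helper_suspend(foo->drm);
	if (IS_ERR(foo->saved_state))
		return PTR_ERR(foo->saved_state);

	return 0;
}

static int foo_pm_resume(struct foo_device *foo)
{
	/* commits the configuration saved at suspend time */
	return drm_atomic_helper_resume(foo->drm, foo->saved_state);
}
 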
 
/**
* drm_atomic_helper_crtc_set_property - helper for crtc properties
* @crtc: DRM crtc
* @property: DRM property
2047,6 → 2298,15
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
 
/* Make sure we don't accidentally do a full modeset. */
state->allow_modeset = false;
if (!crtc_state->active) {
DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
crtc->base.id);
ret = -EINVAL;
goto fail;
}
 
ret = drm_atomic_async_commit(state);
if (ret != 0)
goto fail;
2169,6 → 2429,12
* The simpler solution is to just reset the software state to everything off,
* which is easiest to do by calling drm_mode_config_reset(). To facilitate this
* the atomic helpers provide default reset implementations for all hooks.
*
* On the upside the precise state tracking of atomic simplifies system suspend
* and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
* is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
* For other drivers the building blocks are split out, see the documentation
* for these functions.
*/
 
/**
2180,7 → 2446,7
*/
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
if (crtc->state && crtc->state->mode_blob)
if (crtc->state)
drm_property_unreference_blob(crtc->state->mode_blob);
kfree(crtc->state);
crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
2248,7 → 2514,6
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
if (state->mode_blob)
drm_property_unreference_blob(state->mode_blob);
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
2364,6 → 2629,28
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
 
/**
* __drm_atomic_helper_connector_reset - reset state on connector
* @connector: drm connector
* @conn_state: connector state to assign
*
* Initializes the newly allocated @conn_state and assigns it to
* @connector->state, typically required when initializing the driver
* or when called from the ->reset hook.
*
* This is useful for drivers that subclass the connector state.
*/
void
__drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state)
{
if (conn_state)
conn_state->connector = connector;
 
connector->state = conn_state;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
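 
(Illustrative aside: a ->reset hook for the subclassing case the kerneldoc mentions; struct foo_connector_state is hypothetical and embeds struct drm_connector_state as its first member, base.)

static void foo_connector_reset(struct drm_connector *connector)
{
	struct foo_connector_state *foo_state =
		kzalloc(sizeof(*foo_state), GFP_KERNEL);

	kfree(connector->state);
	__drm_atomic_helper_connector_reset(connector,
					    foo_state ? &foo_state->base : NULL);
	/* driver-private defaults would be filled in here */
}
 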
 
/**
* drm_atomic_helper_connector_reset - default ->reset hook for connectors
* @connector: drm connector
*
2373,11 → 2660,11
*/
void drm_atomic_helper_connector_reset(struct drm_connector *connector)
{
struct drm_connector_state *conn_state =
kzalloc(sizeof(*conn_state), GFP_KERNEL);
 
kfree(connector->state);
connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
 
if (connector->state)
connector->state->connector = connector;
__drm_atomic_helper_connector_reset(connector, conn_state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
 
2426,7 → 2713,9
* @ctx: lock acquisition context
*
* Makes a copy of the current atomic state by looping over all objects and
* duplicating their respective states.
* duplicating their respective states. This is used for example by suspend/
* resume support code to save the state prior to suspend such that it can
* be restored upon resume.
*
* Note that this treats atomic state as persistent between save and restore.
* Drivers must make sure that this is possible and won't result in confusion
2438,6 → 2727,9
* Returns:
* A pointer to the copy of the atomic state object on success or an
* ERR_PTR()-encoded error code on failure.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
/drivers/video/drm/drm_bridge.c
31,11 → 31,11
/**
* DOC: overview
*
* drm_bridge represents a device that hangs on to an encoder. These are handy
* when a regular drm_encoder entity isn't enough to represent the entire
* struct &drm_bridge represents a device that hangs on to an encoder. These are
* handy when a regular &drm_encoder entity isn't enough to represent the entire
* encoder chain.
*
* A bridge is always associated to a single drm_encoder at a time, but can be
* A bridge is always attached to a single &drm_encoder at a time, but can be
* either connected to it directly, or through an intermediate bridge:
*
* encoder ---> bridge B ---> bridge A
46,11 → 46,16
* The driver using the bridge is responsible for making the associations between
* the encoder and bridges. Once these links are made, the bridges will
* participate along with encoder functions to perform mode_set/enable/disable
* through the ops provided in drm_bridge_funcs.
* through the ops provided in &drm_bridge_funcs.
*
* drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
* crtcs, encoders or connectors. They just provide additional hooks to get the
* desired output at the end of the encoder chain.
* CRTCs, encoders or connectors and hence are not visible to userspace. They
* just provide additional hooks to get the desired output at the end of the
* encoder chain.
*
* Bridges can also be chained up using the next pointer in struct &drm_bridge.
*
* Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
*/
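 
(Illustrative aside: the chaining described above, matching the encoder ---> bridge B ---> bridge A diagram; bridge_a/bridge_b are assumed already initialized, and error handling is elided.)

encoder->bridge = bridge_b;	/* B sits closest to the encoder */
bridge_b->next = bridge_a;
bridge_a->next = NULL;

ret = drm_bridge_attach(dev, bridge_b);
if (!ret)
	ret = drm_bridge_attach(dev, bridge_a);
 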
 
static DEFINE_MUTEX(bridge_lock);
122,34 → 127,12
/**
* DOC: bridge callbacks
*
* The drm_bridge_funcs ops are populated by the bridge driver. The drm
* internals(atomic and crtc helpers) use the helpers defined in drm_bridge.c
* These helpers call a specific drm_bridge_funcs op for all the bridges
* The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
* internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c.
* These helpers call a specific &drm_bridge_funcs op for all the bridges
* during encoder configuration.
*
* When creating a bridge driver, one can implement drm_bridge_funcs op with
* the help of these rough rules:
*
* pre_enable: this contains things needed to be done for the bridge before
* its clock and timings are enabled by its source. For a bridge, its source
* is generally the encoder or bridge just before it in the encoder chain.
*
* enable: this contains things needed to be done for the bridge once its
* source is enabled. In other words, enable is called once the source is
* ready with clock and timing needed by the bridge.
*
* disable: this contains things needed to be done for the bridge assuming
* that its source is still enabled, i.e. clock and timings are still on.
*
* post_disable: this contains things needed to be done for the bridge once
* its source is disabled, i.e. once clocks and timings are off.
*
* mode_fixup: this should fixup the given mode for the bridge. It is called
* after the encoder's mode fixup. mode_fixup can also reject a mode completely
* if it's unsuitable for the hardware.
*
* mode_set: this sets up the mode for the bridge. It assumes that its source
* (an encoder or a bridge) has set the mode too.
* For detailed specification of the bridge callbacks see &drm_bridge_funcs.
*/
 
/**
159,7 → 142,7
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
*
* Calls 'mode_fixup' drm_bridge_funcs op for all the bridges in the
* Calls ->mode_fixup() &drm_bridge_funcs op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
186,11 → 169,11
EXPORT_SYMBOL(drm_bridge_mode_fixup);
 
/**
* drm_bridge_disable - calls 'disable' drm_bridge_funcs op for all
* drm_bridge_disable - calls ->disable() &drm_bridge_funcs op for all
* bridges in the encoder chain.
* @bridge: bridge control structure
*
* Calls 'disable' drm_bridge_funcs op for all the bridges in the encoder
* Calls ->disable() &drm_bridge_funcs op for all the bridges in the encoder
* chain, starting from the last bridge to the first. These are called before
* calling the encoder's prepare op.
*
208,11 → 191,11
EXPORT_SYMBOL(drm_bridge_disable);
 
/**
* drm_bridge_post_disable - calls 'post_disable' drm_bridge_funcs op for
* drm_bridge_post_disable - calls ->post_disable() &drm_bridge_funcs op for
* all bridges in the encoder chain.
* @bridge: bridge control structure
*
* Calls 'post_disable' drm_bridge_funcs op for all the bridges in the
* Calls ->post_disable() &drm_bridge_funcs op for all the bridges in the
* encoder chain, starting from the first bridge to the last. These are called
* after completing the encoder's prepare op.
*
236,7 → 219,7
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
*
* Calls 'mode_set' drm_bridge_funcs op for all the bridges in the
* Calls ->mode_set() &drm_bridge_funcs op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
256,11 → 239,11
EXPORT_SYMBOL(drm_bridge_mode_set);
 
/**
* drm_bridge_pre_enable - calls 'pre_enable' drm_bridge_funcs op for all
* drm_bridge_pre_enable - calls ->pre_enable() &drm_bridge_funcs op for all
* bridges in the encoder chain.
* @bridge: bridge control structure
*
* Calls 'pre_enable' drm_bridge_funcs op for all the bridges in the encoder
* Calls ->pre_enable() &drm_bridge_funcs op for all the bridges in the encoder
* chain, starting from the last bridge to the first. These are called
* before calling the encoder's commit op.
*
278,11 → 261,11
EXPORT_SYMBOL(drm_bridge_pre_enable);
 
/**
* drm_bridge_enable - calls 'enable' drm_bridge_funcs op for all bridges
* drm_bridge_enable - calls ->enable() &drm_bridge_funcs op for all bridges
* in the encoder chain.
* @bridge: bridge control structure
*
* Calls 'enable' drm_bridge_funcs op for all the bridges in the encoder
* Calls ->enable() &drm_bridge_funcs op for all the bridges in the encoder
* chain, starting from the first bridge to the last. These are called
* after completing the encoder's commit op.
*
/drivers/video/drm/drm_crtc.c
45,7 → 45,7
 
static struct drm_framebuffer *
internal_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r,
const struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv);
 
/* Avoid boilerplate. I'm tired of typing. */
649,6 → 649,18
 
DEFINE_WW_CLASS(crtc_ww_class);
 
static unsigned int drm_num_crtcs(struct drm_device *dev)
{
unsigned int num = 0;
struct drm_crtc *tmp;
 
drm_for_each_crtc(tmp, dev) {
num++;
}
 
return num;
}
 
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
657,6 → 669,7
* @primary: Primary plane for CRTC
* @cursor: Cursor plane for CRTC
* @funcs: callbacks for the new CRTC
* @name: printf style format string for the CRTC name, or NULL for default name
*
* Inits a new object created as base part of a driver crtc object.
*
666,7 → 679,8
int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs)
const struct drm_crtc_funcs *funcs,
const char *name, ...)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
682,6 → 696,21
if (ret)
return ret;
 
if (name) {
va_list ap;
 
va_start(ap, name);
crtc->name = kvasprintf(GFP_KERNEL, name, ap);
va_end(ap);
} else {
crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
drm_num_crtcs(dev));
}
if (!crtc->name) {
drm_mode_object_put(dev, &crtc->base);
return -ENOMEM;
}
 
crtc->base.properties = &crtc->properties;
 
list_add_tail(&crtc->head, &config->crtc_list);
728,6 → 757,8
if (crtc->state && crtc->funcs->atomic_destroy_state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
 
kfree(crtc->name);
 
memset(crtc, 0, sizeof(*crtc));
}
EXPORT_SYMBOL(drm_crtc_cleanup);
887,12 → 918,19
connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
 
connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
if (connector->connector_id < 0) {
ret = connector->connector_id;
goto out_put;
}
 
connector->connector_type = connector_type;
connector->connector_type_id =
ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
if (connector->connector_type_id < 0) {
ret = connector->connector_type_id;
goto out_put;
goto out_put_id;
}
connector->name =
kasprintf(GFP_KERNEL, "%s-%d",
900,7 → 938,7
connector->connector_type_id);
if (!connector->name) {
ret = -ENOMEM;
goto out_put;
goto out_put_type_id;
}
 
INIT_LIST_HEAD(&connector->probed_modes);
928,7 → 966,12
}
 
connector->debugfs_entry = NULL;
 
out_put_type_id:
if (ret)
ida_remove(connector_ida, connector->connector_type_id);
out_put_id:
if (ret)
ida_remove(&config->connector_ida, connector->connector_id);
out_put:
if (ret)
drm_mode_object_put(dev, &connector->base);
965,6 → 1008,9
ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
connector->connector_type_id);
 
ida_remove(&dev->mode_config.connector_ida,
connector->connector_id);
 
kfree(connector->display_info.bus_formats);
drm_mode_object_put(dev, &connector->base);
kfree(connector->name);
982,32 → 1028,6
EXPORT_SYMBOL(drm_connector_cleanup);
 
/**
* drm_connector_index - find the index of a registered connector
* @connector: connector to find index for
*
* Given a registered connector, return the index of that connector within a DRM
* device's list of connectors.
*/
unsigned int drm_connector_index(struct drm_connector *connector)
{
unsigned int index = 0;
struct drm_connector *tmp;
struct drm_mode_config *config = &connector->dev->mode_config;
 
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
drm_for_each_connector(tmp, connector->dev) {
if (tmp == connector)
return index;
 
index++;
}
 
BUG();
}
EXPORT_SYMBOL(drm_connector_index);
 
/**
* drm_connector_register - register a connector
* @connector: the connector to register
*
1075,6 → 1095,7
* @encoder: the encoder to init
* @funcs: callbacks for this encoder
* @encoder_type: user visible type of the encoder
* @name: printf style format string for the encoder name, or NULL for default name
*
* Initialises a preallocated encoder. Encoder should be
* subclassed as part of driver encoder objects.
1085,7 → 1106,7
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type)
int encoder_type, const char *name, ...)
{
int ret;
 
1098,9 → 1119,17
encoder->dev = dev;
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
if (name) {
va_list ap;
 
va_start(ap, name);
encoder->name = kvasprintf(GFP_KERNEL, name, ap);
va_end(ap);
} else {
encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
drm_encoder_enum_list[encoder_type].name,
encoder->base.id);
}
if (!encoder->name) {
ret = -ENOMEM;
goto out_put;
1141,6 → 1170,18
}
EXPORT_SYMBOL(drm_encoder_cleanup);
 
static unsigned int drm_num_planes(struct drm_device *dev)
{
unsigned int num = 0;
struct drm_plane *tmp;
 
drm_for_each_plane(tmp, dev) {
num++;
}
 
return num;
}
 
/**
* drm_universal_plane_init - Initialize a new universal plane object
* @dev: DRM device
1150,6 → 1191,7
* @formats: array of supported formats (%DRM_FORMAT_*)
* @format_count: number of elements in @formats
* @type: type of plane (overlay, primary, cursor)
* @name: printf style format string for the plane name, or NULL for default name
*
* Initializes a plane object of type @type.
*
1160,7 → 1202,8
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
enum drm_plane_type type)
enum drm_plane_type type,
const char *name, ...)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
1182,6 → 1225,22
return -ENOMEM;
}
 
if (name) {
va_list ap;
 
va_start(ap, name);
plane->name = kvasprintf(GFP_KERNEL, name, ap);
va_end(ap);
} else {
plane->name = kasprintf(GFP_KERNEL, "plane-%d",
drm_num_planes(dev));
}
if (!plane->name) {
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
return -ENOMEM;
}
 
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
plane->possible_crtcs = possible_crtcs;
1240,7 → 1299,7
 
type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
formats, format_count, type);
formats, format_count, type, NULL);
}
EXPORT_SYMBOL(drm_plane_init);
 
1272,6 → 1331,8
if (plane->state && plane->funcs->atomic_destroy_state)
plane->funcs->atomic_destroy_state(plane, plane->state);
 
kfree(plane->name);
 
memset(plane, 0, sizeof(*plane));
}
EXPORT_SYMBOL(drm_plane_cleanup);
1802,7 → 1863,8
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
drm_for_each_crtc(crtc, dev) {
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT;
goto out;
2649,7 → 2711,7
ret = -ENOENT;
goto out;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
2685,6 → 2747,8
goto out;
}
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
/*
* Check whether the primary plane supports the fb pixel format.
* Drivers not implementing the universal planes API use a
3144,7 → 3208,7
 
static struct drm_framebuffer *
internal_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r,
const struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv)
{
struct drm_mode_config *config = &dev->mode_config;
3418,6 → 3482,7
return ret;
}
 
 
/**
* drm_fb_release - remove and free the FBs on this file
* @priv: drm file for the ioctl
4565,8 → 4630,6
 
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
ret = 0;
if (connector->funcs->dpms)
ret = (*connector->funcs->dpms)(connector, (int)value);
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
4765,6 → 4828,20
{
int i;
 
/*
* In the past, drivers have attempted to model the static association
* of connector to encoder in simple connector/encoder devices using a
* direct assignment of connector->encoder = encoder. This connection
* is a logical one and the responsibility of the core, so drivers are
* expected not to mess with this.
*
* Note that the error return should've been enough here, but a large
* majority of drivers ignore the return value, so add in a big WARN
* to get people's attention.
*/
if (WARN_ON(connector->encoder))
return -EINVAL;
 
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0) {
connector->encoder_ids[i] = encoder->base.id;
5245,6 → 5322,7
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
idr_init(&dev->mode_config.tile_idr);
ida_init(&dev->mode_config.connector_ida);
 
drm_modeset_lock_all(dev);
drm_mode_create_standard_properties(dev);
5325,6 → 5403,7
crtc->funcs->destroy(crtc);
}
 
ida_destroy(&dev->mode_config.connector_ida);
idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.crtc_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
/drivers/video/drm/drm_crtc_helper.c
51,6 → 51,11
* the same callbacks which drivers can use to e.g. restore the modeset
* configuration on resume with drm_helper_resume_force_mode().
*
* Note that this helper library doesn't track the current power state of CRTCs
* and encoders. It can call callbacks like ->dpms() even though the hardware is
* already in the desired state. This deficiency has been fixed in the atomic
* helpers.
*
* The driver callbacks are mostly compatible with the atomic modeset helpers,
* except for the handling of the primary plane: Atomic helpers require that the
* primary plane is implemented as a real standalone plane and not directly tied
62,6 → 67,11
* converting to the plane helpers). New drivers must not use these functions
* but need to implement the atomic interface instead, potentially using the
* atomic helpers for that.
*
* These legacy modeset helpers use the same function table structures as
* all other modesetting helpers. See the documentation for struct
* &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
* &drm_connector_helper_funcs.
*/
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
206,8 → 216,8
* @dev: DRM device
*
* This function walks through the entire mode setting configuration of @dev. It
* will remove any crtc links of unused encoders and encoder links of
* disconnected connectors. Then it will disable all unused encoders and crtcs
* will remove any CRTC links of unused encoders and encoder links of
* disconnected connectors. Then it will disable all unused encoders and CRTCs
* either by calling their disable callback if available or by calling their
* dpms callback with DRM_MODE_DPMS_OFF.
*/
329,7 → 339,7
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
crtc->hwmode = *adjusted_mode;
 
445,12 → 455,37
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
*
* Setup a new configuration, provided by the upper layers (either an ioctl call
* from userspace or internally e.g. from the fbdev support code) in @set, and
* enable it. This is the main helper functions for drivers that implement
* kernel mode setting with the crtc helper functions and the assorted
* ->prepare(), ->modeset() and ->commit() helper callbacks.
* The drm_crtc_helper_set_config() helper function implements the set_config
* callback of struct &drm_crtc_funcs for drivers using the legacy CRTC helpers.
*
* It first tries to locate the best encoder for each connector by calling the
* connector ->best_encoder() (struct &drm_connector_helper_funcs) helper
* operation.
*
* After locating the appropriate encoders, the helper function will call the
* mode_fixup encoder and CRTC helper operations to adjust the requested mode,
* or reject it completely in which case an error will be returned to the
* application. If the new configuration after mode adjustment is identical to
* the current configuration the helper function will return without performing
* any other operation.
*
* If the adjusted mode is identical to the current mode but changes to the
* frame buffer need to be applied, the drm_crtc_helper_set_config() function
* will call the CRTC ->mode_set_base() (struct &drm_crtc_helper_funcs) helper
* operation.
*
* If the adjusted mode differs from the current mode, or if the
* ->mode_set_base() helper operation is not provided, the helper function
* performs a full mode set sequence by calling the ->prepare(), ->mode_set()
* and ->commit() CRTC and encoder helper operations, in that order.
* Alternatively it can also use the dpms and disable helper operations. For
* details see struct &drm_crtc_helper_funcs and struct
* &drm_encoder_helper_funcs.
*
* This function is deprecated. New drivers must implement atomic modeset
* support, for which this function is unsuitable. Instead drivers should use
* drm_atomic_helper_set_config().
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
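 
(Illustrative aside: the usual way this helper is wired into a legacy, non-atomic driver, as a hedged sketch.)

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,	/* the helper above */
	.destroy    = drm_crtc_cleanup,
};
 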
484,11 → 519,13
set->fb = NULL;
 
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
set->crtc->base.id, set->fb->base.id,
DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
set->crtc->base.id, set->crtc->name,
set->fb->base.id,
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n",
set->crtc->base.id, set->crtc->name);
drm_crtc_helper_disable(set->crtc);
return 0;
}
628,9 → 665,9
connector->encoder->crtc = new_crtc;
}
if (new_crtc) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
connector->base.id, connector->name,
new_crtc->base.id);
new_crtc->base.id, new_crtc->name);
} else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.id, connector->name);
650,8 → 687,8
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
save_set.fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n",
set->crtc->base.id, set->crtc->name);
set->crtc->primary->fb = save_set.fb;
ret = -EINVAL;
goto fail;
758,11 → 795,19
* @connector: affected connector
* @mode: DPMS mode
*
* This is the main helper function provided by the crtc helper framework for
* The drm_helper_connector_dpms() helper function implements the ->dpms()
* callback of struct &drm_connector_funcs for drivers using the legacy CRTC helpers.
*
* This is the main helper function provided by the CRTC helper framework for
* implementing the DPMS connector attribute. It computes the new desired DPMS
* state for all encoders and crtcs in the output mesh and calls the ->dpms()
* callback provided by the driver appropriately.
* state for all encoders and CRTCs in the output mesh and calls the ->dpms()
* callbacks provided by the driver in struct &drm_crtc_helper_funcs and struct
* &drm_encoder_helper_funcs appropriately.
*
* This function is deprecated. New drivers must implement atomic modeset
* support, for which this function is unsuitable. Instead drivers should use
* drm_atomic_helper_connector_dpms().
*
* Returns:
* Always returns 0.
*/
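 
(Illustrative aside: the matching connector function table for such a legacy driver, again a sketch.)

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms       = drm_helper_connector_dpms,	/* the helper above */
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy    = drm_connector_cleanup,
};
 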
818,7 → 863,7
* metadata fields.
*/
void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd)
const struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
 
855,6 → 900,12
* due to slight differences in allocating shared resources when the
* configuration is restored in a different order than when userspace set it up)
* need to use their own restore logic.
*
* This function is deprecated. New drivers should implement atomic mode-
* setting and use the atomic suspend/resume helpers.
*
* See also:
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
void drm_helper_resume_force_mode(struct drm_device *dev)
{
913,9 → 964,9
* @old_fb: previous framebuffer
*
* This function implements a callback useable as the ->mode_set callback
* required by the crtc helpers. Besides the atomic plane helper functions for
* required by the CRTC helpers. Besides the atomic plane helper functions for
* the primary plane the driver must also provide the ->mode_set_nofb callback
* to set up the crtc.
* to set up the CRTC.
*
* This is a transitional helper useful for converting drivers to the atomic
* interfaces.
979,7 → 1030,7
* @old_fb: previous framebuffer
*
* This function implements a callback useable as the ->mode_set_base
* required by the crtc helpers. The driver must provide the atomic plane helper
* required by the CRTC helpers. The driver must provide the atomic plane helper
* functions for the primary plane.
*
* This is a transitional helper useful for converting drivers to the atomic
/drivers/video/drm/drm_dp_mst_topology.c
21,10 → 21,6
*/
 
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
674,7 → 670,9
}
 
static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
u8 vcpi, uint16_t pbn)
u8 vcpi, uint16_t pbn,
u8 number_sdp_streams,
u8 *sdp_stream_sink)
{
struct drm_dp_sideband_msg_req_body req;
memset(&req, 0, sizeof(req));
682,6 → 680,9
req.u.allocate_payload.port_number = port_num;
req.u.allocate_payload.vcpi = vcpi;
req.u.allocate_payload.pbn = pbn;
req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
number_sdp_streams);
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
return 0;
916,7 → 917,6
/* no need to clean up vcpi
* as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
port->pdt = DP_PEER_DEVICE_NONE;
}
kfree(port);
}
1162,9 → 1162,7
drm_dp_put_port(port);
goto out;
}
if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
port->pdt == DP_PEER_DEVICE_SST_SINK) &&
port->port_num >= DP_MST_LOGICAL_PORT_0) {
if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector);
}
1674,6 → 1672,8
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
int len, ret, port_num;
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
 
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
1696,10 → 1696,13
goto fail_put;
}
 
for (i = 0; i < port->num_sdp_streams; i++)
sinks[i] = i;
 
txmsg->dst = mstb;
len = build_allocate_payload(txmsg, port_num,
id,
pbn);
pbn, port->num_sdp_streams, sinks);
 
drm_dp_queue_down_tx(mgr, txmsg);
 
1802,6 → 1805,7
return -EINVAL;
}
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
port = NULL;
req_payload.num_slots = 0;
1817,9 → 1821,10
if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state;
mgr->payloads[i].start_slot = 0;
}
1955,7 → 1960,7
{
struct drm_dp_sideband_msg_reply_body reply;
 
reply.reply_type = 1;
reply.reply_type = 0;
reply.req_type = req_type;
drm_dp_encode_sideband_reply(&reply, msg);
return 0;
2411,6 → 2416,27
EXPORT_SYMBOL(drm_dp_mst_detect_port);
 
/**
* drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
* @mgr: manager for this port
* @port: unverified pointer to a port.
*
* This returns whether the port supports audio or not.
*/
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
bool ret = false;
 
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
return ret;
ret = port->has_audio;
drm_dp_put_port(port);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
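 
A minimal usage sketch, e.g. from a driver's detect or compute-config path (the driver-side names here are illustrative):

	/* Safe even if @port is stale: the helper validates the pointer
	 * and holds a temporary reference while reading has_audio. */
	bool has_audio = drm_dp_mst_port_has_audio(&dev_priv->mst_mgr,
						   connector->port);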
 
/**
* drm_dp_mst_get_edid() - get EDID for an MST port
* @connector: toplevel connector to get EDID for
* @mgr: manager for this port
2435,6 → 2461,7
edid = drm_get_edid(connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_put_port(port);
return edid;
}
2821,6 → 2848,9
mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
mgr->max_payloads = max_payloads;
mgr->conn_base_id = conn_base_id;
if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
return -EINVAL;
mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
if (!mgr->payloads)
return -ENOMEM;
2828,7 → 2858,9
if (!mgr->proposed_vcpis)
return -ENOMEM;
set_bit(0, &mgr->payload_mask);
test_calc_pbn_mode();
if (test_calc_pbn_mode() < 0)
DRM_ERROR("MST PBN self-test failed\n");
 
return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
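 
The new -EINVAL check above is a width guard on the two bitmasks: payload/VCPI 0 is reserved (note the set_bit(0, &mgr->payload_mask) that follows), so max_payloads + 1 bits must fit. Assuming the masks are unsigned long fields, sizeof * 8 gives 32 usable bits on a 32-bit build, capping max_payloads at 31 (63 on LP64); larger values previously let set_bit() scribble past the mask into adjacent fields.
 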
/drivers/video/drm/drm_edid.c
73,10 → 73,6
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
/* Force 6bpc */
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
 
struct detailed_mode_closure {
struct drm_connector *connector;
103,9 → 99,6
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
 
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
 
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
119,9 → 112,6
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM },
 
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
{ "LGD", 764, EDID_QUIRK_FORCE_10BPC },
 
/* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
149,9 → 139,6
 
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
};
 
/*
650,8 → 637,12
/*
* Probably taken from CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
*
* Index using the VIC.
*/
static const struct drm_display_mode edid_cea_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
/* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
1000,9 → 991,11
};
 
/*
* HDMI 1.4 4k modes.
* HDMI 1.4 4k modes. Index using the VIC.
*/
static const struct drm_display_mode edid_4k_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
/* 1 - 3840x2160@30Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4016, 4104, 4400, 0,
2558,6 → 2551,33
return clock;
}
 
static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
u8 vic;
 
if (!to_match->clock)
return 0;
 
for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
unsigned int clock1, clock2;
 
/* Check both 60Hz and 59.94Hz */
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
 
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
 
if (drm_mode_equal_no_clocks(to_match, cea_mode))
return vic;
}
 
return 0;
}
 
/**
* drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode
2567,13 → 2587,13
*/
u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{
u8 mode;
u8 vic;
 
if (!to_match->clock)
return 0;
 
for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
unsigned int clock1, clock2;
 
/* Check both 60Hz and 59.94Hz */
2583,12 → 2603,17
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
return mode + 1;
return vic;
}
return 0;
}
EXPORT_SYMBOL(drm_match_cea_mode);
 
static bool drm_valid_cea_vic(u8 vic)
{
return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
}
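 
With the dummy entry occupying index 0, a valid VIC now indexes edid_cea_modes directly, and drm_valid_cea_vic() bounds both ends. A sketch of the lookup pattern the callers below switch to:

	u8 vic = drm_match_cea_mode(mode);	/* 0 == no CEA match */

	if (drm_valid_cea_vic(vic)) {
		const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
		/* use cea_mode directly; the old "vic - 1" adjustment is gone */
	}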
 
/**
* drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
* the input VIC from the CEA mode list
2598,10 → 2623,7
*/
enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
/* return picture aspect ratio for video_code - 1 to access the
* right array element
*/
return edid_cea_modes[video_code-1].picture_aspect_ratio;
return edid_cea_modes[video_code].picture_aspect_ratio;
}
EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
 
2622,6 → 2644,33
return cea_mode_alternate_clock(hdmi_mode);
}
 
static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
unsigned int clock_tolerance)
{
u8 vic;
 
if (!to_match->clock)
return 0;
 
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
 
/* Make sure to also match alternate clocks */
clock1 = hdmi_mode->clock;
clock2 = hdmi_mode_alternate_clock(hdmi_mode);
 
if (abs(to_match->clock - clock1) > clock_tolerance &&
abs(to_match->clock - clock2) > clock_tolerance)
continue;
 
if (drm_mode_equal_no_clocks(to_match, hdmi_mode))
return vic;
}
 
return 0;
}
 
/*
* drm_match_hdmi_mode - look for a HDMI mode matching given mode
* @to_match: display mode
2632,13 → 2681,13
*/
static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
{
u8 mode;
u8 vic;
 
if (!to_match->clock)
return 0;
 
for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
unsigned int clock1, clock2;
 
/* Make sure to also match alternate clocks */
2648,11 → 2697,16
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
return mode + 1;
return vic;
}
return 0;
}
 
static bool drm_valid_hdmi_vic(u8 vic)
{
return vic > 0 && vic < ARRAY_SIZE(edid_4k_modes);
}
 
static int
add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
{
2672,16 → 2726,16
list_for_each_entry(mode, &connector->probed_modes, head) {
const struct drm_display_mode *cea_mode = NULL;
struct drm_display_mode *newmode;
u8 mode_idx = drm_match_cea_mode(mode) - 1;
u8 vic = drm_match_cea_mode(mode);
unsigned int clock1, clock2;
 
if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
cea_mode = &edid_cea_modes[mode_idx];
if (drm_valid_cea_vic(vic)) {
cea_mode = &edid_cea_modes[vic];
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
mode_idx = drm_match_hdmi_mode(mode) - 1;
if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
cea_mode = &edid_4k_modes[mode_idx];
vic = drm_match_hdmi_mode(mode);
if (drm_valid_hdmi_vic(vic)) {
cea_mode = &edid_4k_modes[vic];
clock2 = hdmi_mode_alternate_clock(cea_mode);
}
}
2732,17 → 2786,17
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode;
u8 cea_mode;
u8 vic;
 
if (video_db == NULL || video_index >= video_len)
return NULL;
 
/* CEA modes are numbered 1..127 */
cea_mode = (video_db[video_index] & 127) - 1;
if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
vic = (video_db[video_index] & 127);
if (!drm_valid_cea_vic(vic))
return NULL;
 
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
if (!newmode)
return NULL;
 
2837,8 → 2891,7
struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode;
 
vic--; /* VICs start at 1 */
if (vic >= ARRAY_SIZE(edid_4k_modes)) {
if (!drm_valid_hdmi_vic(vic)) {
DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
return 0;
}
3129,20 → 3182,24
{
const struct drm_display_mode *cea_mode;
int clock1, clock2, clock;
u8 mode_idx;
u8 vic;
const char *type;
 
mode_idx = drm_match_cea_mode(mode) - 1;
if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
/*
* allow 5kHz clock difference either way to account for
* the 10kHz clock resolution limit of detailed timings.
*/
vic = drm_match_cea_mode_clock_tolerance(mode, 5);
if (drm_valid_cea_vic(vic)) {
type = "CEA";
cea_mode = &edid_cea_modes[mode_idx];
cea_mode = &edid_cea_modes[vic];
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
mode_idx = drm_match_hdmi_mode(mode) - 1;
if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
vic = drm_match_hdmi_mode_clock_tolerance(mode, 5);
if (drm_valid_hdmi_vic(vic)) {
type = "HDMI";
cea_mode = &edid_4k_modes[mode_idx];
cea_mode = &edid_4k_modes[vic];
clock1 = cea_mode->clock;
clock2 = hdmi_mode_alternate_clock(cea_mode);
} else {
3160,7 → 3217,7
return;
 
DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
type, mode_idx + 1, mode->clock, clock);
type, vic, mode->clock, clock);
mode->clock = clock;
}
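 
A worked case for the 5 kHz window, assuming the standard EDID encoding: detailed timings store the pixel clock in 10 kHz units. The 59.94 Hz variant of 1920x1080@60 (VIC 16) has an alternate clock of 148500 * 1000 / 1001 ≈ 148352 kHz, which a detailed timing can only encode as 148350 kHz; the 2 kHz error is inside the tolerance, so the mode is matched here and its clock snapped back to the exact 148352 kHz.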
 
3833,15 → 3890,9
 
drm_add_display_info(edid, &connector->display_info, connector);
 
if (quirks & EDID_QUIRK_FORCE_6BPC)
connector->display_info.bpc = 6;
 
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
 
if (quirks & EDID_QUIRK_FORCE_10BPC)
connector->display_info.bpc = 10;
 
if (quirks & EDID_QUIRK_FORCE_12BPC)
connector->display_info.bpc = 12;
 
/drivers/video/drm/drm_fb_helper.c
944,7 → 944,7
goto fail;
 
plane = mode_set->crtc->primary;
plane_mask |= drm_plane_index(plane);
plane_mask |= (1 << drm_plane_index(plane));
plane->old_fb = plane->fb;
}
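 
The one-line change above fixes a mask-building bug: the old code OR'ed the plane index itself into plane_mask rather than the corresponding bit. A sketch of the difference for planes with indices 0 and 1:

	plane_mask |= drm_plane_index(plane);		/* old: 0 | 1 == 0x1, planes conflated */
	plane_mask |= 1 << drm_plane_index(plane);	/* new: BIT(0) | BIT(1) == 0x3 */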
 
/drivers/video/drm/drm_gem.c
170,6 → 170,15
}
EXPORT_SYMBOL(drm_gem_private_object_init);
 
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
/*
* Note: obj->dma_buf can't disappear as long as we still hold a
* handle reference in obj->handle_count.
*/
}
 
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
195,6 → 204,9
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
bool final = false;
 
if (WARN_ON(obj->handle_count == 0))
return;
 
204,22 → 216,48
* checked for a name
*/
 
mutex_lock(&obj->dev->object_name_lock);
mutex_lock(&dev->object_name_lock);
if (--obj->handle_count == 0) {
drm_gem_object_handle_free(obj);
final = true;
}
mutex_unlock(&obj->dev->object_name_lock);
mutex_unlock(&dev->object_name_lock);
 
if (final)
drm_gem_object_unreference_unlocked(obj);
}
 
/*
* Called at device or object close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
 
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
// drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
 
drm_gem_object_handle_unreference_unlocked(obj);
 
return 0;
}
 
/**
* drm_gem_handle_delete - deletes the given file-private handle
* @filp: drm file-private structure to use for the handle look up
* @handle: userspace handle to delete
*
* Removes the GEM handle from the @filp lookup table and if this is the last
* handle also cleans up linked resources like GEM names.
* Removes the GEM handle, added with drm_gem_handle_create(), from the @filp
* lookup table. If this is the last handle it also cleans up linked
* resources like GEM names.
*/
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
250,12 → 288,7
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
 
// drm_vma_node_revoke(&obj->vma_node, filp->filp);
 
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
 
drm_gem_object_handle_unreference_unlocked(obj);
drm_gem_object_release_handle(handle, obj, filp);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
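 
With this change, explicit handle deletion and the implicit cleanup at file close share one path: drm_gem_object_release_handle() is also the callback that drm_gem_release() runs over every surviving handle. A sketch of that existing pattern, for reference:

	/* in drm_gem_release(), at file close */
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);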
286,6 → 319,10
* This expects the dev->object_name_lock to be held already and will drop it
* before returning. Used to avoid races in establishing new handles when
* importing an object from either an flink name or a dma-buf.
*
* Handles must be released again through drm_gem_handle_delete(). This is done
* when userspace closes @file_priv for all attached handles, or through the
* GEM_CLOSE ioctl for individual handles.
*/
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
293,9 → 330,12
u32 *handlep)
{
struct drm_device *dev = obj->dev;
u32 handle;
int ret;
 
WARN_ON(!mutex_is_locked(&dev->object_name_lock));
if (obj->handle_count++ == 0)
drm_gem_object_reference(obj);
 
/*
* Get the user-visible handle using idr. Preload and perform
305,15 → 345,15
spin_lock(&file_priv->table_lock);
 
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
drm_gem_object_reference(obj);
obj->handle_count++;
 
spin_unlock(&file_priv->table_lock);
idr_preload_end();
 
mutex_unlock(&dev->object_name_lock);
if (ret < 0)
goto err_unref;
 
*handlep = ret;
handle = ret;
 
// ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
// if (ret) {
327,6 → 367,7
goto err_revoke;
}
 
*handlep = handle;
return 0;
 
err_revoke:
333,7 → 374,7
// drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
err_remove:
spin_lock(&file_priv->table_lock);
idr_remove(&file_priv->object_idr, *handlep);
idr_remove(&file_priv->object_idr, handle);
spin_unlock(&file_priv->table_lock);
err_unref:
drm_gem_object_handle_unreference_unlocked(obj);
521,7 → 562,17
EXPORT_SYMBOL(drm_gem_put_pages);
#endif
 
/** Returns a reference to the object named by the handle. */
/**
* drm_gem_object_lookup - look up a GEM object from its handle
* @dev: DRM device
* @filp: DRM file private data
* @handle: userspace handle
*
* Returns:
*
* A reference to the object named by the handle if such exists on @filp, NULL
* otherwise.
*/
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
u32 handle)
595,7 → 646,6
return -ENOENT;
 
mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
/* prevent races with concurrent gem_close. */
if (obj->handle_count == 0) {
ret = -ENOENT;
603,7 → 653,7
}
 
if (!obj->name) {
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
if (ret < 0)
goto err;
 
614,9 → 664,12
ret = 0;
 
err:
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference_unlocked(obj);
 
// printf("%s object %p name %d refcount %d\n",
// __FUNCTION__, obj, obj->name, obj->refcount.refcount);
 
return ret;
}
 
661,6 → 714,9
args->handle = handle;
args->size = obj->size;
 
// printf("%s object %p handle %d refcount %d\n",
// __FUNCTION__, obj, handle, obj->refcount.refcount);
 
return 0;
}
 
680,27 → 736,6
spin_lock_init(&file_private->table_lock);
}
 
/*
* Called at device close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
 
drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
 
drm_gem_object_handle_unreference_unlocked(obj);
 
return 0;
}
 
/**
* drm_gem_release - release file-private GEM resources
* @dev: drm_device which is being closed by userspace
/drivers/video/drm/drm_mipi_dsi.c
25,7 → 25,6
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include <drm/drmP.h>
#include <drm/drm_mipi_dsi.h>
 
#include <linux/device.h>
/drivers/video/drm/drm_modes.c
708,7 → 708,8
}
EXPORT_SYMBOL(drm_mode_set_name);
 
/** drm_mode_hsync - get the hsync of a mode
/**
* drm_mode_hsync - get the hsync of a mode
* @mode: mode
*
* Returns:
917,6 → 918,23
} else if (mode1->clock != mode2->clock)
return false;
 
return drm_mode_equal_no_clocks(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);
 
/**
* drm_mode_equal_no_clocks - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks.
*
* Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
(mode2->flags & DRM_MODE_FLAG_3D_MASK))
return false;
923,7 → 941,7
 
return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);
EXPORT_SYMBOL(drm_mode_equal_no_clocks);
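 
Exporting drm_mode_equal_no_clocks() lets callers bolt on their own clock policy, which is exactly what the tolerance matchers added to drm_edid.c above do. A minimal sketch (probed, ref and tolerance are illustrative):

	/* equal timings; accept the clock if within @tolerance kHz */
	if (drm_mode_equal_no_clocks(probed, ref) &&
	    abs(probed->clock - ref->clock) <= tolerance)
		/* treat as the same mode */;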
 
/**
* drm_mode_equal_no_clocks_no_stereo - test modes for equality
1056,7 → 1074,7
MODE_STATUS(ONE_SIZE),
MODE_STATUS(NO_REDUCED),
MODE_STATUS(NO_STEREO),
MODE_STATUS(UNVERIFIED),
MODE_STATUS(STALE),
MODE_STATUS(BAD),
MODE_STATUS(ERROR),
};
1154,7 → 1172,6
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
* @merge_type_bits: whether to merge or overwrite type bits
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
1163,34 → 1180,49
* This is just a helper function; it doesn't validate any modes itself and also
* doesn't prune any invalid modes. Callers need to do that themselves.
*/
void drm_mode_connector_list_update(struct drm_connector *connector,
bool merge_type_bits)
void drm_mode_connector_list_update(struct drm_connector *connector)
{
struct drm_display_mode *mode;
struct drm_display_mode *pmode, *pt;
int found_it;
 
WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
 
list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
head) {
found_it = 0;
list_for_each_entry_safe(pmode, pt, &connector->probed_modes, head) {
struct drm_display_mode *mode;
bool found_it = false;
 
/* go through current modes checking for the new probed mode */
list_for_each_entry(mode, &connector->modes, head) {
if (drm_mode_equal(pmode, mode)) {
found_it = 1;
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
if (merge_type_bits)
if (!drm_mode_equal(pmode, mode))
continue;
 
found_it = true;
 
/*
* If the old matching mode is stale (ie. left over
* from a previous probe) just replace it outright.
* Otherwise just merge the type bits between all
* equal probed modes.
*
* If two probed modes are considered equal, pick the
* actual timings from the one that's marked as
* preferred (in case the match isn't 100%). If
* multiple or zero preferred modes are present, favor
* the mode added to the probed_modes list first.
*/
if (mode->status == MODE_STALE) {
drm_mode_copy(mode, pmode);
} else if ((mode->type & DRM_MODE_TYPE_PREFERRED) == 0 &&
(pmode->type & DRM_MODE_TYPE_PREFERRED) != 0) {
pmode->type |= mode->type;
drm_mode_copy(mode, pmode);
} else {
mode->type |= pmode->type;
else
mode->type = pmode->type;
}
 
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
}
}
 
if (!found_it) {
list_move_tail(&pmode->head, &connector->modes);
1401,13 → 1433,6
return NULL;
 
mode->type |= DRM_MODE_TYPE_USERDEF;
/* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
if (cmd->xres == 1366 && mode->hdisplay == 1368) {
mode->hdisplay = 1366;
mode->hsync_start--;
mode->hsync_end--;
drm_mode_set_name(mode);
}
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
1494,10 → 1519,8
if (out->status != MODE_OK)
goto out;
 
drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
 
ret = 0;
 
out:
return ret;
}
}
/drivers/video/drm/drm_modeset_lock.c
48,9 → 48,7
* goto retry;
* }
* }
*
* ... do stuff ...
*
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
*/
57,11 → 55,18
 
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
* @dev: DRM device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped with
* drm_modeset_unlock_all.
* scheme isn't (yet) implemented. Locks must be dropped by calling the
* drm_modeset_unlock_all() function.
*
* This function is deprecated. It allocates a lock acquisition context and
* stores it in the DRM device's ->mode_config. This facilitates conversion of
* existing code because it removes the need to manually deal with the
* acquisition context, but it is also brittle because the context is global
* and care must be taken not to nest calls. New code should use the
* drm_modeset_lock_all_ctx() function and pass in the context explicitly.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
78,39 → 83,43
drm_modeset_acquire_init(ctx, 0);
 
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_crtcs(dev, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_ctx(dev, ctx);
if (ret < 0) {
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
 
drm_modeset_acquire_fini(ctx);
kfree(ctx);
return;
}
 
WARN_ON(config->acquire_ctx);
 
/* now we hold the locks, so now that it is safe, stash the
* ctx for drm_modeset_unlock_all():
/*
* We hold the locks now, so it is safe to stash the acquisition
* context for drm_modeset_unlock_all().
*/
config->acquire_ctx = ctx;
 
drm_warn_on_modeset_not_all_locked(dev);
 
return;
 
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
 
kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
 
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
* @dev: DRM device
*
* This function drops all modeset locks taken by drm_modeset_lock_all.
* This function drops all modeset locks taken by a previous call to the
* drm_modeset_lock_all() function.
*
* This function is deprecated. It uses the lock acquisition context stored
* in the DRM device's ->mode_config. This facilitates conversion of existing
* code because it removes the need to manually deal with the acquisition
* context, but it is also brittle because the context is global and care must
* be taken not to nest calls. New code should pass the acquisition context
* directly to the drm_modeset_drop_locks() function.
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
431,15 → 440,35
}
EXPORT_SYMBOL(drm_modeset_unlock);
 
/* In some legacy codepaths it's convenient to just grab all the crtc and plane
* related locks. */
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
/**
* drm_modeset_lock_all_ctx - take all modeset locks
* @dev: DRM device
* @ctx: lock acquisition context
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented.
*
* Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex
* since that lock isn't required for modeset state changes. Callers which
* need to grab that lock too need to do so outside of the acquire context
* @ctx.
*
* Locks acquired with this function should be released by calling the
* drm_modeset_drop_locks() function on @ctx.
*
* Returns: 0 on success or a negative error-code on failure.
*/
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
int ret = 0;
int ret;
 
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
if (ret)
return ret;
 
drm_for_each_crtc(crtc, dev) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
454,4 → 483,4
 
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
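 
The explicit-context pattern the new kerneldoc points to looks like this in practice (a minimal sketch using only the functions documented above):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	/* ... modeset state updates ... */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
 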
/drivers/video/drm/drm_plane_helper.c
57,6 → 57,10
* by the atomic helpers.
*
* Again drivers are strongly urged to switch to the new interfaces.
*
* The plane helpers share the function table structures with other helpers,
* specifically also the atomic helpers. See struct &drm_plane_helper_funcs for
* the details.
*/
 
/*
164,6 → 168,8
vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
DRM_DEBUG_KMS("Invalid scaling of plane\n");
drm_rect_debug_print("src: ", src, true);
drm_rect_debug_print("dst: ", dest, false);
return -ERANGE;
}
 
180,6 → 186,8
 
if (!can_position && !drm_rect_equals(dest, clip)) {
DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
drm_rect_debug_print("dst: ", dest, false);
drm_rect_debug_print("clip: ", clip, false);
return -EINVAL;
}
 
367,7 → 375,7
&drm_primary_helper_funcs,
safe_modeset_formats,
ARRAY_SIZE(safe_modeset_formats),
DRM_PLANE_TYPE_PRIMARY);
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
primary = NULL;
394,7 → 402,8
struct drm_plane *primary;
 
primary = create_primary_plane(dev);
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
NULL);
}
EXPORT_SYMBOL(drm_crtc_init);
 
/drivers/video/drm/drm_probe_helper.c
53,6 → 53,9
* This helper library can be used independently of the modeset helper library.
* Drivers can also overwrite different parts e.g. use their own hotplug
* handling code to avoid probing unrelated outputs.
*
* The probe helpers share the function table structures with other display
* helper libraries. See struct &drm_connector_helper_funcs for the details.
*/
 
static bool drm_kms_helper_poll = true;
126,9 → 129,64
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
 
 
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* Based on the helper callbacks implemented by @connector in struct
* &drm_connector_helper_funcs try to detect all valid modes. Modes will first
* be added to the connector's probed_modes list, then culled (based on validity
* and the @maxX, @maxY parameters) and put into the normal modes list.
*
* Intended to be used as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the CRTC helpers for output mode
* filtering and detection.
*
* The basic procedure is as follows
*
* 1. All modes currently on the connector's modes list are marked as stale
*
* 2. New modes are added to the connector's probed_modes list with
* drm_mode_probed_add(). New modes start their life with status as OK.
* Modes are added from a single source using the following priority order.
*
* - debugfs 'override_edid' (used for testing only)
* - firmware EDID (drm_load_edid_firmware())
* - connector helper ->get_modes() vfunc
* - if the connector status is connector_status_connected, standard
* VESA DMT modes up to 1024x768 are automatically added
* (drm_add_modes_noedid())
*
* Finally modes specified via the kernel command line (video=...) are
* added in addition to what the earlier probes produced
* (drm_helper_probe_add_cmdline_mode()). These modes are generated
* using the VESA GTF/CVT formulas.
*
* 3. Modes are moved from the probed_modes list to the modes list. Potential
* duplicates are merged together (see drm_mode_connector_list_update()).
* After this step the probed_modes list will be empty again.
*
* 4. Any non-stale mode on the modes list then undergoes validation
*
* - drm_mode_validate_basic() performs basic sanity checks
* - drm_mode_validate_size() filters out modes larger than @maxX and @maxY
* (if specified)
* - drm_mode_validate_flag() checks the modes against basic connector
* capabilities (interlace_allowed, doublescan_allowed, stereo_allowed)
* - the optional connector ->mode_valid() helper can perform driver and/or
* hardware specific checks
*
* 5. Any mode whose status is not OK is pruned from the connector's modes list,
* accompanied by a debug message indicating the reason for the mode's
* rejection (see drm_mode_prune_invalid()).
*
* Returns:
* The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
143,10 → 201,12
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
connector->name);
/* set all modes to the unverified state */
/* set all old modes to the stale state */
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
mode->status = MODE_STALE;
 
old_status = connector->status;
 
if (connector->force) {
if (connector->force == DRM_FORCE_ON ||
connector->force == DRM_FORCE_ON_DIGITAL)
156,9 → 216,8
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
old_status = connector->status;
 
connector->status = connector->funcs->detect(connector, true);
}
 
/*
* Normally either the driver's hpd code or the poll loop should
167,10 → 226,11
* check here, and if anything changed start the hotplug code.
*/
if (old_status != connector->status) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
old_status, connector->status);
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
 
/*
* The hotplug event code might call into the fb
183,7 → 243,6
schedule_delayed_work(&dev->mode_config.output_poll_work,
0);
}
}
 
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
199,17 → 258,16
goto prune;
}
 
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
{
if (connector->override_edid) {
struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
 
count = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
} else
} else {
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
count = (*connector_funcs->get_modes)(connector);
}
 
219,7 → 277,7
if (count == 0)
goto prune;
 
drm_mode_connector_list_update(connector, merge_type_bits);
drm_mode_connector_list_update(connector);
 
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
263,49 → 321,9
 
return count;
}
 
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* Based on the helper callbacks implemented by @connector try to detect all
* valid modes. Modes will first be added to the connector's probed_modes list,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
* Intended to be used as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
* Returns:
* The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
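 
As the kerneldoc notes, this is meant to back a connector's ->fill_modes() vfunc directly; the typical wiring in a driver looks like this (sketch, my_detect is illustrative):

	static const struct drm_connector_funcs my_connector_funcs = {
		.detect     = my_detect,
		.fill_modes = drm_helper_probe_single_connector_modes,
		/* other mandatory callbacks omitted for brevity */
	};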
 
/**
* drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* This operates like drm_helper_probe_single_connector_modes except it
* replaces the mode bits instead of merging them for preferred modes.
*/
int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
 
/**
* drm_kms_helper_hotplug_event - fire off KMS hotplug events
* @dev: drm_device whose connector state changed
*
/drivers/video/drm/drm_rect.c
275,22 → 275,23
 
/**
* drm_rect_debug_print - print the rectangle information
* @prefix: prefix string
* @r: rectangle to print
* @fixed_point: rectangle is in 16.16 fixed point format
*/
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point)
{
int w = drm_rect_width(r);
int h = drm_rect_height(r);
 
if (fixed_point)
DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix,
w >> 16, ((w & 0xffff) * 15625) >> 10,
h >> 16, ((h & 0xffff) * 15625) >> 10,
r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
else
DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1);
}
EXPORT_SYMBOL(drm_rect_debug_print);
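 
With the prefix argument, the two new call sites in drm_plane_helper.c produce self-identifying log lines; for example (sketch, 16.16 fixed-point source rectangle):

	struct drm_rect src = { .x1 = 0, .y1 = 0,
				.x2 = 1920 << 16, .y2 = 1080 << 16 };

	drm_rect_debug_print("src: ", &src, true);
	/* logs: src: 1920.000000x1080.000000+0.000000+0.000000 */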
 
/drivers/video/drm/i915/intel_dsi_cmd.c
File deleted
/drivers/video/drm/i915/intel_dsi_cmd.h
File deleted
/drivers/video/drm/i915/kos_gem_fb.c
File deleted
/drivers/video/drm/i915/Makefile
80,9 → 80,9
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dp_link_training.c \
intel_dp_mst.c \
intel_dsi.c \
intel_dsi_cmd.c \
intel_dsi_panel_vbt.c \
intel_dsi_pll.c \
intel_dvo.c \
/drivers/video/drm/i915/Makefile.lto
80,9 → 80,9
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dp_link_training.c \
intel_dp_mst.c \
intel_dsi.c \
intel_dsi_cmd.c \
intel_dsi_panel_vbt.c \
intel_dsi_pll.c \
intel_dvo.c \
/drivers/video/drm/i915/dvo.h
32,7 → 32,8
const char *name;
int type;
/* DVOA/B/C output register */
u32 dvo_reg;
i915_reg_t dvo_reg;
i915_reg_t dvo_srcdim_reg;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
128,11 → 129,11
void (*dump_regs)(struct intel_dvo_device *dvo);
};
 
extern struct intel_dvo_dev_ops sil164_ops;
extern struct intel_dvo_dev_ops ch7xxx_ops;
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
extern struct intel_dvo_dev_ops ns2501_ops;
extern const struct intel_dvo_dev_ops sil164_ops;
extern const struct intel_dvo_dev_ops ch7xxx_ops;
extern const struct intel_dvo_dev_ops ivch_ops;
extern const struct intel_dvo_dev_ops tfp410_ops;
extern const struct intel_dvo_dev_ops ch7017_ops;
extern const struct intel_dvo_dev_ops ns2501_ops;
 
#endif /* _INTEL_DVO_H */
/drivers/video/drm/i915/dvo_ch7017.c
402,7 → 402,7
}
}
 
struct intel_dvo_dev_ops ch7017_ops = {
const struct intel_dvo_dev_ops ch7017_ops = {
.init = ch7017_init,
.detect = ch7017_detect,
.mode_valid = ch7017_mode_valid,
/drivers/video/drm/i915/dvo_ch7xxx.c
356,7 → 356,7
}
}
 
struct intel_dvo_dev_ops ch7xxx_ops = {
const struct intel_dvo_dev_ops ch7xxx_ops = {
.init = ch7xxx_init,
.detect = ch7xxx_detect,
.mode_valid = ch7xxx_mode_valid,
/drivers/video/drm/i915/dvo_ivch.c
490,7 → 490,7
}
}
 
struct intel_dvo_dev_ops ivch_ops = {
const struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init,
.dpms = ivch_dpms,
.get_hw_state = ivch_get_hw_state,
/drivers/video/drm/i915/dvo_ns2501.c
698,7 → 698,7
}
}
 
struct intel_dvo_dev_ops ns2501_ops = {
const struct intel_dvo_dev_ops ns2501_ops = {
.init = ns2501_init,
.detect = ns2501_detect,
.mode_valid = ns2501_mode_valid,
/drivers/video/drm/i915/dvo_sil164.c
267,7 → 267,7
}
}
 
struct intel_dvo_dev_ops sil164_ops = {
const struct intel_dvo_dev_ops sil164_ops = {
.init = sil164_init,
.detect = sil164_detect,
.mode_valid = sil164_mode_valid,
/drivers/video/drm/i915/dvo_tfp410.c
306,7 → 306,7
}
}
 
struct intel_dvo_dev_ops tfp410_ops = {
const struct intel_dvo_dev_ops tfp410_ops = {
.init = tfp410_init,
.detect = tfp410_detect,
.mode_valid = tfp410_mode_valid,
/drivers/video/drm/i915/firmware/bxt_dmc_ver1.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/bxt_guc_ver8_7.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/bxt_huc_ver01_07_1398.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/kbl_dmc_ver1.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/kbl_guc_ver9_14.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/kbl_huc_ver02_00_1810.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/skl_dmc_ver1.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/skl_dmc_ver1_26.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/skl_guc_ver1.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/skl_guc_ver6.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/firmware/skl_huc_ver01_07_1398.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/fwblob.asm
10,35 → 10,58
public ___start_builtin_fw
public ___end_builtin_fw
 
section '.text' code readable executable align 16
section '.rdata' data readable align 16
 
align 16
 
macro CP_code [arg]
macro DMC_code [arg]
{
dd FIRMWARE_#arg#_CP
dd arg#_CP_START
dd (arg#_CP_END - arg#_CP_START)
dd FIRMWARE_#arg#_DMC
dd arg#_DMC_START
dd (arg#_DMC_END - arg#_DMC_START)
}
 
macro CP_firmware [arg]
macro DMC_firmware [arg]
{
forward
FIRMWARE_#arg#_CP db 'i915/',`arg,'.bin',0
FIRMWARE_#arg#_DMC db 'i915/',`arg,'.bin',0
forward
 
align 16
arg#_CP_START:
arg#_DMC_START:
file "firmware/"#`arg#".bin"
arg#_CP_END:
arg#_DMC_END:
}
 
macro GUC_code [arg]
{
dd FIRMWARE_#arg#_GUC
dd arg#_GUC_START
dd (arg#_GUC_END - arg#_GUC_START)
}
 
macro GUC_firmware [arg]
{
forward
FIRMWARE_#arg#_GUC db 'i915/',`arg,'.bin',0
forward
 
align 16
arg#_GUC_START:
file "firmware/"#`arg#".bin"
arg#_GUC_END:
}
 
___start_builtin_fw:
 
CP_code skl_guc_ver4
DMC_code skl_dmc_ver1
DMC_code bxt_dmc_ver1
GUC_code skl_guc_ver4
 
___end_builtin_fw:
 
CP_firmware skl_guc_ver4
DMC_firmware skl_dmc_ver1
DMC_firmware bxt_dmc_ver1
GUC_firmware skl_guc_ver4
 
 
/drivers/video/drm/i915/getopt.c
83,6 → 83,9
#endif
 
#define strlen __builtin_strlen
#define strcmp __builtin_strcmp
#define strncmp __builtin_strncmp
#define printf __builtin_printf
 
# define _(msgid) (msgid)
 
/drivers/video/drm/i915/i915_cmd_parser.c
407,14 → 407,14
* LRI.
*/
struct drm_i915_reg_descriptor {
u32 addr;
i915_reg_t addr;
u32 mask;
u32 value;
};
 
/* Convenience macro for adding 32-bit registers. */
#define REG32(address, ...) \
{ .addr = address, __VA_ARGS__ }
#define REG32(_reg, ...) \
{ .addr = (_reg), __VA_ARGS__ }
 
/*
* Convenience macro for adding 64-bit registers.
423,9 → 423,14
* access commands only allow 32-bit accesses. Hence, we have to include
* entries for both halves of the 64-bit registers.
*/
#define REG64(addr) \
REG32(addr), REG32(addr + sizeof(u32))
#define REG64(_reg) \
{ .addr = _reg }, \
{ .addr = _reg ## _UDW }
 
#define REG64_IDX(_reg, idx) \
{ .addr = _reg(idx) }, \
{ .addr = _reg ## _UDW(idx) }
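 
REG64_IDX relies purely on token pasting, so the register family must provide a matching _UDW (upper dword) variant. One entry from the table below expands as sketched:

/* REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0) becomes: */
{ .addr = GEN7_SO_NUM_PRIMS_WRITTEN(0) },	/* low 32 bits */
{ .addr = GEN7_SO_NUM_PRIMS_WRITTEN_UDW(0) },	/* high 32 bits */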
 
static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(GPGPU_THREADS_DISPATCHED),
REG64(HS_INVOCATION_COUNT),
451,14 → 456,14
REG32(GEN7_GPGPU_DISPATCHDIMX),
REG32(GEN7_GPGPU_DISPATCHDIMY),
REG32(GEN7_GPGPU_DISPATCHDIMZ),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
REG32(GEN7_SO_WRITE_OFFSET(0)),
REG32(GEN7_SO_WRITE_OFFSET(1)),
REG32(GEN7_SO_WRITE_OFFSET(2)),
592,7 → 597,7
bool ret = true;
 
for (i = 0; i < reg_count; i++) {
u32 curr = reg_table[i].addr;
u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
 
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
847,7 → 852,7
int i;
 
for (i = 0; i < count; i++) {
if (table[i].addr == addr)
if (i915_mmio_reg_offset(table[i].addr) == addr)
return &table[i];
}
}
1023,7 → 1028,7
* to the register. Hence, limit OACONTROL writes to
* only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false;
/drivers/video/drm/i915/i915_dma.c
28,7 → 28,6
 
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
44,6 → 43,7
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
 
130,7 → 130,7
value = 1;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = 1;
value = capable(CAP_SYS_ADMIN);
break;
case I915_PARAM_HAS_PINNED_BATCHES:
value = 1;
206,7 → 206,7
u32 temp;
bool enabled;
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
return;
 
dev_priv->mchbar_need_disable = false;
286,7 → 286,7
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ret = intel_parse_bios(dev);
ret = intel_bios_init(dev_priv);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
 
305,8 → 305,10
if (ret)
goto cleanup_vga_switcheroo;
 
intel_power_domains_init_hw(dev_priv);
intel_power_domains_init_hw(dev_priv, false);
 
intel_csr_ucode_init(dev_priv);
 
ret = intel_irq_install(dev_priv);
if (ret)
goto cleanup_gem_stolen;
317,6 → 319,8
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
 
intel_guc_ucode_init(dev);
 
ret = i915_gem_init(dev);
if (ret)
goto cleanup_irq;
358,6 → 362,7
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
cleanup_irq:
intel_guc_ucode_fini(dev);
// drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
571,7 → 576,8
* supports EU power gating on devices with more than one EU
* pair per subslice.
*/
info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
(info->slice_total > 1));
info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
info->has_eu_pg = (info->eu_per_subslice > 2);
}
685,7 → 691,7
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
} else if (IS_VALLEYVIEW(dev))
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
697,7 → 703,7
info->num_pipes = 0;
} else if (info->num_pipes > 0 &&
(INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
!IS_VALLEYVIEW(dev)) {
HAS_PCH_SPLIT(dev)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
 
742,9 → 748,6
 
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
if (!IS_VALLEYVIEW(dev_priv))
return;
 
/*
* IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
* CHV x1 PHY (DP/HDMI D)
753,7 → 756,7
if (IS_CHERRYVIEW(dev_priv)) {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
} else {
} else if (IS_VALLEYVIEW(dev_priv)) {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
}
}
798,11 → 801,12
spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->csr_lock);
mutex_init(&dev_priv->av_mutex);
 
intel_pm_setup(dev);
 
intel_runtime_pm_get(dev_priv);
 
intel_display_crc_init(dev);
 
i915_dump_device_info(dev_priv);
847,9 → 851,6
 
intel_uncore_init(dev);
 
/* Load CSR Firmware for SKL */
intel_csr_ucode_init(dev);
 
ret = i915_gem_gtt_init(dev);
if (ret)
goto out_freecsr;
998,6 → 999,8
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
intel_fbdev_fini(dev);
 
i915_audio_component_cleanup(dev_priv);
 
ret = i915_gem_suspend(dev);
1020,8 → 1023,6
 
acpi_video_unregister();
 
intel_fbdev_fini(dev);
 
drm_vblank_cleanup(dev);
 
intel_modeset_cleanup(dev);
1063,7 → 1064,7
intel_fbc_cleanup_cfb(dev_priv);
i915_gem_cleanup_stolen(dev);
 
intel_csr_ucode_fini(dev);
intel_csr_ucode_fini(dev_priv);
 
intel_teardown_mchbar(dev);
 
1132,8 → 1133,6
{
struct drm_i915_file_private *file_priv = file->driver_priv;
 
if (file_priv && file_priv->bsd_ring)
file_priv->bsd_ring = NULL;
kfree(file_priv);
}
 
/drivers/video/drm/i915/i915_drv.c
37,14 → 37,14
 
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>
 
#include <drm/drm_crtc_helper.h>
 
#include <syscall.h>
 
int init_display_kms(struct drm_device *dev);
 
extern int intel_agp_enabled;
 
static struct drm_driver driver;
 
#define GEN_DEFAULT_PIPEOFFSETS \
68,14 → 68,9
#define IVB_CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
 
int init_display_kms(struct drm_device *dev);
 
 
extern int intel_agp_enabled;
 
#define PCI_VENDOR_ID_INTEL 0x8086
 
 
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
207,13 → 202,13
.need_gfx_hws = 1, .has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_llc = 1
.has_llc = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS
 
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_ivybridge_m_info = {
220,8 → 215,6
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_ivybridge_q_info = {
228,100 → 221,64
GEN7_FEATURES,
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
#define VLV_FEATURES \
.gen = 7, .num_pipes = 2, \
.need_gfx_hws = 1, .has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.display_mmio_offset = VLV_DISPLAY_BASE, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
 
static const struct intel_device_info intel_valleyview_m_info = {
GEN7_FEATURES,
VLV_FEATURES,
.is_valleyview = 1,
.is_mobile = 1,
.num_pipes = 2,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_valleyview_d_info = {
GEN7_FEATURES,
.num_pipes = 2,
VLV_FEATURES,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
#define HSW_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.has_ddi = 1, \
.has_fpga_dbg = 1
 
static const struct intel_device_info intel_haswell_d_info = {
GEN7_FEATURES,
HSW_FEATURES,
.is_haswell = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_haswell_m_info = {
GEN7_FEATURES,
HSW_FEATURES,
.is_haswell = 1,
.is_mobile = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_broadwell_d_info = {
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
HSW_FEATURES,
.gen = 8,
};
 
static const struct intel_device_info intel_broadwell_m_info = {
.gen = 8, .is_mobile = 1, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
HSW_FEATURES,
.gen = 8, .is_mobile = 1,
};
 
static const struct intel_device_info intel_broadwell_gt3d_info = {
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
HSW_FEATURES,
.gen = 8,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_broadwell_gt3m_info = {
.gen = 8, .is_mobile = 1, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
HSW_FEATURES,
.gen = 8, .is_mobile = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_cherryview_info = {
328,7 → 285,7
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_valleyview = 1,
.is_cherryview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
335,33 → 292,21
};
 
static const struct intel_device_info intel_skylake_info = {
HSW_FEATURES,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
.gen = 9,
};
 
static const struct intel_device_info intel_skylake_gt3_info = {
HSW_FEATURES,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_broxton_info = {
.is_preliminary = 1,
.is_broxton = 1,
.gen = 9,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
373,6 → 318,21
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_kabylake_info = {
HSW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
};
 
static const struct intel_device_info intel_kabylake_gt3_info = {
HSW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
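 
The HSW_FEATURES/VLV_FEATURES consolidation leans on a C property the comments call out explicitly ("legal, last one wins"): a designated initializer may be repeated, and the last occurrence takes effect. A reduced sketch of the mechanism (field subset illustrative):

#define FEATURES_SKETCH .gen = 7, .has_llc = 1

static const struct intel_device_info sketch_info = {
	FEATURES_SKETCH,
	.gen = 8,	/* overrides the macro's .gen = 7 */
};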
 
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
379,48 → 339,47
* and subvendor IDs, we need it to come before the more general IVB
* PCI ID matches, otherwise we'll use the wrong info struct above.
*/
#define INTEL_PCI_IDS \
INTEL_I915G_IDS(&intel_i915g_info), \
INTEL_I915GM_IDS(&intel_i915gm_info), \
INTEL_I945G_IDS(&intel_i945g_info), \
INTEL_I945GM_IDS(&intel_i945gm_info), \
INTEL_I965G_IDS(&intel_i965g_info), \
INTEL_G33_IDS(&intel_g33_info), \
INTEL_I965GM_IDS(&intel_i965gm_info), \
INTEL_GM45_IDS(&intel_gm45_info), \
INTEL_G45_IDS(&intel_g45_info), \
INTEL_PINEVIEW_IDS(&intel_pineview_info), \
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
INTEL_HSW_D_IDS(&intel_haswell_d_info), \
INTEL_HSW_M_IDS(&intel_haswell_m_info), \
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
INTEL_CHV_IDS(&intel_cherryview_info), \
INTEL_SKL_GT1_IDS(&intel_skylake_info), \
INTEL_SKL_GT2_IDS(&intel_skylake_info), \
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \
INTEL_BXT_IDS(&intel_broxton_info)
 
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
static const struct pci_device_id pciidlist[] = {
INTEL_I915G_IDS(&intel_i915g_info),
INTEL_I915GM_IDS(&intel_i915gm_info),
INTEL_I945G_IDS(&intel_i945g_info),
INTEL_I945GM_IDS(&intel_i945gm_info),
INTEL_I965G_IDS(&intel_i965g_info),
INTEL_G33_IDS(&intel_g33_info),
INTEL_I965GM_IDS(&intel_i965gm_info),
INTEL_GM45_IDS(&intel_gm45_info),
INTEL_G45_IDS(&intel_g45_info),
INTEL_PINEVIEW_IDS(&intel_pineview_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
INTEL_HSW_D_IDS(&intel_haswell_d_info),
INTEL_HSW_M_IDS(&intel_haswell_m_info),
INTEL_VLV_M_IDS(&intel_valleyview_m_info),
INTEL_VLV_D_IDS(&intel_valleyview_d_info),
INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
INTEL_CHV_IDS(&intel_cherryview_info),
INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
INTEL_BXT_IDS(&intel_broxton_info),
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
{0, 0, 0}
};
 
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
 
 
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
enum intel_pch ret = PCH_NOP;
441,7 → 400,7
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
} else if (IS_SKYLAKE(dev)) {
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
}
504,11 → 463,13
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
pch->subsystem_vendor == 0x1af4 &&
552,38 → 513,16
}
 
#if 0
void i915_firmware_load_error_print(const char *fw_path, int err)
{
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
 
/*
* If the reason is not known assume -ENOENT since that's the most
* usual failure mode.
*/
if (!err)
err = -ENOENT;
 
if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
return;
 
DRM_ERROR(
"The driver is built-in, so to load the firmware you need to\n"
"include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
"in your initrd/initramfs image.\n");
}
 
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct drm_encoder *encoder;
struct intel_encoder *encoder;
 
drm_modeset_lock_all(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 
if (intel_encoder->suspend)
intel_encoder->suspend(intel_encoder);
}
for_each_intel_encoder(dev, encoder)
if (encoder->suspend)
encoder->suspend(encoder);
drm_modeset_unlock_all(dev);
}
 
590,9 → 529,16
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
 
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
if (acpi_target_system_state() < ACPI_STATE_S3)
return true;
#endif
return false;
}
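/* True when the platform is entering s2idle (S0ix) rather than S3.
 * The callers below use this to pick PCI_D1 vs PCI_D3cold for the
 * opregion notification and to decide whether the DMC firmware keeps
 * the power domains alive across suspend. */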
 
static int i915_drm_suspend(struct drm_device *dev)
{
605,6 → 551,8
dev_priv->modeset_restore = MODESET_SUSPENDED;
mutex_unlock(&dev_priv->modeset_restore_lock);
 
disable_rpm_wakeref_asserts(dev_priv);
 
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_display_set_init_power(dev_priv, true);
617,7 → 565,7
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
return error;
goto out;
}
 
intel_guc_suspend(dev);
645,11 → 593,7
 
i915_save_state(dev);
 
opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
if (acpi_target_system_state() < ACPI_STATE_S3)
opregion_target_state = PCI_D1;
#endif
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev, opregion_target_state);
 
intel_uncore_forcewake_reset(dev, false);
661,20 → 605,42
 
intel_display_set_init_power(dev_priv, false);
 
return 0;
if (HAS_CSR(dev_priv))
flush_work(&dev_priv->csr.work);
 
out:
enable_rpm_wakeref_asserts(dev_priv);
 
return error;
}
 
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
struct drm_i915_private *dev_priv = drm_dev->dev_private;
bool fw_csr;
int ret;
 
disable_rpm_wakeref_asserts(dev_priv);
 
fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
* deinit the power domains. This also means the CSR/DMC firmware will
* stay active, it will power down any HW resources as required and
* also enable deeper system power states that would be blocked if the
* firmware was inactive.
*/
if (!fw_csr)
intel_power_domains_suspend(dev_priv);
 
ret = intel_suspend_complete(dev_priv);
 
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
if (!fw_csr)
intel_power_domains_init_hw(dev_priv, true);
 
return ret;
goto out;
}
 
pci_disable_device(drm_dev->pdev);
693,7 → 659,12
if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
return 0;
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
 
out:
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
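/* A sketch of the assumed S3/S4 entry sequence, per the switcheroo
 * helper below: i915_drm_suspend() quiesces GEM and the display first,
 * then i915_drm_suspend_late() powers down the domains and moves the
 * PCI device to D3hot. */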
 
int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
724,6 → 695,8
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
disable_rpm_wakeref_asserts(dev_priv);
 
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
788,6 → 761,8
 
drm_kms_helper_poll_enable(dev);
 
enable_rpm_wakeref_asserts(dev_priv);
 
return 0;
}
 
794,7 → 769,7
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
int ret;
 
/*
* We have a resume ordering issue with the snd-hda driver also
805,12 → 780,46
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
if (pci_enable_device(dev->pdev))
return -EIO;
 
/*
* Note that we need to set the power state explicitly, since we
* powered off the device during freeze and the PCI core won't power
* it back up for us during thaw. Powering off the device during
* freeze is not a hard requirement though, and during the
* suspend/resume phases the PCI core makes sure we get here with the
* device powered on. So in case we change our freeze logic and keep
* the device powered we can also remove the following set power state
* call.
*/
ret = pci_set_power_state(dev->pdev, PCI_D0);
if (ret) {
DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
goto out;
}
 
/*
* Note that pci_enable_device() first enables any parent bridge
* device and only then sets the power state for this device. The
* bridge enabling is a nop though, since bridge devices are resumed
* first. The order of enabling power and enabling the device is
* imposed by the PCI core as described above, so here we preserve the
* same order for the freeze/thaw phases.
*
* TODO: eventually we should remove pci_disable_device() /
* pci_enable_device() from suspend/resume. Due to how they
* depend on the device enable refcount we can't anyway depend on them
* disabling/enabling the device.
*/
if (pci_enable_device(dev->pdev)) {
ret = -EIO;
goto out;
}
 
pci_set_master(dev->pdev);
 
if (IS_VALLEYVIEW(dev_priv))
disable_rpm_wakeref_asserts(dev_priv);
 
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, false);
if (ret)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
820,14 → 829,19
 
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_SKYLAKE(dev_priv))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
 
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
 
if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
 
out:
dev_priv->suspended_to_idle = false;
 
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
 
896,6 → 910,8
return ret;
}
 
intel_overlay_reset(dev_priv);
 
/* Ok, now get things going again... */
 
/*
1031,15 → 1047,6
return i915_drm_resume(drm_dev);
}
 
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
{
/* Enabling DC6 is not a hard requirement to enter runtime D3 */
 
skl_uninit_cdclk(dev_priv);
 
return 0;
}
 
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
1079,16 → 1086,6
return 0;
}
 
static int skl_resume_prepare(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
skl_init_cdclk(dev_priv);
intel_csr_load_program(dev);
 
return 0;
}
 
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
1478,6 → 1475,9
 
return -EAGAIN;
}
 
disable_rpm_wakeref_asserts(dev_priv);
 
/*
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
1485,6 → 1485,8
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
 
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 
intel_guc_suspend(dev);
 
intel_suspend_gt_powersave(dev);
1495,11 → 1497,15
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_enable_interrupts(dev_priv);
 
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
 
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
intel_uncore_forcewake_reset(dev, false);
 
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
dev_priv->pm.suspended = true;
 
/*
1543,6 → 1549,9
 
DRM_DEBUG_KMS("Resuming device\n");
 
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
disable_rpm_wakeref_asserts(dev_priv);
 
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
 
1553,11 → 1562,9
 
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_SKYLAKE(dev))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, true);
 
/*
1574,11 → 1581,13
* power well, so hpd is reinitialized from there. For
* everyone else do it here.
*/
if (!IS_VALLEYVIEW(dev_priv))
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv);
 
intel_enable_gt_powersave(dev);
 
enable_rpm_wakeref_asserts(dev_priv);
 
if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
else
1597,11 → 1606,9
 
if (IS_BROXTON(dev_priv))
ret = bxt_suspend_complete(dev_priv);
else if (IS_SKYLAKE(dev_priv))
ret = skl_suspend_complete(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
else
ret = 0;
1729,7 → 1736,7
DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
device.pci_dev.device);
 
driver.driver_features |= DRIVER_MODESET;
driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
 
err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
 
/drivers/video/drm/i915/i915_drv.h
33,6 → 33,7
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
 
#include <drm/drmP.h>
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
45,7 → 46,7
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
//#include <linux/backlight.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/kref.h>
#include "intel_guc.h"
52,26 → 53,12
 
#include <linux/spinlock.h>
 
#define ioread32(addr) readl(addr)
static inline u8 inb(u16 port)
{
u8 v;
asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
return v;
}
 
static inline void outb(u8 v, u16 port)
{
asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
 
 
/* General customization:
*/
 
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20151010"
#define DRIVER_DATE "20151218"
 
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
194,15 → 181,11
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_PORT_DDI_A_2_LANES,
POWER_DOMAIN_PORT_DDI_A_4_LANES,
POWER_DOMAIN_PORT_DDI_B_2_LANES,
POWER_DOMAIN_PORT_DDI_B_4_LANES,
POWER_DOMAIN_PORT_DDI_C_2_LANES,
POWER_DOMAIN_PORT_DDI_C_4_LANES,
POWER_DOMAIN_PORT_DDI_D_2_LANES,
POWER_DOMAIN_PORT_DDI_D_4_LANES,
POWER_DOMAIN_PORT_DDI_E_2_LANES,
POWER_DOMAIN_PORT_DDI_A_LANES,
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
214,6 → 197,7
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_INIT,
 
POWER_DOMAIN_NUM,
303,7 → 287,7
list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \
base.head) \
if ((intel_plane)->pipe == (intel_crtc)->pipe)
for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
 
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
320,15 → 304,15
 
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
for_each_if ((intel_encoder)->base.crtc == (__crtc))
 
#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
if ((intel_connector)->base.encoder == (__encoder))
for_each_if ((intel_connector)->base.encoder == (__encoder))
 
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
for_each_if ((1 << (domain)) & (mask))
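/* for_each_if() guards the trailing condition of these iterators so
 * that an else after the loop binds where the caller expects; the drm
 * helper is essentially: #define for_each_if(cond) if (!(cond)) {} else */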
 
struct drm_i915_private;
struct i915_mm_struct;
474,7 → 458,9
u32 swsci_gbda_sub_functions;
u32 swsci_sbcb_sub_functions;
struct opregion_asle *asle;
void *vbt;
void *rvda;
const void *vbt;
u32 vbt_size;
u32 *lid_state;
struct work_struct asle_work;
};
645,11 → 631,9
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
int (*compute_pipe_wm)(struct intel_crtc *crtc,
struct drm_atomic_state *state);
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
707,18 → 691,18
void (*force_wake_put)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
 
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
 
void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint8_t val, bool trace);
void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint16_t val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint32_t val, bool trace);
void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint64_t val, bool trace);
};
 
735,11 → 719,11
enum forcewake_domain_id id;
unsigned wake_count;
struct timer_list timer;
u32 reg_set;
i915_reg_t reg_set;
u32 val_set;
u32 val_clear;
u32 reg_ack;
u32 reg_post;
i915_reg_t reg_ack;
i915_reg_t reg_post;
u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
};
749,25 → 733,25
for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
(i__) < FW_DOMAIN_ID_COUNT; \
(i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
 
#define for_each_fw_domain(domain__, dev_priv__, i__) \
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
 
enum csr_state {
FW_UNINITIALIZED = 0,
FW_LOADED,
FW_FAILED
};
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version) ((version) >> 16)
#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
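/* Example: CSR_VERSION(1, 23) == 0x00010017, which CSR_VERSION_MAJOR()
 * and CSR_VERSION_MINOR() unpack back into 1 and 23. */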
 
struct intel_csr {
struct work_struct work;
const char *fw_path;
uint32_t *dmc_payload;
uint32_t dmc_fw_size;
uint32_t version;
uint32_t mmio_count;
uint32_t mmioaddr[8];
i915_reg_t mmioaddr[8];
uint32_t mmiodata[8];
enum csr_state state;
uint32_t dc_state;
};
 
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
783,8 → 767,11
func(is_crestline) sep \
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_cherryview) sep \
func(is_haswell) sep \
func(is_skylake) sep \
func(is_broxton) sep \
func(is_kabylake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
920,7 → 907,6
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
struct mutex lock;
unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
unsigned int possible_framebuffer_bits;
933,38 → 919,21
 
bool false_color;
 
/* Tracks whether the HW is actually enabled, not whether the feature is
* possible. */
bool enabled;
bool active;
 
struct intel_fbc_work {
struct delayed_work work;
struct intel_crtc *crtc;
bool scheduled;
struct work_struct work;
struct drm_framebuffer *fb;
} *fbc_work;
unsigned long enable_jiffies;
} work;
 
enum no_fbc_reason {
FBC_OK, /* FBC is enabled */
FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
FBC_MODE_TOO_LARGE, /* mode too large for compression */
FBC_BAD_PLANE, /* fbc not supported on plane */
FBC_NOT_TILED, /* buffer not tiled */
FBC_MULTIPLE_PIPES, /* more than one pipe active */
FBC_MODULE_PARAM,
FBC_CHIP_DEFAULT, /* disabled by default on this chip */
FBC_ROTATION, /* rotation is not supported */
FBC_IN_DBG_MASTER, /* kernel debugger is active */
FBC_BAD_STRIDE, /* stride is not supported */
FBC_PIXEL_RATE, /* pixel rate is too big */
FBC_PIXEL_FORMAT /* pixel format is invalid */
} no_fbc_reason;
const char *no_fbc_reason;
 
bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
void (*enable_fbc)(struct intel_crtc *crtc);
void (*disable_fbc)(struct drm_i915_private *dev_priv);
bool (*is_active)(struct drm_i915_private *dev_priv);
void (*activate)(struct intel_crtc *crtc);
void (*deactivate)(struct drm_i915_private *dev_priv);
};
 
/**
1034,7 → 1003,7
struct i2c_adapter adapter;
u32 force_bit;
u32 reg0;
u32 gpio_reg;
i915_reg_t gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv;
};
1173,7 → 1142,7
struct intel_rps_client semaphores, mmioflips;
 
/* manual wa residency calculations */
struct intel_rps_ei ei;
struct intel_rps_ei up_ei, down_ei;
 
/*
* Protects RPS/RC6 register access and PCU communication.
1293,6 → 1262,7
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
 
struct notifier_block oom_notifier;
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
 
1633,6 → 1603,8
* For more, read the Documentation/power/runtime_pm.txt.
*/
struct i915_runtime_pm {
atomic_t wakeref_count;
atomic_t atomic_seq;
bool suspended;
bool irqs_enabled;
};
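/* wakeref_count and atomic_seq back the disable/enable_rpm_wakeref_asserts()
 * and assert_rpm_atomic_begin()/end() pairs used in the suspend/resume and
 * GGTT update paths to catch hardware access without a held wakeref. */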
1679,7 → 1651,7
};
 
struct i915_wa_reg {
u32 addr;
i915_reg_t addr;
u32 value;
/* bitmask representing WA bits */
u32 mask;
1708,6 → 1680,13
struct drm_i915_gem_request *request;
};
 
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
bool sprites_enabled;
bool sprites_scaled;
};
 
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *objects;
1728,9 → 1707,6
 
struct intel_csr csr;
 
/* Display CSR-related protection */
struct mutex csr_lock;
 
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
 
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
1745,6 → 1721,8
/* MMIO base address for MIPI regs */
uint32_t mipi_mmio_base;
 
uint32_t psr_mmio_base;
 
wait_queue_head_t gmbus_wait_queue;
 
struct pci_dev *bridge_dev;
1908,6 → 1886,7
u32 chv_phy_control;
 
u32 suspend_count;
bool suspended_to_idle;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
 
1930,6 → 1909,9
*/
uint16_t skl_latency[8];
 
/* Committed wm config */
struct intel_wm_config config;
 
/*
* The skl_wm_values structure is a bit too big for stack
* allocation, so we keep the staging struct where we store
1964,6 → 1946,8
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
 
struct intel_encoder *dig_port_map[I915_MAX_PORTS];
 
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
1988,7 → 1972,7
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
 
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
2000,6 → 1984,9
#define I915_GTT_OFFSET_NONE ((u32)-1)
 
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
 
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
* of pages before to binding them into the GTT, and put_pages() is
2015,6 → 2002,7
*/
int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *);
 
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
};
2158,6 → 2146,10
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
 
union {
/** for phy allocated objects */
struct drm_dma_handle *phys_handle;
 
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
2168,10 → 2160,8
struct i915_mmu_object *mmu_object;
struct work_struct *work;
} userptr;
 
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
};
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
void i915_gem_track_fb(struct drm_i915_gem_object *old,
2451,6 → 2441,15
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
 
#define REVID_FOREVER 0xff
/*
* Return true if revision is in range [since,until] inclusive.
*
* Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
*/
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
2473,11 → 2472,12
INTEL_DEVID(dev) == 0x0152 || \
INTEL_DEVID(dev) == 0x015a)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev))
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
2505,6 → 2505,14
#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
INTEL_DEVID(dev) == 0x1915 || \
INTEL_DEVID(dev) == 0x191E)
#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \
INTEL_DEVID(dev) == 0x5913 || \
INTEL_DEVID(dev) == 0x5916 || \
INTEL_DEVID(dev) == 0x5921 || \
INTEL_DEVID(dev) == 0x5926)
#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \
INTEL_DEVID(dev) == 0x5915 || \
INTEL_DEVID(dev) == 0x591E)
#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
2512,17 → 2520,22
 
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
#define SKL_REVID_A0 (0x0)
#define SKL_REVID_B0 (0x1)
#define SKL_REVID_C0 (0x2)
#define SKL_REVID_D0 (0x3)
#define SKL_REVID_E0 (0x4)
#define SKL_REVID_F0 (0x5)
#define SKL_REVID_A0 0x0
#define SKL_REVID_B0 0x1
#define SKL_REVID_C0 0x2
#define SKL_REVID_D0 0x3
#define SKL_REVID_E0 0x4
#define SKL_REVID_F0 0x5
 
#define BXT_REVID_A0 (0x0)
#define BXT_REVID_B0 (0x3)
#define BXT_REVID_C0 (0x9)
#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
 
#define BXT_REVID_A0 0x0
#define BXT_REVID_A1 0x1
#define BXT_REVID_B0 0x3
#define BXT_REVID_C0 0x9
 
#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
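/* Example: IS_BXT_REVID(dev, 0, BXT_REVID_A1) matches both A-step
 * revisions, while IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER)
 * matches F0 and anything newer. */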
 
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
2593,23 → 2606,25
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
IS_SKYLAKE(dev))
IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_SKYLAKE(dev))
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
IS_KABYLAKE(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
#define HAS_CSR(dev) (IS_GEN9(dev))
 
#define HAS_GUC_UCODE(dev) (IS_GEN9(dev))
#define HAS_GUC_SCHED(dev) (IS_GEN9(dev))
#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
 
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
INTEL_INFO(dev)->gen >= 8)
 
#define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \
!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
!IS_BROXTON(dev))
 
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
2626,12 → 2641,14
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
 
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
2657,6 → 2674,7
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_dc;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
2708,7 → 2726,6
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void i915_firmware_load_error_print(const char *fw_path, int err);
 
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
2765,18 → 2782,48
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits);
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
static inline void
ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
ilk_update_display_irq(dev_priv, bits, bits);
}
static inline void
ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
ilk_update_display_irq(dev_priv, bits, 0);
}
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe, uint32_t bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
}
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe, uint32_t bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
#define ibx_enable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), 0)
static inline void
ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
{
ibx_display_interrupt_update(dev_priv, bits, bits);
}
static inline void
ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
{
ibx_display_interrupt_update(dev_priv, bits, 0);
}
 
 
/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
2844,6 → 2891,7
#define PIN_UPDATE (1<<5)
#define PIN_ZONE_4G (1<<6)
#define PIN_HIGH (1<<7)
#define PIN_OFFSET_FIXED (1<<8)
#define PIN_OFFSET_MASK (~4095)
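/* PIN_OFFSET_BIAS and PIN_OFFSET_FIXED carry a page-aligned address in
 * the PIN_OFFSET_MASK bits of the same flags word, e.g.
 * flags |= offset | PIN_OFFSET_FIXED requests a fixed placement. */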
int __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj,
2879,6 → 2927,9
return sg->length >> PAGE_SHIFT;
}
 
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
 
static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
3018,8 → 3069,6
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
3194,6 → 3243,7
unsigned long start,
unsigned long end,
unsigned flags);
int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
/* belongs in i915_gem_gtt.h */
3323,7 → 3373,8
extern void intel_i2c_reset(struct drm_device *dev);
 
/* intel_bios.c */
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
int intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 
/* intel_opregion.c */
#ifdef CONFIG_ACPI
3377,7 → 3428,6
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
 
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
3460,6 → 3510,32
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
 
#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
i915_reg_t reg) \
{ \
return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
 
#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
i915_reg_t reg, uint##x##_t val) \
{ \
write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)
 
__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)
 
#undef __raw_read
#undef __raw_write
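/* After expansion, e.g. __raw_i915_read32(dev_priv, reg) is simply
 * readl(dev_priv->regs + i915_mmio_reg_offset(reg)); the *_FW accessors
 * below build on these untraced variants. */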
 
/* These are untraced mmio-accessors that are only valid to be used inside
* criticial sections inside IRQ handlers where forcewake is explicitly
* controlled.
3467,8 → 3543,8
* Note: Should only be used between intel_uncore_forcewake_irqlock() and
* intel_uncore_forcewake_irqunlock().
*/
#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
 
/* "Broadcast RGB" property */
3476,9 → 3552,9
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
 
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
{
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
return VLV_VGACNTRL;
else if (INTEL_INFO(dev)->gen >= 5)
return CPU_VGACNTRL;
3543,4 → 3619,6
i915_gem_request_assign(&ring->trace_irq_req, req);
}
 
#include "intel_drv.h"
 
#endif
/drivers/video/drm/i915/i915_gem.c
56,11 → 56,6
unsigned long flag, unsigned long offset);
 
 
#define MAX_ERRNO 4095
 
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
 
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
1973,6 → 1968,9
sg->length += PAGE_SIZE;
}
last_pfn = page_to_pfn(page);
 
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
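/* (with 4 KiB pages, pfn 0x00100000 is the 4 GiB boundary) */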
}
#ifdef CONFIG_SWIOTLB
if (!swiotlb_nr_tbl())
2439,6 → 2437,8
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_engine_cs *ring)
{
struct intel_ringbuffer *buffer;
 
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
 
2454,18 → 2454,16
* are the ones that keep the context and ringbuffer backing objects
* pinned in place.
*/
while (!list_empty(&ring->execlist_queue)) {
struct drm_i915_gem_request *submit_req;
 
submit_req = list_first_entry(&ring->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
list_del(&submit_req->execlist_link);
if (i915.enable_execlists) {
spin_lock_irq(&ring->execlist_lock);
 
if (submit_req->ctx != ring->default_context)
intel_lr_context_unpin(submit_req);
/* list_splice_tail_init checks for empty lists */
list_splice_tail_init(&ring->execlist_queue,
&ring->execlist_retired_req_list);
 
i915_gem_request_unreference(submit_req);
spin_unlock_irq(&ring->execlist_lock);
intel_execlists_retire_requests(ring);
}
 
/*
2484,7 → 2482,19
 
i915_gem_request_retire(request);
}
 
/* Having flushed all requests from all queues, we know that all
* ringbuffers must now be empty. However, since we do not reclaim
* all space when retiring the request (to prevent HEADs colliding
* with rapid ringbuffer wraparound) the amount of available space
* upon reset is less than when we start. Do one more pass over
* all the ringbuffers to reset last_retired_head.
*/
list_for_each_entry(buffer, &ring->buffers, link) {
buffer->last_retired_head = buffer->tail;
intel_ring_update_space(buffer);
}
}
 
void i915_gem_reset(struct drm_device *dev)
{
2584,10 → 2594,10
}
}
 
if (idle)
mod_delayed_work(dev_priv->wq,
&dev_priv->mm.idle_work,
msecs_to_jiffies(100));
// if (idle)
// mod_delayed_work(dev_priv->wq,
// &dev_priv->mm.idle_work,
// msecs_to_jiffies(100));
 
return idle;
}
2624,6 → 2634,10
if (!list_empty(&ring->request_list))
return;
 
/* we probably should sync with hangcheck here, using cancel_work_sync.
* Also locking seems to be fubar here, ring->request_list is protected
* by dev->struct_mutex. */
 
intel_mark_idle(dev);
 
if (mutex_trylock(&dev->struct_mutex)) {
2748,7 → 2762,7
if (ret == 0)
ret = __i915_wait_request(req[i], reset_counter, true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
file->driver_priv);
to_rps_client(file));
i915_gem_request_unreference__unlocked(req[i]);
}
return ret;
3114,7 → 3128,7
if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->gtt.mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32));
end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
if (alignment == 0)
alignment = flags & PIN_MAPPABLE ? fence_alignment :
3151,6 → 3165,20
if (IS_ERR(vma))
goto err_unpin;
 
if (flags & PIN_OFFSET_FIXED) {
uint64_t offset = flags & PIN_OFFSET_MASK;
 
if (offset & (alignment - 1) || offset + size > end) {
ret = -EINVAL;
goto err_free_vma;
}
vma->node.start = offset;
vma->node.size = size;
vma->node.color = obj->cache_level;
ret = drm_mm_reserve_node(&vm->mm, &vma->node);
if (ret)
goto err_free_vma;
} else {
if (flags & PIN_HIGH) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
3170,6 → 3198,7
 
goto err_free_vma;
}
}
if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
ret = -EINVAL;
goto err_remove_node;
3522,7 → 3551,7
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
return -ENODEV;
 
level = I915_CACHE_LLC;
3565,17 → 3594,11
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
int ret;
 
ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
if (ret)
return ret;
 
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
3765,6 → 3788,10
vma->node.start < (flags & PIN_OFFSET_MASK))
return true;
 
if (flags & PIN_OFFSET_FIXED &&
vma->node.start != (flags & PIN_OFFSET_MASK))
return true;
 
return false;
}
 
4030,6 → 4057,7
}
 
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
};
4163,10 → 4191,8
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
vma->vm == vm)
return vma;
}
return NULL;
4257,7 → 4283,6
struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
 
4273,10 → 4298,10
* here because no other code should access these registers other than
* at initialization time.
*/
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, reg_base + i);
intel_ring_emit(ring, remap_info[i/4]);
intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
intel_ring_emit(ring, remap_info[i]);
}
 
intel_ring_advance(ring);
4444,17 → 4469,8
if (HAS_GUC_UCODE(dev)) {
ret = intel_guc_ucode_load(dev);
if (ret) {
/*
* If we got an error and GuC submission is enabled, map
* the error to -EIO so the GPU will be declared wedged.
* OTOH, if we didn't intend to use the GuC anyway, just
* discard the error and carry on.
*/
DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
i915.enable_guc_submission ? "" :
" (ignored)");
ret = i915.enable_guc_submission ? -EIO : 0;
if (ret)
DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
ret = -EIO;
goto out;
}
}
4518,14 → 4534,6
 
mutex_lock(&dev->struct_mutex);
 
if (IS_VALLEYVIEW(dev)) {
/* VLVA0 (potential hack), BIOS isn't actually waking us */
I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
VLV_GTLC_ALLOWWAKEACK), 10))
DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}
 
if (!i915.enable_execlists) {
dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
dev_priv->gt.init_rings = i915_gem_init_rings;
4619,7 → 4627,7
 
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
4837,6 → 4845,21
return false;
}
 
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
struct page *page;
 
/* Only default objects have per-page dirty tracking */
if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
return NULL;
 
page = i915_gem_object_get_page(obj, n);
set_page_dirty(page);
return page;
}
 
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
4862,6 → 4885,7
i915_gem_object_pin_pages(obj);
sg = obj->pages;
bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
obj->dirty = 1; /* Backing store is now out of date */
i915_gem_object_unpin_pages(obj);
 
if (WARN_ON(bytes != size)) {
/drivers/video/drm/i915/i915_gem_context.c
189,8 → 189,15
* shouldn't touch the cache level, especially as that
* would make the object snooped which might have a
* negative performance impact.
*
* Snooping is required on non-llc platforms in execlist
* mode, but since all GGTT accesses use PAT entry 0 we
* get snooping anyway regardless of cache_level.
*
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
if (IS_IVYBRIDGE(dev)) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
558,7 → 565,7
if (signaller == ring)
continue;
 
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
583,7 → 590,7
if (signaller == ring)
continue;
 
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
927,6 → 934,14
case I915_CONTEXT_PARAM_NO_ZEROMAP:
args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
args->value = ctx->ppgtt->base.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
else
args->value = to_i915(dev)->gtt.base.total;
break;
default:
ret = -EINVAL;
break;
958,7 → 973,8
case I915_CONTEXT_PARAM_BAN_PERIOD:
if (args->size)
ret = -EINVAL;
else if (args->value < ctx->hang_stats.ban_period_seconds)
else if (args->value < ctx->hang_stats.ban_period_seconds &&
!capable(CAP_SYS_ADMIN))
ret = -EPERM;
else
ctx->hang_stats.ban_period_seconds = args->value;
/drivers/video/drm/i915/i915_gem_evict.c
199,6 → 199,45
return ret;
}
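/* Evict whatever overlaps a fixed-offset placement. The contract, as
 * read from the code below: walk the address space in order, skip nodes
 * wholly before target->node, stop at the first node wholly past it,
 * and unbind every overlap; pinned overlaps fail with -EBUSY, -EINVAL
 * or -ENOSPC depending on why they are pinned. */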
 
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
struct drm_mm_node *node, *next;
 
list_for_each_entry_safe(node, next,
&target->vm->mm.head_node.node_list,
node_list) {
struct i915_vma *vma;
int ret;
 
if (node->start + node->size <= target->node.start)
continue;
if (node->start >= target->node.start + target->node.size)
break;
 
vma = container_of(node, typeof(*vma), node);
 
if (vma->pin_count) {
if (!vma->exec_entry || (vma->pin_count > 1))
/* Object is pinned for some other use */
return -EBUSY;
 
/* We need to evict a buffer in the same batch */
if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
/* Overlapping fixed objects in the same batch */
return -EINVAL;
 
return -ENOSPC;
}
 
ret = i915_vma_unbind(vma);
if (ret)
return ret;
}
 
return 0;
}
 
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
* @vm: Address space to cleanse
/drivers/video/drm/i915/i915_gem_execbuffer.c
249,6 → 249,31
obj->cache_level != I915_CACHE_NONE);
}
 
/* Used to convert any address to canonical form.
* Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
* MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
* addresses to be in a canonical form:
* "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
* canonical form [63:48] == [47]."
*/
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}
 
static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}
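/*
 * Worked example: with bit 47 set,
 *   gen8_canonical_addr(0x0000800000000000ULL) == 0xffff800000000000ULL
 * and gen8_noncanonical_addr() masks that back down to the low 48 bits.
 */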
 
static inline uint64_t
relocation_target(struct drm_i915_gem_relocation_entry *reloc,
uint64_t target_offset)
{
return gen8_canonical_addr((int)reloc->delta + target_offset);
}
 
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc,
256,7 → 281,7
{
struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
uint64_t delta = reloc->delta + target_offset;
uint64_t delta = relocation_target(reloc, target_offset);
char *vaddr;
int ret;
 
264,7 → 289,7
if (ret)
return ret;
 
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
 
273,7 → 298,7
 
if (page_offset == 0) {
kunmap_atomic(vaddr);
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
}
 
292,7 → 317,7
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t delta = reloc->delta + target_offset;
uint64_t delta = relocation_target(reloc, target_offset);
uint64_t offset;
void __iomem *reloc_page;
int ret;
334,7 → 359,7
{
struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
uint64_t delta = (int)reloc->delta + target_offset;
uint64_t delta = relocation_target(reloc, target_offset);
char *vaddr;
int ret;
 
342,7 → 367,7
if (ret)
return ret;
 
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
reloc->offset >> PAGE_SHIFT));
clflush_write32(vaddr + page_offset, lower_32_bits(delta));
 
351,7 → 376,7
 
if (page_offset == 0) {
kunmap_atomic(vaddr);
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
}
 
382,7 → 407,7
target_i915_obj = target_vma->obj;
target_obj = &target_vma->obj->base;
 
target_offset = target_vma->node.start;
target_offset = gen8_canonical_addr(target_vma->node.start);
 
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
583,6 → 608,8
flags |= PIN_GLOBAL | PIN_MAPPABLE;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
if (entry->flags & EXEC_OBJECT_PINNED)
flags |= entry->offset | PIN_OFFSET_FIXED;
if ((flags & PIN_MAPPABLE) == 0)
flags |= PIN_HIGH;
}
654,6 → 681,10
vma->node.start & (entry->alignment - 1))
return true;
 
if (entry->flags & EXEC_OBJECT_PINNED &&
vma->node.start != entry->offset)
return true;
 
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;
679,6 → 710,7
struct i915_vma *vma;
struct i915_address_space *vm;
struct list_head ordered_vmas;
struct list_head pinned_vmas;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
 
687,6 → 719,7
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
INIT_LIST_HEAD(&ordered_vmas);
INIT_LIST_HEAD(&pinned_vmas);
while (!list_empty(vmas)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
705,7 → 738,9
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);
 
if (need_mappable) {
if (entry->flags & EXEC_OBJECT_PINNED)
list_move_tail(&vma->exec_list, &pinned_vmas);
else if (need_mappable) {
entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
list_move(&vma->exec_list, &ordered_vmas);
} else
715,6 → 750,7
obj->base.pending_write_domain = 0;
}
list_splice(&ordered_vmas, vmas);
list_splice(&pinned_vmas, vmas);
 
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
967,6 → 1003,21
if (exec[i].flags & invalid_flags)
return -EINVAL;
 
/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
* any non-page-aligned or non-canonical addresses.
*/
if (exec[i].flags & EXEC_OBJECT_PINNED) {
if (exec[i].offset !=
gen8_canonical_addr(exec[i].offset & PAGE_MASK))
return -EINVAL;
 
/* From the drm_mm perspective the address space is continuous,
* so from this point on we always use the non-canonical
* form internally.
*/
exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
}
 
if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
return -EINVAL;
 
1091,7 → 1142,7
 
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
}
 
1218,7 → 1269,7
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit_reg(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring);
 
1294,6 → 1345,7
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
 
return vma->obj;
1654,6 → 1706,8
 
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].offset =
gen8_canonical_addr(exec2_list[i].offset);
ret = __copy_to_user(&user_exec_list[i].offset,
&exec2_list[i].offset,
sizeof(user_exec_list[i].offset));
1718,6 → 1772,8
int i;
 
for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].offset =
gen8_canonical_addr(exec2_list[i].offset);
ret = __copy_to_user(&user_exec_list[i].offset,
&exec2_list[i].offset,
sizeof(user_exec_list[i].offset));
/drivers/video/drm/i915/i915_gem_fence.c
59,7 → 59,7
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg_lo, fence_reg_hi;
i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift;
 
if (INTEL_INFO(dev)->gen >= 6) {
/drivers/video/drm/i915/i915_gem_gtt.c
104,9 → 104,11
{
bool has_aliasing_ppgtt;
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
 
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
 
if (intel_vgpu_active(dev))
has_full_ppgtt = false; /* emulation is too hard */
125,6 → 127,9
if (enable_ppgtt == 2 && has_full_ppgtt)
return 2;
 
if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
return 3;
 
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
134,14 → 139,13
#endif
 
/* Early VLV doesn't have this */
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
dev->pdev->revision < 0xb) {
if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0;
}
 
if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
return 2;
return has_full_48bit_ppgtt ? 3 : 2;
else
return has_aliasing_ppgtt ? 1 : 0;
}
654,10 → 658,10
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring);
 
757,10 → 761,10
gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
scratch_pte);
} else {
uint64_t templ4, pml4e;
uint64_t pml4e;
struct i915_page_directory_pointer *pdp;
 
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
scratch_pte);
}
826,10 → 830,10
cache_level);
} else {
struct i915_page_directory_pointer *pdp;
uint64_t templ4, pml4e;
uint64_t pml4e;
uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
 
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
start, cache_level);
}
897,14 → 901,13
enum vgt_g2v_type msg;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int offset = vgtif_reg(pdp0_lo);
int i;
 
if (USES_FULL_48BIT_PPGTT(dev)) {
u64 daddr = px_dma(&ppgtt->pml4);
 
I915_WRITE(offset, lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
 
msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
912,10 → 915,8
for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 
I915_WRITE(offset, lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr));
 
offset += 8;
I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
}
 
msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1010,10 → 1011,9
{
struct drm_device *dev = vm->dev;
struct i915_page_table *pt;
uint64_t temp;
uint32_t pde;
 
gen8_for_each_pde(pt, pd, start, length, temp, pde) {
gen8_for_each_pde(pt, pd, start, length, pde) {
/* Don't reallocate page tables */
if (test_bit(pde, pd->used_pdes)) {
/* Scratch is never allocated this way */
1072,13 → 1072,12
{
struct drm_device *dev = vm->dev;
struct i915_page_directory *pd;
uint64_t temp;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev);
 
WARN_ON(!bitmap_empty(new_pds, pdpes));
 
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (test_bit(pdpe, pdp->used_pdpes))
continue;
 
1126,12 → 1125,11
{
struct drm_device *dev = vm->dev;
struct i915_page_directory_pointer *pdp;
uint64_t temp;
uint32_t pml4e;
 
WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
 
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) {
pdp = alloc_pdp(dev);
if (IS_ERR(pdp))
1215,7 → 1213,6
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
uint64_t temp;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev);
int ret;
1242,7 → 1239,7
}
 
/* For every page directory referenced, allocate page tables */
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
if (ret)
1254,7 → 1251,7
 
/* Allocations have completed successfully, so set the bitmaps, and do
* the mappings. */
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
gen8_pde_t *const page_directory = kmap_px(pd);
struct i915_page_table *pt;
uint64_t pd_len = length;
1264,7 → 1261,7
/* Every pd should be allocated, we just did that above. */
WARN_ON(!pd);
 
gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
/* Same reasoning as pd */
WARN_ON(!pt);
WARN_ON(!pd_len);
1301,6 → 1298,8
 
err_out:
while (pdpe--) {
unsigned long temp;
 
for_each_set_bit(temp, new_page_tables + pdpe *
BITS_TO_LONGS(I915_PDES), I915_PDES)
free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
1323,7 → 1322,7
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
struct i915_page_directory_pointer *pdp;
uint64_t temp, pml4e;
uint64_t pml4e;
int ret = 0;
 
/* Do the pml4 allocations first, so we don't need to track the newly
1342,7 → 1341,7
"The allocation has spanned more than 512GB. "
"It is highly likely this is incorrect.");
 
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
WARN_ON(!pdp);
 
ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
1382,10 → 1381,9
struct seq_file *m)
{
struct i915_page_directory *pd;
uint64_t temp;
uint32_t pdpe;
 
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
struct i915_page_table *pt;
uint64_t pd_len = length;
uint64_t pd_start = start;
1395,7 → 1393,7
continue;
 
seq_printf(m, "\tPDPE #%d\n", pdpe);
gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
uint32_t pte;
gen8_pte_t *pt_vaddr;
 
1445,11 → 1443,11
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t templ4, pml4e;
uint64_t pml4e;
struct i915_pml4 *pml4 = &ppgtt->pml4;
struct i915_page_directory_pointer *pdp;
 
gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) {
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es))
continue;
 
1655,9 → 1653,9
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
1692,9 → 1690,9
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
2345,7 → 2343,10
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
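/* The begin/end pair asserts (in debug builds) that the device stays
 * runtime-resumed across this whole MMIO sequence; the returned seq
 * cookie is re-checked in assert_rpm_atomic_end(). */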
 
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_dma_address(sg_iter.sg) +
(sg_iter.sg_pgoffset << PAGE_SHIFT);
2371,8 → 2372,36
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
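/* Argument-marshalling helper: the five insert_entries parameters are
 * packed into one struct so they can travel through the single void *
 * taken by the callback below. */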
struct insert_entries {
struct i915_address_space *vm;
struct sg_table *st;
uint64_t start;
enum i915_cache_level level;
u32 flags;
};
 
static int gen8_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
gen8_ggtt_insert_entries(arg->vm, arg->st,
arg->start, arg->level, arg->flags);
return 0;
}
 
static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level level,
u32 flags)
{
struct insert_entries arg = { vm, st, start, level, flags };
gen8_ggtt_insert_entries__cb(&arg);
}
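 
/* Note: upstream Linux serializes this path with stop_machine() on
 * Braswell/Cherryview (hence the __BKL suffix); the direct call above is
 * a simplification assumed for this port. */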
 
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
2391,7 → 2420,10
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
2415,6 → 2447,8
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2429,7 → 2463,10
(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
2441,6 → 2478,8
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2455,7 → 2494,10
(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
2467,6 → 2509,8
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2474,11 → 2518,17
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 
}
 
static void i915_ggtt_clear_range(struct i915_address_space *vm,
2486,9 → 2536,16
uint64_t length,
bool unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
intel_gtt_clear_range(first_entry, num_entries);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
static int ggtt_bind_vma(struct i915_vma *vma,
2740,7 → 2797,6
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
}
 
if (drm_mm_initialized(&vm->mm)) {
2990,6 → 3046,9
dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
if (IS_CHERRYVIEW(dev_priv))
dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
 
return ret;
}
 
3298,7 → 3357,7
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
struct drm_i915_gem_object *obj)
{
struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter;
3530,7 → 3589,7
if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
return view->rotation_info.size;
return view->params.rotation_info.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
/drivers/video/drm/i915/i915_gem_gtt.h
156,14 → 156,11
u64 offset;
unsigned int size;
} partial;
struct intel_rotation_info rotation_info;
} params;
 
struct sg_table *pages;
 
union {
struct intel_rotation_info rotation_info;
};
};
 
extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;
458,32 → 455,29
* between from start until start + length. On gen8+ it simply iterates
* over every page directory entry in a page directory.
*/
#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
#define gen8_for_each_pde(pt, pd, start, length, iter) \
for (iter = gen8_pde_index(start); \
length > 0 && iter < I915_PDES ? \
(pt = (pd)->page_table[iter]), 1 : 0; \
iter++, \
temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
temp = min(temp, length), \
start += temp, length -= temp)
length > 0 && iter < I915_PDES && \
(pt = (pd)->page_table[iter], true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
 
#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
for (iter = gen8_pdpe_index(start); \
length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \
(pd = (pdp)->page_directory[iter]), 1 : 0; \
iter++, \
temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
temp = min(temp, length), \
start += temp, length -= temp)
length > 0 && iter < I915_PDPES_PER_PDP(dev) && \
(pd = (pdp)->page_directory[iter], true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
 
#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \
#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
for (iter = gen8_pml4e_index(start); \
length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \
(pdp = (pml4)->pdps[iter]), 1 : 0; \
iter++, \
temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \
temp = min(temp, length), \
start += temp, length -= temp)
length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
(pdp = (pml4)->pdps[iter], true); \
({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
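 
/* The ({ ... }) blocks above are GCC statement expressions: they scope the
 * old 'temp' cursor inside the macro itself, so callers no longer declare
 * and thread a scratch uint64_t through every loop. Sketch of a caller,
 * assuming pd/start/length are in scope:
 *
 *	uint32_t pde;
 *	struct i915_page_table *pt;
 *
 *	gen8_for_each_pde(pt, pd, start, length, pde) {
 *		...	// pt/pde valid here; start/length advance automatically
 *	}
 */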
 
static inline uint32_t gen8_pte_index(uint64_t address)
{
556,7 → 550,7
 
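/* rotation_info now lives in the params union (see the i915_gem_gtt.h
 * hunk above), so any view other than NORMAL can be compared by its
 * parameter block alone. */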
if (a->type != b->type)
return false;
if (a->type == I915_GGTT_VIEW_PARTIAL)
if (a->type != I915_GGTT_VIEW_NORMAL)
return !memcmp(&a->params, &b->params, sizeof(a->params));
return true;
}
/drivers/video/drm/i915/i915_gem_render_state.c
103,7 → 103,7
if (ret)
return ret;
 
page = sg_page(so->obj->pages->sgl);
page = i915_gem_object_get_dirty_page(so->obj, 0);
d = kmap(page);
 
while (i < rodata->batch_items) {
/drivers/video/drm/i915/i915_gem_stolen.c
367,7 → 367,8
&reserved_size);
break;
default:
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
else
/drivers/video/drm/i915/i915_gem_tiling.c
176,6 → 176,8
return -EINVAL;
}
 
intel_runtime_pm_get(dev_priv);
 
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
ret = -EBUSY;
269,6 → 271,8
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
 
intel_runtime_pm_put(dev_priv);
 
return ret;
}
 
/drivers/video/drm/i915/i915_gpu_error.c
367,6 → 367,17
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
if (HAS_CSR(dev)) {
struct intel_csr *csr = &dev_priv->csr;
 
err_printf(m, "DMC loaded: %s\n",
yesno(csr->dmc_payload != NULL));
err_printf(m, "DMC fw version: %d.%d\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
}
 
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
if (INTEL_INFO(dev)->gen >= 8) {
863,7 → 874,7
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
if (INTEL_INFO(dev)->gen >= 8)
gen8_record_semaphore_state(dev_priv, error, ring, ering);
900,7 → 911,7
ering->ctl = I915_READ_CTL(ring);
 
if (I915_NEED_GFX_HWS(dev)) {
int mmio;
i915_reg_t mmio;
 
if (IS_GEN7(dev)) {
switch (ring->id) {
1072,6 → 1083,25
list_for_each_entry(request, &ring->request_list, list) {
struct drm_i915_error_request *erq;
 
if (count >= error->ring[i].num_requests) {
/*
* If the ring request list was changed in
* between the point where the error request
* list was created and dimensioned and this
* point then just exit early to avoid crashes.
*
* We don't need to communicate that the
* request list changed state during error
* state capture and that the error state is
* slightly incorrect as a consequence since we
* are typically only interested in the request
* list state at the point of error state
* capture, not in any changes happening during
* the capture.
*/
break;
}
 
erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies;
1182,7 → 1212,7
if (IS_VALLEYVIEW(dev)) {
error->gtier[0] = I915_READ(GTIER);
error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ(FORCEWAKE_VLV);
error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
}
 
if (IS_GEN7(dev))
1194,7 → 1224,7
}
 
if (IS_GEN6(dev)) {
error->forcewake = I915_READ(FORCEWAKE);
error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
}
1201,7 → 1231,7
 
/* 2: Registers which belong to multiple generations */
if (INTEL_INFO(dev)->gen >= 7)
error->forcewake = I915_READ(FORCEWAKE_MT);
error->forcewake = I915_READ_FW(FORCEWAKE_MT);
 
if (INTEL_INFO(dev)->gen >= 6) {
error->derrmr = I915_READ(DERRMR);
/drivers/video/drm/i915/i915_guc_reg.h
26,7 → 26,7
 
/* Definitions of GuC H/W registers, bits, etc */
 
#define GUC_STATUS 0xc000
#define GUC_STATUS _MMIO(0xc000)
#define GS_BOOTROM_SHIFT 1
#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
39,40 → 39,41
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
 
#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4))
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
 
#define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4)
#define DMA_ADDR_0_LOW 0xc300
#define DMA_ADDR_0_HIGH 0xc304
#define DMA_ADDR_1_LOW 0xc308
#define DMA_ADDR_1_HIGH 0xc30c
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define UOS_RSA_SCRATCH_MAX_COUNT 64
#define DMA_ADDR_0_LOW _MMIO(0xc300)
#define DMA_ADDR_0_HIGH _MMIO(0xc304)
#define DMA_ADDR_1_LOW _MMIO(0xc308)
#define DMA_ADDR_1_HIGH _MMIO(0xc30c)
#define DMA_ADDRESS_SPACE_WOPCM (7 << 16)
#define DMA_ADDRESS_SPACE_GTT (8 << 16)
#define DMA_COPY_SIZE 0xc310
#define DMA_CTRL 0xc314
#define DMA_COPY_SIZE _MMIO(0xc310)
#define DMA_CTRL _MMIO(0xc314)
#define UOS_MOVE (1<<4)
#define START_DMA (1<<0)
#define DMA_GUC_WOPCM_OFFSET 0xc340
#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT 0xC3E4
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
 
#define GUC_WOPCM_SIZE 0xc050
#define GUC_WOPCM_SIZE _MMIO(0xc050)
#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
 
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
 
#define GEN8_GT_PM_CONFIG 0x138140
#define GEN9LP_GT_PM_CONFIG 0x138140
#define GEN9_GT_PM_CONFIG 0x13816c
#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9_GT_PM_CONFIG _MMIO(0x13816c)
#define GT_DOORBELL_ENABLE (1<<0)
 
#define GEN8_GTCR 0x4274
#define GEN8_GTCR _MMIO(0x4274)
#define GEN8_GTCR_INVALIDATE (1<<0)
 
#define GUC_ARAT_C6DIS 0xA178
#define GUC_ARAT_C6DIS _MMIO(0xA178)
 
#define GUC_SHIM_CONTROL 0xc064
#define GUC_SHIM_CONTROL _MMIO(0xc064)
#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0)
#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1)
#define GUC_ENABLE_MIA_CACHING (1<<2)
89,21 → 90,21
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
GUC_ENABLE_MIA_CLOCK_GATING)
 
#define HOST2GUC_INTERRUPT 0xc4c8
#define HOST2GUC_INTERRUPT _MMIO(0xc4c8)
#define HOST2GUC_TRIGGER (1<<0)
 
#define DRBMISC1 0x1984
#define DOORBELL_ENABLE (1<<0)
 
#define GEN8_DRBREGL(x) (0x1000 + (x) * 8)
#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0)
#define GEN8_DRBREGU(x) (GEN8_DRBREGL(x) + 4)
#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
 
#define DE_GUCRMR 0x44054
#define DE_GUCRMR _MMIO(0x44054)
 
#define GUC_BCS_RCS_IER 0xC550
#define GUC_VCS2_VCS1_IER 0xC554
#define GUC_WD_VECS_IER 0xC558
#define GUC_PM_P24C_IER 0xC55C
#define GUC_BCS_RCS_IER _MMIO(0xC550)
#define GUC_VCS2_VCS1_IER _MMIO(0xC554)
#define GUC_WD_VECS_IER _MMIO(0xC558)
#define GUC_PM_P24C_IER _MMIO(0xC55C)
 
#endif
/drivers/video/drm/i915/i915_guc_submission.c
23,11 → 23,11
*/
#include <linux/firmware.h>
#include <linux/circ_buf.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_guc.h"
 
/**
* DOC: GuC Client
* DOC: GuC-based command submission
*
* i915_guc_client:
* We use the term client to avoid confusion with contexts. An i915_guc_client is
86,7 → 86,6
return -EINVAL;
 
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
spin_lock(&dev_priv->guc.host2guc_lock);
 
dev_priv->guc.action_count += 1;
dev_priv->guc.action_cmd = data[0];
119,7 → 118,6
}
dev_priv->guc.action_status = status;
 
spin_unlock(&dev_priv->guc.host2guc_lock);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
return ret;
161,9 → 159,9
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev_priv->dev) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
(IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) ||
(IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
(IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
(IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
258,7 → 256,7
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct guc_doorbell_info *doorbell;
void *base;
int drbreg = GEN8_DRBREGL(client->doorbell_id);
i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
int value;
 
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
292,8 → 290,6
const uint32_t cacheline_size = cache_line_size();
uint32_t offset;
 
spin_lock(&guc->host2guc_lock);
 
/* Doorbell uses a single cache line within a page */
offset = offset_in_page(guc->db_cacheline);
 
300,8 → 296,6
/* Moving to next cache line to reduce contention */
guc->db_cacheline += cacheline_size;
 
spin_unlock(&guc->host2guc_lock);
 
DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
offset, guc->db_cacheline, cacheline_size);
 
322,13 → 316,11
const uint16_t end = start + half;
uint16_t id;
 
spin_lock(&guc->host2guc_lock);
id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
if (id == end)
id = GUC_INVALID_DOORBELL_ID;
else
bitmap_set(guc->doorbell_bitmap, id, 1);
spin_unlock(&guc->host2guc_lock);
 
DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
hi_pri ? "high" : "normal", id);
338,9 → 330,7
 
static void release_doorbell(struct intel_guc *guc, uint16_t id)
{
spin_lock(&guc->host2guc_lock);
bitmap_clear(guc->doorbell_bitmap, id, 1);
spin_unlock(&guc->host2guc_lock);
}
 
/*
487,16 → 477,13
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = 0, timeout_counter = 200;
int ret = -ETIMEDOUT, timeout_counter = 200;
 
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
 
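/* Poll the shared process descriptor for CIRC_SPACE: up to 200 attempts
 * with a 1-2 ms sleep in between, i.e. roughly 200-400 ms worst case
 * before returning -ETIMEDOUT. */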
while (timeout_counter-- > 0) {
ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
gc->wq_size) >= size, 1);
 
if (!ret) {
if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
*offset = gc->wq_tail;
 
/* advance the tail for next workqueue item */
505,7 → 492,11
 
/* this will break the loop */
timeout_counter = 0;
ret = 0;
}
 
if (timeout_counter)
usleep_range(1000, 2000);
}
 
kunmap_atomic(base);
577,7 → 568,7
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
588,8 → 579,7
/**
* i915_guc_submit() - Submit commands through GuC
* @client: the guc client where commands will go through
* @ctx: LRC where commands come from
* @ring: HW engine that will execute the commands
* @rq: request associated with the commands
*
* Return: 0 on success
*/
598,7 → 588,6
{
struct intel_guc *guc = client->guc;
enum intel_ring_id ring_id = rq->ring->id;
unsigned long flags;
int q_ret, b_ret;
 
/* Need this because of the deferred pin ctx and ring */
605,8 → 594,6
/* Shall we move this right after ring is pinned? */
lr_context_update(rq);
 
spin_lock_irqsave(&client->wq_lock, flags);
 
q_ret = guc_add_workqueue_item(client, rq);
if (q_ret == 0)
b_ret = guc_ring_doorbell(client);
621,12 → 608,8
} else {
client->retcode = 0;
}
spin_unlock_irqrestore(&client->wq_lock, flags);
 
spin_lock(&guc->host2guc_lock);
guc->submissions[ring_id] += 1;
guc->last_seqno[ring_id] = rq->seqno;
spin_unlock(&guc->host2guc_lock);
 
return q_ret;
}
731,7 → 714,8
* The kernel client to replace ExecList submission is created with
* NORMAL priority. Priority of a client for scheduler can be HIGH,
* while a preemption context can use CRITICAL.
* @ctx the context to own the client (we use the default render context)
* @ctx: the context that owns the client (we use the default render
* context)
*
* Return: An i915_guc_client object on success.
*/
768,7 → 752,6
client->client_obj = obj;
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
spin_lock_init(&client->wq_lock);
 
client->doorbell_offset = select_doorbell_cacheline(guc);
 
871,8 → 854,6
if (!guc->ctx_pool_obj)
return -ENOMEM;
 
spin_lock_init(&dev_priv->guc.host2guc_lock);
 
ida_init(&guc->ctx_ids);
 
guc_create_log(guc);
/drivers/video/drm/i915/i915_irq.c
139,7 → 139,8
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
u32 val = I915_READ(reg);
 
147,7 → 148,7
return;
 
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
reg, val);
i915_mmio_reg_offset(reg), val);
I915_WRITE(reg, 0xffffffff);
POSTING_READ(reg);
I915_WRITE(reg, 0xffffffff);
214,7 → 215,7
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
238,18 → 239,6
}
}
 
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_display_irq(dev_priv, mask, mask);
}
 
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_display_irq(dev_priv, mask, 0);
}
 
/**
* ilk_update_gt_irq - update GTIMR
* @dev_priv: driver private
283,17 → 272,17
ilk_update_gt_irq(dev_priv, mask, 0);
}
 
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
 
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}
 
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
350,7 → 339,7
void gen6_reset_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg = gen6_pm_iir(dev_priv);
i915_reg_t reg = gen6_pm_iir(dev_priv);
 
spin_lock_irq(&dev_priv->irq_lock);
I915_WRITE(reg, dev_priv->pm_rps_events);
401,7 → 390,6
dev_priv->rps.interrupts_enabled = false;
spin_unlock_irq(&dev_priv->irq_lock);
 
cancel_work_sync(&dev_priv->rps.work);
 
spin_lock_irq(&dev_priv->irq_lock);
 
448,6 → 436,38
}
 
/**
* bdw_update_pipe_irq - update DE pipe interrupt
* @dev_priv: driver private
* @pipe: pipe whose interrupt to update
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
uint32_t new_val;
 
assert_spin_locked(&dev_priv->irq_lock);
 
WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
new_val = dev_priv->de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
 
if (new_val != dev_priv->de_irq_mask[pipe]) {
dev_priv->de_irq_mask[pipe] = new_val;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
}
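 
/* gen8_enable_vblank()/gen8_disable_vblank() further down now go through
 * the bdw_enable_pipe_irq()/bdw_disable_pipe_irq() wrappers (presumably
 * thin inlines around this function) instead of writing GEN8_DE_PIPE_IMR
 * directly. */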
 
/**
* ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
476,7 → 496,7
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask)
{
u32 reg = PIPESTAT(pipe);
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
assert_spin_locked(&dev_priv->irq_lock);
503,7 → 523,7
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask)
{
u32 reg = PIPESTAT(pipe);
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
assert_spin_locked(&dev_priv->irq_lock);
559,7 → 579,7
{
u32 enable_mask;
 
if (IS_VALLEYVIEW(dev_priv->dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
status_mask);
else
573,7 → 593,7
{
u32 enable_mask;
 
if (IS_VALLEYVIEW(dev_priv->dev))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
status_mask);
else
664,8 → 684,7
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
716,9 → 735,7
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
 
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
 
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
732,9 → 749,9
vtotal /= 2;
 
if (IS_GEN2(dev))
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
/*
* On HSW, the DSL reg (0x70000) appears to return 0 if we
826,7 → 843,7
* We can split this into vertical and horizontal
* scanout position.
*/
position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
/* convert to pixel counts */
vbl_start *= htotal;
993,18 → 1010,46
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
 
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
const struct intel_rps_ei *old,
const struct intel_rps_ei *now,
int threshold)
{
u64 time, c0;
unsigned int mul = 100;
 
if (old->cz_clock == 0)
return false;
 
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
mul <<= 8;
 
time = now->cz_clock - old->cz_clock;
time *= threshold * dev_priv->czclk_freq;
 
/* Workload can be split between render + media, e.g. SwapBuffers
* being blitted in X after being rendered in mesa. To account for
* this we need to combine both engines into our activity counter.
*/
c0 = now->render_c0 - old->render_c0;
c0 += now->media_c0 - old->media_c0;
c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
 
return c0 >= time;
}
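 
/* Both sides of the comparison end up in the same scaled units: 'time' is
 * the elapsed CZ interval times the threshold (in percent) times
 * czclk_freq, while 'c0' is the combined render+media residency scaled by
 * 100 (per cent) and the CZ-to-millisecond factor. c0 >= time therefore
 * means combined busyness reached the threshold percentage of the
 * interval. */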
 
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}
 
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now;
u32 events = 0;
 
if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
return 0;
 
vlv_c0_read(dev_priv, &now);
1011,33 → 1056,22
if (now.cz_clock == 0)
return 0;
 
if (prev->cz_clock) {
u64 time, c0;
unsigned int mul;
if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
if (!vlv_c0_above(dev_priv,
&dev_priv->rps.down_ei, &now,
dev_priv->rps.down_threshold))
events |= GEN6_PM_RP_DOWN_THRESHOLD;
dev_priv->rps.down_ei = now;
}
 
mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
mul <<= 8;
 
time = now.cz_clock - prev->cz_clock;
time *= dev_priv->czclk_freq;
 
/* Workload can be split between render + media,
* e.g. SwapBuffers being blitted in X after being rendered in
* mesa. To account for this we need to combine both engines
* into our activity counter.
*/
c0 = now.render_c0 - prev->render_c0;
c0 += now.media_c0 - prev->media_c0;
c0 *= mul;
 
if (c0 > time * dev_priv->rps.up_threshold)
events = GEN6_PM_RP_UP_THRESHOLD;
else if (c0 < time * dev_priv->rps.down_threshold)
events = GEN6_PM_RP_DOWN_THRESHOLD;
if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
if (vlv_c0_above(dev_priv,
&dev_priv->rps.up_ei, &now,
dev_priv->rps.up_threshold))
events |= GEN6_PM_RP_UP_THRESHOLD;
dev_priv->rps.up_ei = now;
}
 
dev_priv->rps.ei = now;
return events;
}
 
1067,6 → 1101,14
spin_unlock_irq(&dev_priv->irq_lock);
return;
}
 
/*
* The RPS work is synced during runtime suspend, we don't require a
* wakeref. TODO: instead of disabling the asserts make sure that we
* always hold an RPM reference while the work is running.
*/
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1079,7 → 1121,7
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
 
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
return;
goto out;
 
mutex_lock(&dev_priv->rps.hw_lock);
 
1134,6 → 1176,8
intel_set_rps(dev_priv->dev, new_delay);
 
mutex_unlock(&dev_priv->rps.hw_lock);
out:
ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}
 
 
1170,7 → 1214,7
POSTING_READ(GEN7_MISCCPCTL);
 
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
u32 reg;
i915_reg_t reg;
 
slice--;
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1178,7 → 1222,7
 
dev_priv->l3_parity.which_slice &= ~(1<<slice);
 
reg = GEN7_L3CDERRST1 + (slice * 0x200);
reg = GEN7_L3CDERRST1(slice);
 
error_status = I915_READ(reg);
row = GEN7_PARITY_ERROR_ROW(error_status);
1258,6 → 1302,15
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
 
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
{
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
notify_ring(ring);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
intel_lrc_irq_handler(ring);
}
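 
/* Folds the per-engine user-interrupt and context-switch tests that each
 * GT IIR block below used to open-code into one helper. */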
 
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 master_ctl)
{
1264,64 → 1317,54
irqreturn_t ret = IRQ_NONE;
 
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
if (tmp) {
I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(0), iir);
ret = IRQ_HANDLED;
 
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[RCS]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[RCS]);
gen8_cs_irq_handler(&dev_priv->ring[RCS],
iir, GEN8_RCS_IRQ_SHIFT);
 
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[BCS]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[BCS]);
gen8_cs_irq_handler(&dev_priv->ring[BCS],
iir, GEN8_BCS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
 
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
if (tmp) {
I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(1), iir);
ret = IRQ_HANDLED;
 
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[VCS]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VCS]);
gen8_cs_irq_handler(&dev_priv->ring[VCS],
iir, GEN8_VCS1_IRQ_SHIFT);
 
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VCS2]);
gen8_cs_irq_handler(&dev_priv->ring[VCS2],
iir, GEN8_VCS2_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
 
if (master_ctl & GEN8_GT_VECS_IRQ) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
if (tmp) {
I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(3), iir);
ret = IRQ_HANDLED;
 
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[VECS]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VECS]);
gen8_cs_irq_handler(&dev_priv->ring[VECS],
iir, GEN8_VECS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
 
if (master_ctl & GEN8_GT_PM_IRQ) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
if (tmp & dev_priv->pm_rps_events) {
u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
if (iir & dev_priv->pm_rps_events) {
I915_WRITE_FW(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
iir & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
gen6_rps_irq_handler(dev_priv, tmp);
gen6_rps_irq_handler(dev_priv, iir);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
1593,7 → 1636,7
 
spin_lock(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
int reg;
i915_reg_t reg;
u32 mask, iir_bit = 0;
 
/*
1674,7 → 1717,7
*/
POSTING_READ(PORT_HOTPLUG_STAT);
 
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
if (hotplug_trigger) {
1709,6 → 1752,9
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
while (true) {
/* Find, clear, then process each source of interrupt */
 
1743,6 → 1789,8
}
 
out:
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
 
1756,6 → 1804,9
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
for (;;) {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
1786,6 → 1837,8
POSTING_READ(GEN8_MASTER_IRQ);
}
 
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
 
1795,8 → 1848,24
struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
/*
* Somehow the PCH doesn't seem to really ack the interrupt to the CPU
* unless we touch the hotplug register, even if hotplug_trigger is
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
PORTC_HOTPLUG_STATUS_MASK |
PORTB_HOTPLUG_STATUS_MASK;
dig_hotplug_reg &= ~mask;
}
 
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
 
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd,
1811,7 → 1880,6
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
if (hotplug_trigger)
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
 
if (pch_iir & SDE_AUDIO_POWER_MASK) {
1905,7 → 1973,6
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
if (hotplug_trigger)
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
 
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2102,6 → 2169,9
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
2160,6 → 2230,9
POSTING_READ(SDEIER);
}
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
 
2192,6 → 2265,9
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
if (INTEL_INFO(dev_priv)->gen >= 9)
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
2199,7 → 2275,7
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
return IRQ_NONE;
goto out;
 
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
 
2334,6 → 2410,9
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ_FW(GEN8_MASTER_IRQ);
 
out:
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
 
2393,6 → 2472,13
*/
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
/*
* In most cases it's guaranteed that we get here with an RPM
* reference held, for example because there is a pending GPU
* request that won't finish until the reset is done. This
* isn't the case at least when we get here by doing a
* simulated reset via debugs, so get an RPM reference.
*/
intel_runtime_pm_get(dev_priv);
 
/*
2600,7 → 2686,7
DE_PIPE_VBLANK(pipe);
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, bit);
ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
2625,10 → 2711,9
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
}
 
2655,7 → 2740,7
DE_PIPE_VBLANK(pipe);
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, bit);
ilk_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
2676,9 → 2761,7
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
2917,6 → 3000,13
if (!i915.enable_hangcheck)
return;
 
/*
* The hangcheck work is synced during runtime suspend, we don't
* require a wakeref. TODO: instead of disabling the asserts make
* sure that we hold a reference when this work is running.
*/
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 
for_each_ring(ring, dev_priv, i) {
u64 acthd;
u32 seqno;
3387,7 → 3477,7
* setup is guaranteed to run in single-threaded context. But we
* need it to make the assert_spin_locked happy. */
spin_lock_irq(&dev_priv->irq_lock);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
3787,13 → 3877,18
u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
irqreturn_t ret;
 
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
ret = IRQ_NONE;
iir = I915_READ16(IIR);
if (iir == 0)
return IRQ_NONE;
goto out;
 
while (iir & ~flip_mask) {
/* Can't rely on pipestat interrupt bit in iir as it might
3806,7 → 3901,7
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
i915_reg_t reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
/*
3842,8 → 3937,12
 
iir = new_iir;
}
ret = IRQ_HANDLED;
 
return IRQ_HANDLED;
out:
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
 
static void i8xx_irq_uninstall(struct drm_device * dev)
3974,6 → 4073,9
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
iir = I915_READ(IIR);
do {
bool irq_received = (iir & ~flip_mask) != 0;
3989,7 → 4091,7
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
i915_reg_t reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
/* Clear the PIPE*STAT regs before the IIR */
4056,6 → 4158,8
iir = new_iir;
} while (iir & ~flip_mask);
 
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
 
4195,6 → 4299,9
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
iir = I915_READ(IIR);
 
for (;;) {
4211,7 → 4318,7
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
i915_reg_t reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
/*
4280,6 → 4387,8
iir = new_iir;
}
 
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
}
 
4323,9 → 4432,9
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
/drivers/video/drm/i915/i915_params.c
32,6 → 32,7
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,
.enable_rc6 = 0,
.enable_dc = -1,
.enable_fbc = -1,
.enable_execlists = -1,
.enable_hangcheck = true,
82,6 → 83,11
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
 
module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400);
MODULE_PARM_DESC(enable_dc,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
 
module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
114,7 → 120,7
module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
 
module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
128,7 → 134,7
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support.");
 
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600);
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
MODULE_PARM_DESC(disable_power_well,
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
/drivers/video/drm/i915/i915_reg.h
25,14 → 25,43
#ifndef _I915_REG_H_
#define _I915_REG_H_
 
typedef struct {
uint32_t reg;
} i915_reg_t;
 
#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
 
#define INVALID_MMIO_REG _MMIO(0)
 
static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
{
return reg.reg;
}
 
static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
{
return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b);
}
 
static inline bool i915_mmio_reg_valid(i915_reg_t reg)
{
return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}
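 
/* Usage sketch: register offsets are now opaque i915_reg_t values, so
 * accidental arithmetic on raw offsets no longer type-checks:
 *
 *	i915_reg_t reg = GEN6_GDRST;
 *	uint32_t offset = i915_mmio_reg_offset(reg);
 *
 *	if (i915_mmio_reg_valid(reg))
 *		I915_WRITE(reg, GEN6_GRDOM_FULL);
 */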
 
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
#define _PLANE(plane, a, b) _PIPE(plane, a, b)
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PIPE3(pipe, a, b, c))
#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
(port) == PORT_B ? (b) : (c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
 
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
105,7 → 134,7
#define GRDOM_RESET_STATUS (1<<1)
#define GRDOM_RESET_ENABLE (1<<0)
 
#define ILK_GDSR (MCHBAR_MIRROR_BASE + 0x2ca4)
#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
#define ILK_GRDOM_FULL (0<<1)
#define ILK_GRDOM_RENDER (1<<1)
#define ILK_GRDOM_MEDIA (3<<1)
112,7 → 141,7
#define ILK_GRDOM_MASK (3<<1)
#define ILK_GRDOM_RESET_ENABLE (1<<0)
 
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
#define GEN6_MBC_SNPCR_MASK (3<<21)
#define GEN6_MBC_SNPCR_MAX (0<<21)
120,10 → 149,10
#define GEN6_MBC_SNPCR_LOW (2<<21)
#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
 
#define VLV_G3DCTL 0x9024
#define VLV_GSCKGCTL 0x9028
#define VLV_G3DCTL _MMIO(0x9024)
#define VLV_GSCKGCTL _MMIO(0x9028)
 
#define GEN6_MBCTL 0x0907c
#define GEN6_MBCTL _MMIO(0x0907c)
#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
130,21 → 159,21
#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
 
#define GEN6_GDRST 0x941c
#define GEN6_GDRST _MMIO(0x941c)
#define GEN6_GRDOM_FULL (1 << 0)
#define GEN6_GRDOM_RENDER (1 << 1)
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
 
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) _MMIO((ring)->mmio_base+0x220)
#define PP_DIR_DCLV_2G 0xffffffff
 
#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
#define GEN8_RING_PDP_UDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8 + 4)
#define GEN8_RING_PDP_LDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8)
 
#define GEN8_R_PWR_CLK_STATE 0x20C8
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
#define GEN8_RPCS_S_CNT_SHIFT 15
157,7 → 186,7
#define GEN8_RPCS_EU_MIN_SHIFT 0
#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
 
#define GAM_ECOCHK 0x4090
#define GAM_ECOCHK _MMIO(0x4090)
#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
#define ECOCHK_SNB_BIT (1<<10)
#define ECOCHK_DIS_TLB (1<<8)
170,15 → 199,15
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
 
#define GAC_ECO_BITS 0x14090
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
#define ECOBITS_PPGTT_CACHE4B (0<<8)
 
#define GAB_CTL 0x24000
#define GAB_CTL _MMIO(0x24000)
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
 
#define GEN6_STOLEN_RESERVED 0x1082C0
#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
#define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18)
#define GEN6_STOLEN_RESERVED_SIZE_MASK (3 << 4)
200,6 → 229,7
#define VGA_ST01_MDA 0x3ba
#define VGA_ST01_CGA 0x3da
 
#define _VGA_MSR_WRITE _MMIO(0x3c2)
#define VGA_MSR_WRITE 0x3c2
#define VGA_MSR_READ 0x3cc
#define VGA_MSR_MEM_EN (1<<1)
377,10 → 407,12
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
#define MI_BATCH_RESOURCE_STREAMER (1<<10)
 
#define MI_PREDICATE_SRC0 (0x2400)
#define MI_PREDICATE_SRC1 (0x2408)
#define MI_PREDICATE_SRC0 _MMIO(0x2400)
#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4)
#define MI_PREDICATE_SRC1 _MMIO(0x2408)
#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
 
#define MI_PREDICATE_RESULT_2 (0x2214)
#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
#define LOWER_SLICE_DISABLED (0<<0)
 
509,49 → 541,61
/*
* Registers used only by the command parser
*/
#define BCS_SWCTRL 0x22200
#define BCS_SWCTRL _MMIO(0x22200)
 
#define GPGPU_THREADS_DISPATCHED 0x2290
#define HS_INVOCATION_COUNT 0x2300
#define DS_INVOCATION_COUNT 0x2308
#define IA_VERTICES_COUNT 0x2310
#define IA_PRIMITIVES_COUNT 0x2318
#define VS_INVOCATION_COUNT 0x2320
#define GS_INVOCATION_COUNT 0x2328
#define GS_PRIMITIVES_COUNT 0x2330
#define CL_INVOCATION_COUNT 0x2338
#define CL_PRIMITIVES_COUNT 0x2340
#define PS_INVOCATION_COUNT 0x2348
#define PS_DEPTH_COUNT 0x2350
#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
#define HS_INVOCATION_COUNT _MMIO(0x2300)
#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4)
#define DS_INVOCATION_COUNT _MMIO(0x2308)
#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4)
#define IA_VERTICES_COUNT _MMIO(0x2310)
#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4)
#define IA_PRIMITIVES_COUNT _MMIO(0x2318)
#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4)
#define VS_INVOCATION_COUNT _MMIO(0x2320)
#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4)
#define GS_INVOCATION_COUNT _MMIO(0x2328)
#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4)
#define GS_PRIMITIVES_COUNT _MMIO(0x2330)
#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4)
#define CL_INVOCATION_COUNT _MMIO(0x2338)
#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4)
#define CL_PRIMITIVES_COUNT _MMIO(0x2340)
#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4)
#define PS_INVOCATION_COUNT _MMIO(0x2348)
#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4)
#define PS_DEPTH_COUNT _MMIO(0x2350)
#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4)
 
/* There are the 4 64-bit counter registers, one for each stream output */
#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8)
#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4)
 
#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
 
#define GEN7_3DPRIM_END_OFFSET 0x2420
#define GEN7_3DPRIM_START_VERTEX 0x2430
#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
#define GEN7_3DPRIM_START_INSTANCE 0x243C
#define GEN7_3DPRIM_BASE_VERTEX 0x2440
#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420)
#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430)
#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434)
#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438)
#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243C)
#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440)
 
#define GEN7_GPGPU_DISPATCHDIMX 0x2500
#define GEN7_GPGPU_DISPATCHDIMY 0x2504
#define GEN7_GPGPU_DISPATCHDIMZ 0x2508
#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500)
#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
 
#define OACONTROL 0x2360
#define OACONTROL _MMIO(0x2360)
 
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \
_GEN7_PIPEA_DE_LOAD_SL, \
_GEN7_PIPEB_DE_LOAD_SL)
#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
 
/*
* Reset registers
*/
#define DEBUG_RESET_I830 0x6070
#define DEBUG_RESET_I830 _MMIO(0x6070)
#define DEBUG_RESET_FULL (1<<7)
#define DEBUG_RESET_RENDER (1<<8)
#define DEBUG_RESET_DISPLAY (1<<9)
559,7 → 603,7
/*
* IOSF sideband
*/
#define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100)
#define VLV_IOSF_DOORBELL_REQ _MMIO(VLV_DISPLAY_BASE + 0x2100)
#define IOSF_DEVFN_SHIFT 24
#define IOSF_OPCODE_SHIFT 16
#define IOSF_PORT_SHIFT 8
576,8 → 620,8
#define IOSF_PORT_CCU 0xA9
#define IOSF_PORT_GPS_CORE 0x48
#define IOSF_PORT_FLISDSI 0x1B
#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)
 
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
609,6 → 653,7
 
/* See the PUNIT HAS v0.8 for the below bits */
enum punit_power_well {
/* These numbers are fixed and must match the position of the pw bits */
PUNIT_POWER_WELL_RENDER = 0,
PUNIT_POWER_WELL_MEDIA = 1,
PUNIT_POWER_WELL_DISP2D = 3,
621,10 → 666,12
PUNIT_POWER_WELL_DPIO_RX1 = 11,
PUNIT_POWER_WELL_DPIO_CMN_D = 12,
 
PUNIT_POWER_WELL_NUM,
/* Not actual bit groups. Used as IDs for lookup_power_well() */
PUNIT_POWER_WELL_ALWAYS_ON,
};
 
enum skl_disp_power_wells {
/* These numbers are fixed and must match the position of the pw bits */
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_DDI_A_E,
SKL_DISP_PW_DDI_B,
632,6 → 679,10
SKL_DISP_PW_DDI_D,
SKL_DISP_PW_1 = 14,
SKL_DISP_PW_2,
 
/* Not actual bit groups. Used as IDs for lookup_power_well() */
SKL_DISP_PW_ALWAYS_ON,
SKL_DISP_PW_DC_OFF,
};
 
#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
804,8 → 855,8
*
* Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
* digital port D (CHV) or port A (BXT).
*/
/*
*
*
* Dual channel PHY (VLV/CHV/BXT)
* ---------------------------------
* | CH0 | CH1 |
832,7 → 883,7
*/
#define DPIO_DEVFN 0
 
#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
1185,9 → 1236,9
#define DPIO_UPAR_SHIFT 30
 
/* BXT PHY registers */
#define _BXT_PHY(phy, a, b) _PIPE((phy), (a), (b))
#define _BXT_PHY(phy, a, b) _MMIO_PIPE((phy), (a), (b))
 
#define BXT_P_CR_GT_DISP_PWRON 0x138090
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
 
#define _PHY_CTL_FAMILY_EDP 0x64C80
1203,7 → 1254,7
#define PORT_PLL_ENABLE (1 << 31)
#define PORT_PLL_LOCK (1 << 30)
#define PORT_PLL_REF_SEL (1 << 27)
#define BXT_PORT_PLL_ENABLE(port) _PORT(port, _PORT_PLL_A, _PORT_PLL_B)
#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
 
#define _PORT_PLL_EBB_0_A 0x162034
#define _PORT_PLL_EBB_0_B 0x6C034
1214,7 → 1265,7
#define PORT_PLL_P2_SHIFT 8
#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
#define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \
#define BXT_PORT_PLL_EBB_0(port) _MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \
_PORT_PLL_EBB_0_B, \
_PORT_PLL_EBB_0_C)
 
1223,7 → 1274,7
#define _PORT_PLL_EBB_4_C 0x6C344
#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
#define PORT_PLL_RECALIBRATE (1 << 14)
#define BXT_PORT_PLL_EBB_4(port) _PORT3(port, _PORT_PLL_EBB_4_A, \
#define BXT_PORT_PLL_EBB_4(port) _MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \
_PORT_PLL_EBB_4_B, \
_PORT_PLL_EBB_4_C)
 
1259,7 → 1310,7
#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
_PORT_PLL_0_B, \
_PORT_PLL_0_C)
#define BXT_PORT_PLL(port, idx) (_PORT_PLL_BASE(port) + (idx) * 4)
#define BXT_PORT_PLL(port, idx) _MMIO(_PORT_PLL_BASE(port) + (idx) * 4)
 
/* BXT PHY common lane registers */
#define _PORT_CL1CM_DW0_A 0x162000
1297,7 → 1348,7
_PORT_CL1CM_DW30_A)
 
/* Defined for PHY0 only */
#define BXT_PORT_CL2CM_DW6_BC 0x6C358
#define BXT_PORT_CL2CM_DW6_BC _MMIO(0x6C358)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
 
/* BXT PHY Ref registers */
1337,10 → 1388,10
#define _PORT_PCS_DW10_GRP_A 0x162C28
#define _PORT_PCS_DW10_GRP_B 0x6CC28
#define _PORT_PCS_DW10_GRP_C 0x6CE28
#define BXT_PORT_PCS_DW10_LN01(port) _PORT3(port, _PORT_PCS_DW10_LN01_A, \
#define BXT_PORT_PCS_DW10_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \
_PORT_PCS_DW10_LN01_B, \
_PORT_PCS_DW10_LN01_C)
#define BXT_PORT_PCS_DW10_GRP(port) _PORT3(port, _PORT_PCS_DW10_GRP_A, \
#define BXT_PORT_PCS_DW10_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A, \
_PORT_PCS_DW10_GRP_B, \
_PORT_PCS_DW10_GRP_C)
#define TX2_SWING_CALC_INIT (1 << 31)
1357,13 → 1408,13
#define _PORT_PCS_DW12_GRP_C 0x6CE30
#define LANESTAGGER_STRAP_OVRD (1 << 6)
#define LANE_STAGGER_MASK 0x1F
#define BXT_PORT_PCS_DW12_LN01(port) _PORT3(port, _PORT_PCS_DW12_LN01_A, \
#define BXT_PORT_PCS_DW12_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \
_PORT_PCS_DW12_LN01_B, \
_PORT_PCS_DW12_LN01_C)
#define BXT_PORT_PCS_DW12_LN23(port) _PORT3(port, _PORT_PCS_DW12_LN23_A, \
#define BXT_PORT_PCS_DW12_LN23(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \
_PORT_PCS_DW12_LN23_B, \
_PORT_PCS_DW12_LN23_C)
#define BXT_PORT_PCS_DW12_GRP(port) _PORT3(port, _PORT_PCS_DW12_GRP_A, \
#define BXT_PORT_PCS_DW12_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \
_PORT_PCS_DW12_GRP_B, \
_PORT_PCS_DW12_GRP_C)
 
1377,10 → 1428,10
#define _PORT_TX_DW2_GRP_A 0x162D08
#define _PORT_TX_DW2_GRP_B 0x6CD08
#define _PORT_TX_DW2_GRP_C 0x6CF08
#define BXT_PORT_TX_DW2_GRP(port) _PORT3(port, _PORT_TX_DW2_GRP_A, \
#define BXT_PORT_TX_DW2_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW2_GRP_A, \
_PORT_TX_DW2_GRP_B, \
_PORT_TX_DW2_GRP_C)
#define BXT_PORT_TX_DW2_LN0(port) _PORT3(port, _PORT_TX_DW2_LN0_A, \
#define BXT_PORT_TX_DW2_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW2_LN0_A, \
_PORT_TX_DW2_LN0_B, \
_PORT_TX_DW2_LN0_C)
#define MARGIN_000_SHIFT 16
1394,10 → 1445,10
#define _PORT_TX_DW3_GRP_A 0x162D0C
#define _PORT_TX_DW3_GRP_B 0x6CD0C
#define _PORT_TX_DW3_GRP_C 0x6CF0C
#define BXT_PORT_TX_DW3_GRP(port) _PORT3(port, _PORT_TX_DW3_GRP_A, \
#define BXT_PORT_TX_DW3_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW3_GRP_A, \
_PORT_TX_DW3_GRP_B, \
_PORT_TX_DW3_GRP_C)
#define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \
#define BXT_PORT_TX_DW3_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW3_LN0_A, \
_PORT_TX_DW3_LN0_B, \
_PORT_TX_DW3_LN0_C)
#define SCALE_DCOMP_METHOD (1 << 26)
1409,10 → 1460,10
#define _PORT_TX_DW4_GRP_A 0x162D10
#define _PORT_TX_DW4_GRP_B 0x6CD10
#define _PORT_TX_DW4_GRP_C 0x6CF10
#define BXT_PORT_TX_DW4_LN0(port) _PORT3(port, _PORT_TX_DW4_LN0_A, \
#define BXT_PORT_TX_DW4_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW4_LN0_A, \
_PORT_TX_DW4_LN0_B, \
_PORT_TX_DW4_LN0_C)
#define BXT_PORT_TX_DW4_GRP(port) _PORT3(port, _PORT_TX_DW4_GRP_A, \
#define BXT_PORT_TX_DW4_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW4_GRP_A, \
_PORT_TX_DW4_GRP_B, \
_PORT_TX_DW4_GRP_C)
#define DEEMPH_SHIFT 24
1423,17 → 1474,17
#define _PORT_TX_DW14_LN0_C 0x6C938
#define LATENCY_OPTIM_SHIFT 30
#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
#define BXT_PORT_TX_DW14_LN(port, lane) (_PORT3((port), _PORT_TX_DW14_LN0_A, \
#define BXT_PORT_TX_DW14_LN(port, lane) _MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A, \
_PORT_TX_DW14_LN0_B, \
_PORT_TX_DW14_LN0_C) + \
_BXT_LANE_OFFSET(lane))
 
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 0x4F074
#define UAIMI_SPR1 _MMIO(0x4F074)
/* SKL VccIO mask */
#define SKL_VCCIO_MASK 0x1
/* SKL balance leg register */
#define DISPIO_CR_TX_BMU_CR0 0x6C00C
#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
/* I_boost values */
#define BALANCE_LEG_SHIFT(port) (8+3*(port))
#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
1450,7 → 1501,7
* [0-15] @ 0x100000 gen6,vlv,chv
* [0-31] @ 0x100000 gen7+
*/
#define FENCE_REG(i) (0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)
#define FENCE_REG(i) _MMIO(0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)
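/* Worked example (illustrative, not part of this diff): FENCE_REG(3) =
 * 0x2000 + 3*4 = 0x200c, while FENCE_REG(9) = 0x2000 + (8<<9) + 1*4 =
 * 0x3004, i.e. fences 8-15 sit in a second bank at 0x3000. */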
#define I830_FENCE_START_MASK 0x07f80000
#define I830_FENCE_TILING_Y_SHIFT 12
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
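/* e.g. (illustrative): a 1MB fence gives I830_FENCE_SIZE_BITS(0x100000) =
 * (ffs(0x100000 >> 19) - 1) << 8 = (ffs(2) - 1) << 8 = 1 << 8. */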
1463,21 → 1514,21
#define I915_FENCE_START_MASK 0x0ff00000
#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
 
#define FENCE_REG_965_LO(i) (0x03000 + (i) * 8)
#define FENCE_REG_965_HI(i) (0x03000 + (i) * 8 + 4)
#define FENCE_REG_965_LO(i) _MMIO(0x03000 + (i) * 8)
#define FENCE_REG_965_HI(i) _MMIO(0x03000 + (i) * 8 + 4)
#define I965_FENCE_PITCH_SHIFT 2
#define I965_FENCE_TILING_Y_SHIFT 1
#define I965_FENCE_REG_VALID (1<<0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
 
#define FENCE_REG_GEN6_LO(i) (0x100000 + (i) * 8)
#define FENCE_REG_GEN6_HI(i) (0x100000 + (i) * 8 + 4)
#define FENCE_REG_GEN6_LO(i) _MMIO(0x100000 + (i) * 8)
#define FENCE_REG_GEN6_HI(i) _MMIO(0x100000 + (i) * 8 + 4)
#define GEN6_FENCE_PITCH_SHIFT 32
#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
 
 
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL _MMIO(0x101000)
#define TILECTL_SWZCTL (1 << 0)
#define TILECTL_TLBPF (1 << 1)
#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
1486,10 → 1537,10
/*
* Instruction and interrupt control regs
*/
#define PGTBL_CTL 0x02020
#define PGTBL_CTL _MMIO(0x02020)
#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER 0x02024
#define PGTBL_ER _MMIO(0x02024)
#define PRB0_BASE (0x2030-0x30)
#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
#define PRB2_BASE (0x2050-0x30) /* gen3 */
1503,13 → 1554,13
#define GEN8_BSD2_RING_BASE 0x1c000
#define VEBOX_RING_BASE 0x1a000
#define BLT_RING_BASE 0x22000
#define RING_TAIL(base) ((base)+0x30)
#define RING_HEAD(base) ((base)+0x34)
#define RING_START(base) ((base)+0x38)
#define RING_CTL(base) ((base)+0x3c)
#define RING_SYNC_0(base) ((base)+0x40)
#define RING_SYNC_1(base) ((base)+0x44)
#define RING_SYNC_2(base) ((base)+0x48)
#define RING_TAIL(base) _MMIO((base)+0x30)
#define RING_HEAD(base) _MMIO((base)+0x34)
#define RING_START(base) _MMIO((base)+0x38)
#define RING_CTL(base) _MMIO((base)+0x3c)
#define RING_SYNC_0(base) _MMIO((base)+0x40)
#define RING_SYNC_1(base) _MMIO((base)+0x44)
#define RING_SYNC_2(base) _MMIO((base)+0x48)
#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
1522,51 → 1573,52
#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE))
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC 0
#define RING_PSMI_CTL(base) ((base)+0x50)
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define RING_RESET_CTL(base) ((base)+0xd0)
#define GEN6_NOSYNC INVALID_MMIO_REG
#define RING_PSMI_CTL(base) _MMIO((base)+0x50)
#define RING_MAX_IDLE(base) _MMIO((base)+0x54)
#define RING_HWS_PGA(base) _MMIO((base)+0x80)
#define RING_HWS_PGA_GEN6(base) _MMIO((base)+0x2080)
#define RING_RESET_CTL(base) _MMIO((base)+0xd0)
#define RESET_CTL_REQUEST_RESET (1 << 0)
#define RESET_CTL_READY_TO_RESET (1 << 1)
 
#define HSW_GTT_CACHE_EN 0x4024
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
#define GTT_CACHE_EN_ALL 0xF0007FFF
#define GEN7_WR_WATERMARK 0x4028
#define GEN7_GFX_PRIO_CTRL 0x402C
#define ARB_MODE 0x4030
#define GEN7_WR_WATERMARK _MMIO(0x4028)
#define GEN7_GFX_PRIO_CTRL _MMIO(0x402C)
#define ARB_MODE _MMIO(0x4030)
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
#define GEN7_GFX_PEND_TLB0 0x4034
#define GEN7_GFX_PEND_TLB1 0x4038
#define GEN7_GFX_PEND_TLB0 _MMIO(0x4034)
#define GEN7_GFX_PEND_TLB1 _MMIO(0x4038)
/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
#define GEN7_LRA_LIMITS(i) (0x403C + (i) * 4)
#define GEN7_LRA_LIMITS(i) _MMIO(0x403C + (i) * 4)
#define GEN7_LRA_LIMITS_REG_NUM 13
#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
#define GEN7_GFX_MAX_REQ_COUNT 0x4074
#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070)
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
 
#define GAMTARBMODE 0x04a08
#define GAMTARBMODE _MMIO(0x04a08)
#define ARB_MODE_BWGTLB_DISABLE (1<<9)
#define ARB_MODE_SWIZZLE_BDW (1<<1)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
#define RING_FAULT_REG(ring) _MMIO(0x4094 + 0x100*(ring)->id)
#define RING_FAULT_GTTSEL_MASK (1<<11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
#define RING_FAULT_VALID (1<<0)
#define DONE_REG 0x40b0
#define GEN8_PRIVATE_PAT_LO 0x40e0
#define GEN8_PRIVATE_PAT_HI (0x40e0 + 4)
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define VEBOX_HWS_PGA_GEN7 (0x04380)
#define RING_ACTHD(base) ((base)+0x74)
#define RING_ACTHD_UDW(base) ((base)+0x5c)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
#define RING_HWSTAM(base) ((base)+0x98)
#define RING_TIMESTAMP(base) ((base)+0x358)
#define DONE_REG _MMIO(0x40b0)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
#define RING_ACTHD(base) _MMIO((base)+0x74)
#define RING_ACTHD_UDW(base) _MMIO((base)+0x5c)
#define RING_NOPID(base) _MMIO((base)+0x94)
#define RING_IMR(base) _MMIO((base)+0xa8)
#define RING_HWSTAM(base) _MMIO((base)+0x98)
#define RING_TIMESTAMP(base) _MMIO((base)+0x358)
#define RING_TIMESTAMP_UDW(base) _MMIO((base)+0x358 + 4)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
1583,57 → 1635,65
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
 
#define GEN7_TLB_RD_ADDR 0x4700
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
 
#if 0
#define PRB0_TAIL 0x02030
#define PRB0_HEAD 0x02034
#define PRB0_START 0x02038
#define PRB0_CTL 0x0203c
#define PRB1_TAIL 0x02040 /* 915+ only */
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
#define PRB0_START _MMIO(0x2038)
#define PRB0_CTL _MMIO(0x203c)
#define PRB1_TAIL _MMIO(0x2040) /* 915+ only */
#define PRB1_HEAD _MMIO(0x2044) /* 915+ only */
#define PRB1_START _MMIO(0x2048) /* 915+ only */
#define PRB1_CTL _MMIO(0x204c) /* 915+ only */
#endif
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define GEN7_SC_INSTDONE 0x07100
#define GEN7_SAMPLER_INSTDONE 0x0e160
#define GEN7_ROW_INSTDONE 0x0e164
#define IPEIR_I965 _MMIO(0x2064)
#define IPEHR_I965 _MMIO(0x2068)
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
#define I915_NUM_INSTDONE_REG 4
#define RING_IPEIR(base) ((base)+0x64)
#define RING_IPEHR(base) ((base)+0x68)
#define RING_IPEIR(base) _MMIO((base)+0x64)
#define RING_IPEHR(base) _MMIO((base)+0x68)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
* The GEN2 counterpart of this register is GEN2_INSTDONE.
*/
#define RING_INSTDONE(base) ((base)+0x6c)
#define RING_INSTPS(base) ((base)+0x70)
#define RING_DMA_FADD(base) ((base)+0x78)
#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */
#define RING_INSTPM(base) ((base)+0xc0)
#define RING_MI_MODE(base) ((base)+0x9c)
#define INSTPS 0x02070 /* 965+ only */
#define GEN4_INSTDONE1 0x0207c /* 965+ only, aka INSTDONE_2 on SNB */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
#define RING_INSTDONE(base) _MMIO((base)+0x6c)
#define RING_INSTPS(base) _MMIO((base)+0x70)
#define RING_DMA_FADD(base) _MMIO((base)+0x78)
#define RING_DMA_FADD_UDW(base) _MMIO((base)+0x60) /* gen8+ */
#define RING_INSTPM(base) _MMIO((base)+0xc0)
#define RING_MI_MODE(base) _MMIO((base)+0x9c)
#define INSTPS _MMIO(0x2070) /* 965+ only */
#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
#define ACTHD_I965 _MMIO(0x2074)
#define HWS_PGA _MMIO(0x2080)
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA 0x2088 /* 965GM+ only */
#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
#define PWRCTX_EN (1<<0)
#define IPEIR 0x02088
#define IPEHR 0x0208c
#define GEN2_INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
#define DMA_FADD_I8XX 0x020d0
#define RING_BBSTATE(base) ((base)+0x110)
#define RING_BBADDR(base) ((base)+0x140)
#define RING_BBADDR_UDW(base) ((base)+0x168) /* gen8+ */
#define IPEIR _MMIO(0x2088)
#define IPEHR _MMIO(0x208c)
#define GEN2_INSTDONE _MMIO(0x2090)
#define NOPID _MMIO(0x2094)
#define HWSTAM _MMIO(0x2098)
#define DMA_FADD_I8XX _MMIO(0x20d0)
#define RING_BBSTATE(base) _MMIO((base)+0x110)
#define RING_BB_PPGTT (1 << 5)
#define RING_SBBADDR(base) _MMIO((base)+0x114) /* hsw+ */
#define RING_SBBSTATE(base) _MMIO((base)+0x118) /* hsw+ */
#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */
#define RING_BBADDR(base) _MMIO((base)+0x140)
#define RING_BBADDR_UDW(base) _MMIO((base)+0x168) /* gen8+ */
#define RING_BB_PER_CTX_PTR(base) _MMIO((base)+0x1c0) /* gen8+ */
#define RING_INDIRECT_CTX(base) _MMIO((base)+0x1c4) /* gen8+ */
#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */
#define RING_CTX_TIMESTAMP(base) _MMIO((base)+0x3a8) /* gen8+ */
 
#define ERROR_GEN6 0x040a0
#define GEN7_ERR_INT 0x44040
#define ERROR_GEN6 _MMIO(0x40a0)
#define GEN7_ERR_INT _MMIO(0x44040)
#define ERR_INT_POISON (1<<31)
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
1645,13 → 1705,13
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
 
#define GEN8_FAULT_TLB_DATA0 0x04b10
#define GEN8_FAULT_TLB_DATA1 0x04b14
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
 
#define FPGA_DBG 0x42300
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1<<31)
 
#define DERRMR 0x44050
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
#define DERRMR_PIPEA_SCANLINE (1<<0)
#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
1675,21 → 1735,21
* for various sorts of correct behavior. The top 16 bits of each are
* the enables for writing to the corresponding low bit.
*/
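/*
 * Minimal usage sketch (illustrative, not part of this diff): because of the
 * write-enable layout described above, a low bit only changes when its mirror
 * in the top 16 bits is also set, which is what the _MASKED_BIT_ENABLE()/
 * _MASKED_BIT_DISABLE() helpers in this header produce:
 *
 * I915_WRITE(_3D_CHICKEN3,
 *            _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
 */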
#define _3D_CHICKEN 0x02084
#define _3D_CHICKEN _MMIO(0x2084)
#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 0x0208c
#define _3D_CHICKEN2 _MMIO(0x208c)
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
* particular danger of not doing so is not specified.
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN3 _MMIO(0x2090)
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
 
#define MI_MODE 0x0209c
#define MI_MODE _MMIO(0x209c)
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
1696,8 → 1756,8
# define MODE_IDLE (1 << 9)
# define STOP_RING (1 << 8)
 
#define GEN6_GT_MODE 0x20d0
#define GEN7_GT_MODE 0x7008
#define GEN6_GT_MODE _MMIO(0x20d0)
#define GEN7_GT_MODE _MMIO(0x7008)
#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
1707,9 → 1767,9
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
 
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_INTERRUPT_STEERING (1<<14)
#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
1727,29 → 1787,29
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
 
#define VLV_GU_CTL0 (VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 (VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
#define IER _MMIO(0x20a0)
#define IIR _MMIO(0x20a4)
#define IMR _MMIO(0x20a8)
#define ISR _MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
#define GINT_DIS (1<<22)
#define GCFG_DIS (1<<8)
#define VLV_GUNIT_CLOCK_GATE2 (VLV_DISPLAY_BASE + 0x2064)
#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064)
#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
#define VLV_IIR _MMIO(VLV_DISPLAY_BASE + 0x20a4)
#define VLV_IMR _MMIO(VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR _MMIO(VLV_DISPLAY_BASE + 0x20ac)
#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
 
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0
#define EMR 0x020b4
#define ESR 0x020b8
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
#define ESR _MMIO(0x20b8)
#define GM45_ERROR_PAGE_TABLE (1<<5)
#define GM45_ERROR_MEM_PRIV (1<<4)
#define I915_ERROR_PAGE_TABLE (1<<4)
1756,7 → 1816,7
#define GM45_ERROR_CP_PRIV (1<<3)
#define I915_ERROR_MEMORY_REFRESH (1<<1)
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
#define INSTPM _MMIO(0x20c0)
#define INSTPM_SELF_EN (1<<12) /* 915GM only */
#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
1764,14 → 1824,14
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
#define INSTPM_TLB_INVALIDATE (1<<9)
#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define MEM_MODE 0x020cc
#define ACTHD _MMIO(0x20c8)
#define MEM_MODE _MMIO(0x20cc)
#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define FW_BLC _MMIO(0x20d8)
#define FW_BLC2 _MMIO(0x20dc)
#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
#define FW_BLC_SELF_EN_MASK (1<<31)
#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
#define FW_BLC_SELF_EN (1<<15) /* 945 only */
1779,7 → 1839,7
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
 
/* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default
1843,11 → 1903,11
#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
 
#define MI_STATE 0x020e4 /* gen2 only */
#define MI_STATE _MMIO(0x20e4) /* gen2 only */
#define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */
#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
 
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
1856,32 → 1916,32
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 0x101008
#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
#define GFX_FLSH_CNTL_EN (1<<0)
#define ECOSKPD 0x021d0
#define ECOSKPD _MMIO(0x21d0)
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
 
#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
#define RC_OP_FLUSH_ENABLE (1<<0)
#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
 
#define GEN6_BLITTER_ECOSKPD 0x221d0
#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0)
#define GEN6_BLITTER_LOCK_SHIFT 16
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
 
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
 
/* Fuse readout registers for GT */
#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
1893,7 → 1953,7
#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
 
#define GEN8_FUSE2 0x9120
#define GEN8_FUSE2 _MMIO(0x9120)
#define GEN8_F2_SS_DIS_SHIFT 21
#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
#define GEN8_F2_S_ENA_SHIFT 25
1902,22 → 1962,22
#define GEN9_F2_SS_DIS_SHIFT 20
#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
 
#define GEN8_EU_DISABLE0 0x9134
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN8_EU_DIS0_S0_MASK 0xffffff
#define GEN8_EU_DIS0_S1_SHIFT 24
#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
 
#define GEN8_EU_DISABLE1 0x9138
#define GEN8_EU_DISABLE1 _MMIO(0x9138)
#define GEN8_EU_DIS1_S1_MASK 0xffff
#define GEN8_EU_DIS1_S2_SHIFT 16
#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
 
#define GEN8_EU_DISABLE2 0x913c
#define GEN8_EU_DISABLE2 _MMIO(0x913c)
#define GEN8_EU_DIS2_S2_MASK 0xff
 
#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4)
#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)
 
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
1995,9 → 2055,9
#define I915_ASLE_INTERRUPT (1<<0)
#define I915_BSD_USER_INTERRUPT (1<<25)
 
#define GEN6_BSD_RNCID 0x12198
#define GEN6_BSD_RNCID _MMIO(0x12198)
 
#define GEN7_FF_THREAD_MODE 0x20a0
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
2018,9 → 2078,9
* Framebuffer compression (915+ only)
*/
 
#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
#define FBC_LL_BASE 0x03204 /* 4k page aligned */
#define FBC_CONTROL 0x03208
#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
#define FBC_CONTROL _MMIO(0x3208)
#define FBC_CTL_EN (1<<31)
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
2028,14 → 2088,14
#define FBC_CTL_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO_SHIFT (0)
#define FBC_COMMAND 0x0320c
#define FBC_COMMAND _MMIO(0x320c)
#define FBC_CMD_COMPRESS (1<<0)
#define FBC_STATUS 0x03210
#define FBC_STATUS _MMIO(0x3210)
#define FBC_STAT_COMPRESSING (1<<31)
#define FBC_STAT_COMPRESSED (1<<30)
#define FBC_STAT_MODIFIED (1<<29)
#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 0x03214
#define FBC_CONTROL2 _MMIO(0x3214)
#define FBC_CTL_FENCE_DBL (0<<4)
#define FBC_CTL_IDLE_IMM (0<<2)
#define FBC_CTL_IDLE_FULL (1<<2)
2043,17 → 2103,17
#define FBC_CTL_IDLE_DEBUG (3<<2)
#define FBC_CTL_CPU_FENCE (1<<1)
#define FBC_CTL_PLANE(plane) ((plane)<<0)
#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
#define FBC_TAG(i) (0x03300 + (i) * 4)
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
 
#define FBC_STATUS2 0x43214
#define FBC_STATUS2 _MMIO(0x43214)
#define FBC_COMPRESSION_MASK 0x7ff
 
#define FBC_LL_SIZE (1536)
 
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE 0x3200
#define DPFC_CONTROL 0x3208
#define DPFC_CB_BASE _MMIO(0x3200)
#define DPFC_CONTROL _MMIO(0x3208)
#define DPFC_CTL_EN (1<<31)
#define DPFC_CTL_PLANE(plane) ((plane)<<30)
#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
2064,37 → 2124,37
#define DPFC_CTL_LIMIT_1X (0<<6)
#define DPFC_CTL_LIMIT_2X (1<<6)
#define DPFC_CTL_LIMIT_4X (2<<6)
#define DPFC_RECOMP_CTL 0x320c
#define DPFC_RECOMP_CTL _MMIO(0x320c)
#define DPFC_RECOMP_STALL_EN (1<<27)
#define DPFC_RECOMP_STALL_WM_SHIFT (16)
#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
#define DPFC_STATUS 0x3210
#define DPFC_STATUS _MMIO(0x3210)
#define DPFC_INVAL_SEG_SHIFT (16)
#define DPFC_INVAL_SEG_MASK (0x07ff0000)
#define DPFC_COMP_SEG_SHIFT (0)
#define DPFC_COMP_SEG_MASK (0x000003ff)
#define DPFC_STATUS2 0x3214
#define DPFC_FENCE_YOFF 0x3218
#define DPFC_CHICKEN 0x3224
#define DPFC_STATUS2 _MMIO(0x3214)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
#define DPFC_CHICKEN _MMIO(0x3224)
#define DPFC_HT_MODIFY (1<<31)
 
/* Framebuffer compression for Ironlake */
#define ILK_DPFC_CB_BASE 0x43200
#define ILK_DPFC_CONTROL 0x43208
#define ILK_DPFC_CB_BASE _MMIO(0x43200)
#define ILK_DPFC_CONTROL _MMIO(0x43208)
#define FBC_CTL_FALSE_COLOR (1<<10)
/* Bits 28:8 are reserved */
#define DPFC_RESERVED (0x1FFFFF00)
#define ILK_DPFC_RECOMP_CTL 0x4320c
#define ILK_DPFC_STATUS 0x43210
#define ILK_DPFC_FENCE_YOFF 0x43218
#define ILK_DPFC_CHICKEN 0x43224
#define ILK_FBC_RT_BASE 0x2128
#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
#define ILK_DPFC_STATUS _MMIO(0x43210)
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
 
#define ILK_DISPLAY_CHICKEN1 0x42000
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
#define ILK_FBCQ_DIS (1<<22)
#define ILK_PABSTRETCH_DIS (1<<21)
 
2104,17 → 2164,17
*
* The following two registers are of type GTTMMADR
*/
#define SNB_DPFC_CTL_SA 0x100100
#define SNB_DPFC_CTL_SA _MMIO(0x100100)
#define SNB_CPU_FENCE_ENABLE (1<<29)
#define DPFC_CPU_FENCE_OFFSET 0x100104
#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
 
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE 0x7020
#define IVB_FBC_RT_BASE _MMIO(0x7020)
 
#define IPS_CTL 0x43408
#define IPS_CTL _MMIO(0x43408)
#define IPS_ENABLE (1 << 31)
 
#define MSG_FBC_REND_STATE 0x50380
#define MSG_FBC_REND_STATE _MMIO(0x50380)
#define FBC_REND_NUKE (1<<2)
#define FBC_REND_CACHE_CLEAN (1<<1)
 
2121,14 → 2181,14
/*
* GPIO regs
*/
#define GPIOA 0x5010
#define GPIOB 0x5014
#define GPIOC 0x5018
#define GPIOD 0x501c
#define GPIOE 0x5020
#define GPIOF 0x5024
#define GPIOG 0x5028
#define GPIOH 0x502c
#define GPIOA _MMIO(0x5010)
#define GPIOB _MMIO(0x5014)
#define GPIOC _MMIO(0x5018)
#define GPIOD _MMIO(0x501c)
#define GPIOE _MMIO(0x5020)
#define GPIOF _MMIO(0x5024)
#define GPIOG _MMIO(0x5028)
#define GPIOH _MMIO(0x502c)
# define GPIO_CLOCK_DIR_MASK (1 << 0)
# define GPIO_CLOCK_DIR_IN (0 << 1)
# define GPIO_CLOCK_DIR_OUT (1 << 1)
2144,7 → 2204,7
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
 
#define GMBUS0 (dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
#define GMBUS_RATE_100KHZ (0<<8)
#define GMBUS_RATE_50KHZ (1<<8)
#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
2163,7 → 2223,7
#define GMBUS_PIN_2_BXT 2
#define GMBUS_PIN_3_BXT 3
#define GMBUS_NUM_PINS 7 /* including 0 */
#define GMBUS1 (dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
#define GMBUS_ENT (1<<29) /* enable timeout */
2177,7 → 2237,7
#define GMBUS_SLAVE_ADDR_SHIFT 1
#define GMBUS_SLAVE_READ (1<<0)
#define GMBUS_SLAVE_WRITE (0<<0)
#define GMBUS2 (dev_priv->gpio_mmio_base + 0x5108) /* status */
#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
#define GMBUS_INUSE (1<<15)
#define GMBUS_HW_WAIT_PHASE (1<<14)
#define GMBUS_STALL_TIMEOUT (1<<13)
2185,14 → 2245,14
#define GMBUS_HW_RDY (1<<11)
#define GMBUS_SATOER (1<<10)
#define GMBUS_ACTIVE (1<<9)
#define GMBUS3 (dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
#define GMBUS4 (dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
#define GMBUS_NAK_EN (1<<3)
#define GMBUS_IDLE_EN (1<<2)
#define GMBUS_HW_WAIT_EN (1<<1)
#define GMBUS_HW_RDY_EN (1<<0)
#define GMBUS5 (dev_priv->gpio_mmio_base + 0x5120) /* byte index */
#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
#define GMBUS_2BYTE_INDEX_EN (1<<31)
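/*
 * Illustrative GMBUS read sketch (hedged; it mirrors the flow in intel_i2c.c
 * and uses cycle/count fields defined in parts of this header elided here):
 * select a pin pair, start a wait cycle, poll status, then pull the data out
 * of the shared buffer:
 *
 * I915_WRITE(GMBUS0, GMBUS_RATE_100KHZ | pin);
 * I915_WRITE(GMBUS1, GMBUS_SW_RDY | GMBUS_CYCLE_WAIT |
 *            (len << GMBUS_BYTE_COUNT_SHIFT) |
 *            (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ);
 * if (wait_for(I915_READ(GMBUS2) & (GMBUS_SATOER | GMBUS_HW_RDY), 50) == 0)
 *         val = I915_READ(GMBUS3);   bytes 3-0 of the transfer
 */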
 
/*
2201,11 → 2261,11
#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
 
#define VGA0 0x6000
#define VGA1 0x6004
#define VGA_PD 0x6010
#define VGA0 _MMIO(0x6000)
#define VGA1 _MMIO(0x6004)
#define VGA_PD _MMIO(0x6010)
#define VGA0_PD_P2_DIV_4 (1 << 7)
#define VGA0_PD_P1_DIV_2 (1 << 5)
#define VGA0_PD_P1_SHIFT 0
2241,9 → 2301,9
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
 
/* Additional CHV pll/phy registers */
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
#define PHY_LDO_DELAY_0NS 0x0
#define PHY_LDO_DELAY_200NS 0x1
2254,7 → 2314,7
#define PHY_CH_DEEP_PSR 0x7
#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
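/* Worked example (illustrative): PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, 0, 1) =
 * 0x7 << (6*0 + 3*1 + 2) = 0x7 << 5, i.e. the mode field for PHY0/CH1. */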
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
2300,7 → 2360,7
#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
 
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
2339,12 → 2399,12
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
 
#define _FPA0 0x06040
#define _FPA1 0x06044
#define _FPB0 0x06048
#define _FPB1 0x0604c
#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
#define _FPA0 0x6040
#define _FPA1 0x6044
#define _FPB0 0x6048
#define _FPB1 0x604c
#define FP0(pipe) _MMIO_PIPE(pipe, _FPA0, _FPB0)
#define FP1(pipe) _MMIO_PIPE(pipe, _FPA1, _FPB1)
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
2353,7 → 2413,7
#define FP_M2_DIV_MASK 0x0000003f
#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
#define DPLL_TEST 0x606c
#define DPLL_TEST _MMIO(0x606c)
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
2364,12 → 2424,12
#define DPLLA_TEST_N_BYPASS (1 << 3)
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE 0x6104
#define D_STATE _MMIO(0x6104)
#define DSTATE_GFX_RESET_I830 (1<<6)
#define DSTATE_PLL_D3_OFF (1<<3)
#define DSTATE_GFX_CLOCK_GATING (1<<1)
#define DSTATE_DOT_CLOCK_GATING (1<<0)
#define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200)
#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
2408,7 → 2468,7
# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
 
#define RENCLK_GATE_D1 0x6204
#define RENCLK_GATE_D1 _MMIO(0x6204)
# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */
# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */
# define PC_FE_CLOCK_GATE_DISABLE (1 << 11)
2472,28 → 2532,28
# define I965_FT_CLOCK_GATE_DISABLE (1 << 1)
# define I965_DM_CLOCK_GATE_DISABLE (1 << 0)
 
#define RENCLK_GATE_D2 0x6208
#define RENCLK_GATE_D2 _MMIO(0x6208)
#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
 
#define VDECCLK_GATE_D 0x620C /* g4x only */
#define VDECCLK_GATE_D _MMIO(0x620C) /* g4x only */
#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4)
 
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
#define RAMCLK_GATE_D _MMIO(0x6210) /* CRL only */
#define DEUC _MMIO(0x6214) /* CRL only */
 
#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
#define FW_CSPWRDWNEN (1<<15)
 
#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
 
#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
#define CDCLK_FREQ_SHIFT 4
#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
#define CZCLK_FREQ_MASK 0xf
 
#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C)
#define GCI_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x650C)
#define PFI_CREDIT_63 (9 << 28) /* chv only */
#define PFI_CREDIT_31 (8 << 28) /* chv only */
#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
2500,7 → 2560,7
#define PFI_CREDIT_RESEND (1 << 27)
#define VGA_FAST_MODE_DISABLE (1 << 14)
 
#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
#define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510)
 
/*
* Palette regs
2508,7 → 2568,7
#define PALETTE_A_OFFSET 0xa000
#define PALETTE_B_OFFSET 0xa800
#define CHV_PALETTE_C_OFFSET 0xc000
#define PALETTE(pipe, i) (dev_priv->info.palette_offsets[pipe] + \
#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \
dev_priv->info.display_mmio_offset + (i) * 4)
 
/* MCH MMIO space */
2527,16 → 2587,16
 
#define MCHBAR_MIRROR_BASE_SNB 0x140000
 
#define CTG_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x34)
#define ELK_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x48)
#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34)
#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
 
/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
 
/* 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200)
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
2543,21 → 2603,21
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
#define DCC2 0x10204
#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204)
#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
 
/* Pineview MCH register contains DDR3 setting */
#define CSHRDDR3CTL 0x101a8
#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8)
#define CSHRDDR3CTL_DDR3 (1 << 2)
 
/* 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
#define C1DRB3 0x10606
#define C0DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x206)
#define C1DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x606)
 
/* snb MCH registers for reading the DRAM channel configuration */
#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004)
#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008)
#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
#define MAD_DIMM_ECC_MASK (0x3 << 24)
#define MAD_DIMM_ECC_OFF (0x0 << 24)
#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
2577,14 → 2637,14
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
 
/* snb MCH registers for priority tuning */
#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
 
#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c)
#define MCH_SECP_NRG_STTS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x592c)
 
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
2600,26 → 2660,26
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
 
#define HPLLVCO (MCHBAR_MIRROR_BASE + 0xc38)
#define HPLLVCO_MOBILE (MCHBAR_MIRROR_BASE + 0xc0f)
#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38)
#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
 
#define TSC1 0x11001
#define TSC1 _MMIO(0x11001)
#define TSE (1<<0)
#define TR1 0x11006
#define TSFS 0x11020
#define TR1 _MMIO(0x11006)
#define TSFS _MMIO(0x11020)
#define TSFS_SLOPE_MASK 0x0000ff00
#define TSFS_SLOPE_SHIFT 8
#define TSFS_INTR_MASK 0x000000ff
 
#define CRSTANDVID 0x11100
#define PXVFREQ(i) (0x11110 + (i) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
#define CRSTANDVID _MMIO(0x11100)
#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
#define PXVFREQ_PX_MASK 0x7f000000
#define PXVFREQ_PX_SHIFT 24
#define VIDFREQ_BASE 0x11110
#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
#define VIDFREQ2 0x11114
#define VIDFREQ3 0x11118
#define VIDFREQ4 0x1111c
#define VIDFREQ_BASE _MMIO(0x11110)
#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */
#define VIDFREQ2 _MMIO(0x11114)
#define VIDFREQ3 _MMIO(0x11118)
#define VIDFREQ4 _MMIO(0x1111c)
#define VIDFREQ_P0_MASK 0x1f000000
#define VIDFREQ_P0_SHIFT 24
#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
2631,8 → 2691,8
#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
#define VIDFREQ_P1_CSCLK_SHIFT 4
#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
#define INTTOEXT_BASE_ILK 0x11300
#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
#define INTTOEXT_BASE_ILK _MMIO(0x11300)
#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */
#define INTTOEXT_MAP3_SHIFT 24
#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
#define INTTOEXT_MAP2_SHIFT 16
2641,7 → 2701,7
#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
#define INTTOEXT_MAP0_SHIFT 0
#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
#define MEMSWCTL 0x11170 /* Ironlake only */
#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */
#define MEMCTL_CMD_MASK 0xe000
#define MEMCTL_CMD_SHIFT 13
#define MEMCTL_CMD_RCLK_OFF 0
2656,8 → 2716,8
#define MEMCTL_FREQ_SHIFT 8
#define MEMCTL_SFCAVM (1<<7)
#define MEMCTL_TGT_VID_MASK 0x007f
#define MEMIHYST 0x1117c
#define MEMINTREN 0x11180 /* 16 bits */
#define MEMIHYST _MMIO(0x1117c)
#define MEMINTREN _MMIO(0x11180) /* 16 bits */
#define MEMINT_RSEXIT_EN (1<<8)
#define MEMINT_CX_SUPR_EN (1<<7)
#define MEMINT_CONT_BUSY_EN (1<<6)
2667,7 → 2727,7
#define MEMINT_UP_EVAL_EN (1<<2)
#define MEMINT_DOWN_EVAL_EN (1<<1)
#define MEMINT_SW_CMD_EN (1<<0)
#define MEMINTRSTR 0x11182 /* 16 bits */
#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
#define MEM_RSEXIT_MASK 0xc000
#define MEM_RSEXIT_SHIFT 14
#define MEM_CONT_BUSY_MASK 0x3000
2687,7 → 2747,7
#define MEM_INT_STEER_CMR 1
#define MEM_INT_STEER_SMI 2
#define MEM_INT_STEER_SCI 3
#define MEMINTRSTS 0x11184
#define MEMINTRSTS _MMIO(0x11184)
#define MEMINT_RSEXIT (1<<7)
#define MEMINT_CONT_BUSY (1<<6)
#define MEMINT_AVG_BUSY (1<<5)
2696,7 → 2756,7
#define MEMINT_UP_EVAL (1<<2)
#define MEMINT_DOWN_EVAL (1<<1)
#define MEMINT_SW_CMD (1<<0)
#define MEMMODECTL 0x11190
#define MEMMODECTL _MMIO(0x11190)
#define MEMMODE_BOOST_EN (1<<31)
#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
#define MEMMODE_BOOST_FREQ_SHIFT 24
2713,8 → 2773,8
#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
#define MEMMODE_FMAX_SHIFT 4
#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
#define RCBMAXAVG 0x1119c
#define MEMSWCTL2 0x1119e /* Cantiga only */
#define RCBMAXAVG _MMIO(0x1119c)
#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */
#define SWMEMCMD_RENDER_OFF (0 << 13)
#define SWMEMCMD_RENDER_ON (1 << 13)
#define SWMEMCMD_SWFREQ (2 << 13)
2726,11 → 2786,11
#define SWFREQ_MASK 0x0380 /* P0-7 */
#define SWFREQ_SHIFT 7
#define TARVID_MASK 0x001f
#define MEMSTAT_CTG 0x111a0
#define RCBMINAVG 0x111a0
#define RCUPEI 0x111b0
#define RCDNEI 0x111b4
#define RSTDBYCTL 0x111b8
#define MEMSTAT_CTG _MMIO(0x111a0)
#define RCBMINAVG _MMIO(0x111a0)
#define RCUPEI _MMIO(0x111b0)
#define RCDNEI _MMIO(0x111b4)
#define RSTDBYCTL _MMIO(0x111b8)
#define RS1EN (1<<31)
#define RS2EN (1<<30)
#define RS3EN (1<<29)
2774,10 → 2834,10
#define RS_CSTATE_C367_RS2 (3<<4)
#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
#define VIDCTL 0x111c0
#define VIDSTS 0x111c8
#define VIDSTART 0x111cc /* 8 bits */
#define MEMSTAT_ILK 0x111f8
#define VIDCTL _MMIO(0x111c0)
#define VIDSTS _MMIO(0x111c8)
#define VIDSTART _MMIO(0x111cc) /* 8 bits */
#define MEMSTAT_ILK _MMIO(0x111f8)
#define MEMSTAT_VID_MASK 0x7f00
#define MEMSTAT_VID_SHIFT 8
#define MEMSTAT_PSTATE_MASK 0x00f8
2788,55 → 2848,55
#define MEMSTAT_SRC_CTL_TRB 1
#define MEMSTAT_SRC_CTL_THM 2
#define MEMSTAT_SRC_CTL_STDBY 3
#define RCPREVBSYTUPAVG 0x113b8
#define RCPREVBSYTDNAVG 0x113bc
#define PMMISC 0x11214
#define RCPREVBSYTUPAVG _MMIO(0x113b8)
#define RCPREVBSYTDNAVG _MMIO(0x113bc)
#define PMMISC _MMIO(0x11214)
#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
#define SDEW 0x1124c
#define CSIEW0 0x11250
#define CSIEW1 0x11254
#define CSIEW2 0x11258
#define PEW(i) (0x1125c + (i) * 4) /* 5 registers */
#define DEW(i) (0x11270 + (i) * 4) /* 3 registers */
#define MCHAFE 0x112c0
#define CSIEC 0x112e0
#define DMIEC 0x112e4
#define DDREC 0x112e8
#define PEG0EC 0x112ec
#define PEG1EC 0x112f0
#define GFXEC 0x112f4
#define RPPREVBSYTUPAVG 0x113b8
#define RPPREVBSYTDNAVG 0x113bc
#define ECR 0x11600
#define SDEW _MMIO(0x1124c)
#define CSIEW0 _MMIO(0x11250)
#define CSIEW1 _MMIO(0x11254)
#define CSIEW2 _MMIO(0x11258)
#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */
#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */
#define MCHAFE _MMIO(0x112c0)
#define CSIEC _MMIO(0x112e0)
#define DMIEC _MMIO(0x112e4)
#define DDREC _MMIO(0x112e8)
#define PEG0EC _MMIO(0x112ec)
#define PEG1EC _MMIO(0x112f0)
#define GFXEC _MMIO(0x112f4)
#define RPPREVBSYTUPAVG _MMIO(0x113b8)
#define RPPREVBSYTDNAVG _MMIO(0x113bc)
#define ECR _MMIO(0x11600)
#define ECR_GPFE (1<<31)
#define ECR_IMONE (1<<30)
#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
#define OGW0 0x11608
#define OGW1 0x1160c
#define EG0 0x11610
#define EG1 0x11614
#define EG2 0x11618
#define EG3 0x1161c
#define EG4 0x11620
#define EG5 0x11624
#define EG6 0x11628
#define EG7 0x1162c
#define PXW(i) (0x11664 + (i) * 4) /* 4 registers */
#define PXWL(i) (0x11680 + (i) * 4) /* 8 registers */
#define LCFUSE02 0x116c0
#define OGW0 _MMIO(0x11608)
#define OGW1 _MMIO(0x1160c)
#define EG0 _MMIO(0x11610)
#define EG1 _MMIO(0x11614)
#define EG2 _MMIO(0x11618)
#define EG3 _MMIO(0x1161c)
#define EG4 _MMIO(0x11620)
#define EG5 _MMIO(0x11624)
#define EG6 _MMIO(0x11628)
#define EG7 _MMIO(0x1162c)
#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */
#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */
#define LCFUSE02 _MMIO(0x116c0)
#define LCFUSE_HIV_MASK 0x000000ff
#define CSIPLL0 0x12c10
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
#define CSIPLL0 _MMIO(0x12c10)
#define DDRMPLL1 _MMIO(0X12c20)
#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
 
#define GEN6_GT_THREAD_STATUS_REG 0x13805c
#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
 
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
#define BXT_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x7070)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP 0x138170
#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948)
#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070)
#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
 
/*
* Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
2857,7 → 2917,7
/*
* Logical Context regs
*/
#define CCID 0x2180
#define CCID _MMIO(0x2180)
#define CCID_EN (1<<0)
/*
* Notes on SNB/IVB/VLV context size:
2872,7 → 2932,7
* - GT1 size just indicates how much of render context
* doesn't need saving on GT1
*/
#define CXT_SIZE 0x21a0
#define CXT_SIZE _MMIO(0x21a0)
#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
2881,7 → 2941,7
#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
GEN6_CXT_PIPELINE_SIZE(cxt_reg))
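/*
 * Hedged decode sketch (not part of this diff; the *64 granularity follows
 * the driver's get_context_size() and is an assumption here):
 *
 * u32 reg = I915_READ(CXT_SIZE);
 * u32 ctx_bytes = GEN6_CXT_TOTAL_SIZE(reg) * 64;
 */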
#define GEN7_CXT_SIZE 0x21a8
#define GEN7_CXT_SIZE _MMIO(0x21a8)
#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
2901,8 → 2961,8
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
 
#define CHV_CLK_CTL1 0x101100
#define VLV_CLK_CTL2 0x101104
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
 
/*
2909,17 → 2969,24
* Overlay regs
*/
 
#define OVADD 0x30000
#define DOVSTA 0x30008
#define OVADD _MMIO(0x30000)
#define DOVSTA _MMIO(0x30008)
#define OC_BUF (0x3<<20)
#define OGAMC5 0x30010
#define OGAMC4 0x30014
#define OGAMC3 0x30018
#define OGAMC2 0x3001c
#define OGAMC1 0x30020
#define OGAMC0 0x30024
#define OGAMC5 _MMIO(0x30010)
#define OGAMC4 _MMIO(0x30014)
#define OGAMC3 _MMIO(0x30018)
#define OGAMC2 _MMIO(0x3001c)
#define OGAMC1 _MMIO(0x30020)
#define OGAMC0 _MMIO(0x30024)
 
/*
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
 
/*
* Display engine regs
*/
 
2977,28 → 3044,18
#define _PIPE_CRC_RES_4_B_IVB 0x61070
#define _PIPE_CRC_RES_5_B_IVB 0x61074
 
#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A)
#define PIPE_CRC_RES_1_IVB(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB)
#define PIPE_CRC_RES_2_IVB(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB)
#define PIPE_CRC_RES_3_IVB(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB)
#define PIPE_CRC_RES_4_IVB(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB)
#define PIPE_CRC_RES_5_IVB(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB)
#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_CTL_A)
#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_1_A_IVB)
#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_2_A_IVB)
#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_3_A_IVB)
#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_4_A_IVB)
#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_5_A_IVB)
 
#define PIPE_CRC_RES_RED(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
#define PIPE_CRC_RES_GREEN(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
#define PIPE_CRC_RES_BLUE(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
#define PIPE_CRC_RES_RES1_I915(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
#define PIPE_CRC_RES_RES2_G4X(pipe) \
_TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RED_A)
#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_GREEN_A)
#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_BLUE_A)
#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915)
#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
 
/* Pipe A timing regs */
#define _HTOTAL_A 0x60000
3030,20 → 3087,20
#define CHV_TRANSCODER_C_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
 
#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
dev_priv->info.display_mmio_offset)
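/* Worked example (illustrative, assuming the usual trans_offsets values of
 * 0x60000 for transcoder A and 0x61000 for B): HTOTAL(TRANSCODER_B) resolves
 * to _MMIO(0x61000 - 0x60000 + _HTOTAL_A + display_mmio_offset), i.e.
 * 0x61000 on platforms with a zero display_mmio_offset. */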
 
#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A)
#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A)
#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A)
#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A)
#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A)
#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A)
#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
#define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A)
#define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A)
#define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A)
#define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A)
#define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A)
#define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A)
#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
 
/* VLV eDP PSR registers */
#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
3059,7 → 3116,7
#define VLV_EDP_PSR_DBL_FRAME (1<<10)
#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB)
#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
 
#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
3066,7 → 3123,7
#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB)
#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
 
#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
3079,11 → 3136,12
#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
#define VLV_EDP_PSR_EXIT (5<<0)
#define VLV_EDP_PSR_IN_TRANS (1<<7)
#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB)
#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
 
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define HSW_EDP_PSR_BASE 0x64800
#define BDW_EDP_PSR_BASE 0x6f800
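/* dev_priv->psr_mmio_base is presumably initialized at driver load to one of
 * the two bases above (HSW vs BDW+); that setup is outside this hunk. */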
#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
#define EDP_PSR_ENABLE (1<<31)
#define BDW_PSR_SINGLE_FRAME (1<<30)
#define EDP_PSR_LINK_STANDBY (1<<27)
3106,14 → 3164,10
#define EDP_PSR_TP1_TIME_0us (3<<4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
 
#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
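/* i.e. (illustrative) the former EDP_PSR_AUX_DATA1..5 map to
 * EDP_PSR_AUX_DATA(0)..EDP_PSR_AUX_DATA(4). */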
 
#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
#define EDP_PSR_STATUS_CTL _MMIO(dev_priv->psr_mmio_base + 0x40)
#define EDP_PSR_STATUS_STATE_MASK (7<<29)
#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
3137,15 → 3191,15
#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
 
#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
 
#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
 
#define EDP_PSR2_CTL 0x6f900
#define EDP_PSR2_CTL _MMIO(0x6f900)
#define EDP_PSR2_ENABLE (1<<31)
#define EDP_SU_TRACK_ENABLE (1<<30)
#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
3160,9 → 3214,9
#define EDP_PSR2_IDLE_MASK 0xf
 
/* VGA port control */
#define ADPA 0x61100
#define PCH_ADPA 0xe1100
#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
 
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
3208,7 → 3262,7
 
 
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110)
#define PORT_HOTPLUG_EN _MMIO(dev_priv->info.display_mmio_offset + 0x61110)
#define PORTB_HOTPLUG_INT_EN (1 << 29)
#define PORTC_HOTPLUG_INT_EN (1 << 28)
#define PORTD_HOTPLUG_INT_EN (1 << 27)
3238,7 → 3292,7
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
 
#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
/*
* HDMI/DP bits are g4x+
*
3304,21 → 3358,23
 
/* SDVO and HDMI port control.
* The same register may be used for SDVO or HDMI */
#define GEN3_SDVOB 0x61140
#define GEN3_SDVOC 0x61160
#define _GEN3_SDVOB 0x61140
#define _GEN3_SDVOC 0x61160
#define GEN3_SDVOB _MMIO(_GEN3_SDVOB)
#define GEN3_SDVOC _MMIO(_GEN3_SDVOC)
#define GEN4_HDMIB GEN3_SDVOB
#define GEN4_HDMIC GEN3_SDVOC
#define VLV_HDMIB (VLV_DISPLAY_BASE + GEN4_HDMIB)
#define VLV_HDMIC (VLV_DISPLAY_BASE + GEN4_HDMIC)
#define CHV_HDMID (VLV_DISPLAY_BASE + 0x6116C)
#define PCH_SDVOB 0xe1140
#define VLV_HDMIB _MMIO(VLV_DISPLAY_BASE + 0x61140)
#define VLV_HDMIC _MMIO(VLV_DISPLAY_BASE + 0x61160)
#define CHV_HDMID _MMIO(VLV_DISPLAY_BASE + 0x6116C)
#define PCH_SDVOB _MMIO(0xe1140)
#define PCH_HDMIB PCH_SDVOB
#define PCH_HDMIC 0xe1150
#define PCH_HDMID 0xe1160
#define PCH_HDMIC _MMIO(0xe1150)
#define PCH_HDMID _MMIO(0xe1160)
 
#define PORT_DFT_I9XX 0x61150
#define PORT_DFT_I9XX _MMIO(0x61150)
#define DC_BALANCE_RESET (1 << 25)
#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define PORT_DFT2_G4X _MMIO(dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
3378,9 → 3434,12
 
 
/* DVO port control */
#define DVOA 0x61120
#define DVOB 0x61140
#define DVOC 0x61160
#define _DVOA 0x61120
#define DVOA _MMIO(_DVOA)
#define _DVOB 0x61140
#define DVOB _MMIO(_DVOB)
#define _DVOC 0x61160
#define DVOC _MMIO(_DVOC)
#define DVO_ENABLE (1 << 31)
#define DVO_PIPE_B_SELECT (1 << 30)
#define DVO_PIPE_STALL_UNUSED (0 << 28)
3405,14 → 3464,14
#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
#define DVO_PRESERVE_MASK (0x7<<24)
#define DVOA_SRCDIM 0x61124
#define DVOB_SRCDIM 0x61144
#define DVOC_SRCDIM 0x61164
#define DVOA_SRCDIM _MMIO(0x61124)
#define DVOB_SRCDIM _MMIO(0x61144)
#define DVOC_SRCDIM _MMIO(0x61164)
#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
#define DVO_SRCDIM_VERTICAL_SHIFT 0
 
/* LVDS port control */
#define LVDS 0x61180
#define LVDS _MMIO(0x61180)
/*
* Enables the LVDS port. This bit must be set before DPLLs are enabled, as
* the DPLL semantics change when the LVDS is assigned to that pipe.
3462,13 → 3521,13
#define LVDS_B0B3_POWER_UP (3 << 2)
 
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_DATA _MMIO(0x61178)
/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
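/*
 * Illustrative write sketch (hedged; it mirrors the pattern used by the HDMI
 * code, not part of this diff): after selecting a DIP in VIDEO_DIP_CTL, the
 * infoframe payload is pushed through this single data window one dword at
 * a time:
 *
 * for (i = 0; i < len; i += 4)
 *         I915_WRITE(VIDEO_DIP_DATA, *data++);
 */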
#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_CTL 0x61170
#define VIDEO_DIP_CTL _MMIO(0x61170)
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT(port) ((port) << 29)
3495,7 → 3554,7
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
 
/* Panel power sequencing */
#define PP_STATUS 0x61200
#define PP_STATUS _MMIO(0x61200)
#define PP_ON (1 << 31)
/*
* Indicates that all dependencies of the panel are on:
3521,14 → 3580,14
#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
#define PP_SEQUENCE_STATE_RESET (0xf << 0)
#define PP_CONTROL 0x61204
#define PP_CONTROL _MMIO(0x61204)
#define POWER_TARGET_ON (1 << 0)
#define PP_ON_DELAYS 0x61208
#define PP_OFF_DELAYS 0x6120c
#define PP_DIVISOR 0x61210
#define PP_ON_DELAYS _MMIO(0x61208)
#define PP_OFF_DELAYS _MMIO(0x6120c)
#define PP_DIVISOR _MMIO(0x61210)
 
/* Panel fitting */
#define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230)
#define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
3546,7 → 3605,7
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234)
#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
3558,25 → 3617,25
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
 
#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238)
#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238)
 
#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
_VLV_BLC_PWM_CTL2_B)
 
#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
_VLV_BLC_PWM_CTL_B)
 
#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
_VLV_BLC_HIST_CTL_B)
 
/* Backlight control */
#define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
#define BLC_PWM_CTL2 _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
3599,7 → 3658,7
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
#define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254)
#define BLC_PWM_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
3621,25 → 3680,25
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
 
#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260)
#define BLC_HIST_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61260)
#define BLM_HISTOGRAM_ENABLE (1 << 31)
 
/* New registers for PCH-split platforms. Safe where new bits show up, the
* register layout matches gen4 BLC_PWM_CTL[12]. */
#define BLC_PWM_CPU_CTL2 0x48250
#define BLC_PWM_CPU_CTL 0x48254
#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
#define BLC_PWM_CPU_CTL _MMIO(0x48254)
 
#define HSW_BLC_PWM2_CTL 0x48350
#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
 
/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
* like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
#define BLC_PWM_PCH_CTL1 0xc8250
#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
#define BLM_PCH_PWM_ENABLE (1 << 31)
#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
#define BLM_PCH_POLARITY (1 << 29)
#define BLC_PWM_PCH_CTL2 0xc8254
#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
 
#define UTIL_PIN_CTL 0x48400
#define UTIL_PIN_CTL _MMIO(0x48400)
#define UTIL_PIN_ENABLE (1 << 31)
 
#define UTIL_PIN_PIPE(x) ((x) << 29)
3659,18 → 3718,18
#define _BXT_BLC_PWM_FREQ2 0xC8354
#define _BXT_BLC_PWM_DUTY2 0xC8358
 
#define BXT_BLC_PWM_CTL(controller) _PIPE(controller, \
#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
_BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
#define BXT_BLC_PWM_FREQ(controller) _PIPE(controller, \
#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
_BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
#define BXT_BLC_PWM_DUTY(controller) _PIPE(controller, \
#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
_BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
 
#define PCH_GTC_CTL 0xe7000
#define PCH_GTC_CTL _MMIO(0xe7000)
#define PCH_GTC_ENABLE (1 << 31)
 
/* TV port control */
#define TV_CTL 0x68000
#define TV_CTL _MMIO(0x68000)
/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
/* Sources the TV encoder input from pipe B instead of A. */
3737,7 → 3796,7
# define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
# define TV_TEST_MODE_MASK (7 << 0)
 
#define TV_DAC 0x68004
#define TV_DAC _MMIO(0x68004)
# define TV_DAC_SAVE 0x00ffff00
/*
* Reports that DAC state change logic has reported change (RO).
3788,13 → 3847,13
* where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
* -1 (0x3) being the only legal negative value.
*/
#define TV_CSC_Y 0x68010
#define TV_CSC_Y _MMIO(0x68010)
# define TV_RY_MASK 0x07ff0000
# define TV_RY_SHIFT 16
# define TV_GY_MASK 0x00000fff
# define TV_GY_SHIFT 0
 
#define TV_CSC_Y2 0x68014
#define TV_CSC_Y2 _MMIO(0x68014)
# define TV_BY_MASK 0x07ff0000
# define TV_BY_SHIFT 16
/*
3805,13 → 3864,13
# define TV_AY_MASK 0x000003ff
# define TV_AY_SHIFT 0
 
#define TV_CSC_U 0x68018
#define TV_CSC_U _MMIO(0x68018)
# define TV_RU_MASK 0x07ff0000
# define TV_RU_SHIFT 16
# define TV_GU_MASK 0x000007ff
# define TV_GU_SHIFT 0
 
#define TV_CSC_U2 0x6801c
#define TV_CSC_U2 _MMIO(0x6801c)
# define TV_BU_MASK 0x07ff0000
# define TV_BU_SHIFT 16
/*
3822,13 → 3881,13
# define TV_AU_MASK 0x000003ff
# define TV_AU_SHIFT 0
 
#define TV_CSC_V 0x68020
#define TV_CSC_V _MMIO(0x68020)
# define TV_RV_MASK 0x0fff0000
# define TV_RV_SHIFT 16
# define TV_GV_MASK 0x000007ff
# define TV_GV_SHIFT 0
 
#define TV_CSC_V2 0x68024
#define TV_CSC_V2 _MMIO(0x68024)
# define TV_BV_MASK 0x07ff0000
# define TV_BV_SHIFT 16
/*
3839,7 → 3898,7
# define TV_AV_MASK 0x000007ff
# define TV_AV_SHIFT 0
 
#define TV_CLR_KNOBS 0x68028
#define TV_CLR_KNOBS _MMIO(0x68028)
/* 2s-complement brightness adjustment */
# define TV_BRIGHTNESS_MASK 0xff000000
# define TV_BRIGHTNESS_SHIFT 24
3853,7 → 3912,7
# define TV_HUE_MASK 0x000000ff
# define TV_HUE_SHIFT 0
 
#define TV_CLR_LEVEL 0x6802c
#define TV_CLR_LEVEL _MMIO(0x6802c)
/* Controls the DAC level for black */
# define TV_BLACK_LEVEL_MASK 0x01ff0000
# define TV_BLACK_LEVEL_SHIFT 16
3861,7 → 3920,7
# define TV_BLANK_LEVEL_MASK 0x000001ff
# define TV_BLANK_LEVEL_SHIFT 0
 
#define TV_H_CTL_1 0x68030
#define TV_H_CTL_1 _MMIO(0x68030)
/* Number of pixels in the hsync. */
# define TV_HSYNC_END_MASK 0x1fff0000
# define TV_HSYNC_END_SHIFT 16
3869,7 → 3928,7
# define TV_HTOTAL_MASK 0x00001fff
# define TV_HTOTAL_SHIFT 0
 
#define TV_H_CTL_2 0x68034
#define TV_H_CTL_2 _MMIO(0x68034)
/* Enables the colorburst (needed for non-component color) */
# define TV_BURST_ENA (1 << 31)
/* Offset of the colorburst from the start of hsync, in pixels minus one. */
3879,7 → 3938,7
# define TV_HBURST_LEN_SHIFT 0
# define TV_HBURST_LEN_MASK 0x0001fff
 
#define TV_H_CTL_3 0x68038
#define TV_H_CTL_3 _MMIO(0x68038)
/* End of hblank, measured in pixels minus one from start of hsync */
# define TV_HBLANK_END_SHIFT 16
# define TV_HBLANK_END_MASK 0x1fff0000
3887,7 → 3946,7
# define TV_HBLANK_START_SHIFT 0
# define TV_HBLANK_START_MASK 0x0001fff
 
#define TV_V_CTL_1 0x6803c
#define TV_V_CTL_1 _MMIO(0x6803c)
/* XXX */
# define TV_NBR_END_SHIFT 16
# define TV_NBR_END_MASK 0x07ff0000
3898,7 → 3957,7
# define TV_VI_END_F2_SHIFT 0
# define TV_VI_END_F2_MASK 0x0000003f
 
#define TV_V_CTL_2 0x68040
#define TV_V_CTL_2 _MMIO(0x68040)
/* Length of vsync, in half lines */
# define TV_VSYNC_LEN_MASK 0x07ff0000
# define TV_VSYNC_LEN_SHIFT 16
3914,7 → 3973,7
# define TV_VSYNC_START_F2_MASK 0x0000007f
# define TV_VSYNC_START_F2_SHIFT 0
 
#define TV_V_CTL_3 0x68044
#define TV_V_CTL_3 _MMIO(0x68044)
/* Enables generation of the equalization signal */
# define TV_EQUAL_ENA (1 << 31)
/* Length of vsync, in half lines */
3932,7 → 3991,7
# define TV_VEQ_START_F2_MASK 0x000007f
# define TV_VEQ_START_F2_SHIFT 0
 
#define TV_V_CTL_4 0x68048
#define TV_V_CTL_4 _MMIO(0x68048)
/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
3946,7 → 4005,7
# define TV_VBURST_END_F1_MASK 0x000000ff
# define TV_VBURST_END_F1_SHIFT 0
 
#define TV_V_CTL_5 0x6804c
#define TV_V_CTL_5 _MMIO(0x6804c)
/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
3960,7 → 4019,7
# define TV_VBURST_END_F2_MASK 0x000000ff
# define TV_VBURST_END_F2_SHIFT 0
 
#define TV_V_CTL_6 0x68050
#define TV_V_CTL_6 _MMIO(0x68050)
/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
3974,7 → 4033,7
# define TV_VBURST_END_F3_MASK 0x000000ff
# define TV_VBURST_END_F3_SHIFT 0
 
#define TV_V_CTL_7 0x68054
#define TV_V_CTL_7 _MMIO(0x68054)
/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
3988,7 → 4047,7
# define TV_VBURST_END_F4_MASK 0x000000ff
# define TV_VBURST_END_F4_SHIFT 0
 
#define TV_SC_CTL_1 0x68060
#define TV_SC_CTL_1 _MMIO(0x68060)
/* Turns on the first subcarrier phase generation DDA */
# define TV_SC_DDA1_EN (1 << 31)
/* Turns on the second subcarrier phase generation DDA */
4010,7 → 4069,7
# define TV_SCDDA1_INC_MASK 0x00000fff
# define TV_SCDDA1_INC_SHIFT 0
 
#define TV_SC_CTL_2 0x68064
#define TV_SC_CTL_2 _MMIO(0x68064)
/* Sets the rollover for the second subcarrier phase generation DDA */
# define TV_SCDDA2_SIZE_MASK 0x7fff0000
# define TV_SCDDA2_SIZE_SHIFT 16
4018,7 → 4077,7
# define TV_SCDDA2_INC_MASK 0x00007fff
# define TV_SCDDA2_INC_SHIFT 0
 
#define TV_SC_CTL_3 0x68068
#define TV_SC_CTL_3 _MMIO(0x68068)
/* Sets the rollover for the third subcarrier phase generation DDA */
# define TV_SCDDA3_SIZE_MASK 0x7fff0000
# define TV_SCDDA3_SIZE_SHIFT 16
4026,7 → 4085,7
# define TV_SCDDA3_INC_MASK 0x00007fff
# define TV_SCDDA3_INC_SHIFT 0
 
#define TV_WIN_POS 0x68070
#define TV_WIN_POS _MMIO(0x68070)
/* X coordinate of the display from the start of horizontal active */
# define TV_XPOS_MASK 0x1fff0000
# define TV_XPOS_SHIFT 16
4034,7 → 4093,7
# define TV_YPOS_MASK 0x00000fff
# define TV_YPOS_SHIFT 0
 
#define TV_WIN_SIZE 0x68074
#define TV_WIN_SIZE _MMIO(0x68074)
/* Horizontal size of the display window, measured in pixels */
# define TV_XSIZE_MASK 0x1fff0000
# define TV_XSIZE_SHIFT 16
4046,7 → 4105,7
# define TV_YSIZE_MASK 0x00000fff
# define TV_YSIZE_SHIFT 0
 
#define TV_FILTER_CTL_1 0x68080
#define TV_FILTER_CTL_1 _MMIO(0x68080)
/*
* Enables automatic scaling calculation.
*
4079,7 → 4138,7
# define TV_HSCALE_FRAC_MASK 0x00003fff
# define TV_HSCALE_FRAC_SHIFT 0
 
#define TV_FILTER_CTL_2 0x68084
#define TV_FILTER_CTL_2 _MMIO(0x68084)
/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
4095,7 → 4154,7
# define TV_VSCALE_FRAC_MASK 0x00007fff
# define TV_VSCALE_FRAC_SHIFT 0
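The scale factor these fields encode is int_part + frac_part / 2^15. A minimal stand-alone sketch of the decode, assuming the INT field sits at bits 17:15 (only the FRAC mask/shift appear in the hunk above):

/* Sketch: decode a 3.15 fixed-point TV vertical scale factor.
 * TV_VSCALE_INT_* is assumed from the 3.15 format; only the FRAC
 * fields are visible in the surrounding defines. */
#include <stdint.h>
#include <stdio.h>

#define TV_VSCALE_INT_MASK	0x00038000	/* assumed: bits 17:15 */
#define TV_VSCALE_INT_SHIFT	15
#define TV_VSCALE_FRAC_MASK	0x00007fff
#define TV_VSCALE_FRAC_SHIFT	0

double tv_vscale_to_double(uint32_t reg)
{
	uint32_t ip = (reg & TV_VSCALE_INT_MASK) >> TV_VSCALE_INT_SHIFT;
	uint32_t fp = (reg & TV_VSCALE_FRAC_MASK) >> TV_VSCALE_FRAC_SHIFT;

	return ip + fp / 32768.0;	/* 2^15 fractional steps */
}

int main(void)
{
	printf("%f\n", tv_vscale_to_double((1 << 15) | 16384));	/* 1.5 */
	return 0;
}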
 
#define TV_FILTER_CTL_3 0x68088
#define TV_FILTER_CTL_3 _MMIO(0x68088)
/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
4115,7 → 4174,7
# define TV_VSCALE_IP_FRAC_MASK 0x00007fff
# define TV_VSCALE_IP_FRAC_SHIFT 0
 
#define TV_CC_CONTROL 0x68090
#define TV_CC_CONTROL _MMIO(0x68090)
# define TV_CC_ENABLE (1 << 31)
/*
* Specifies which field to send the CC data in.
4131,7 → 4190,7
# define TV_CC_LINE_MASK 0x0000003f
# define TV_CC_LINE_SHIFT 0
 
#define TV_CC_DATA 0x68094
#define TV_CC_DATA _MMIO(0x68094)
# define TV_CC_RDY (1 << 31)
/* Second word of CC data to be transmitted. */
# define TV_CC_DATA_2_MASK 0x007f0000
4140,20 → 4199,20
# define TV_CC_DATA_1_MASK 0x0000007f
# define TV_CC_DATA_1_SHIFT 0
 
#define TV_H_LUMA(i) (0x68100 + (i) * 4) /* 60 registers */
#define TV_H_CHROMA(i) (0x68200 + (i) * 4) /* 60 registers */
#define TV_V_LUMA(i) (0x68300 + (i) * 4) /* 43 registers */
#define TV_V_CHROMA(i) (0x68400 + (i) * 4) /* 43 registers */
#define TV_H_LUMA(i) _MMIO(0x68100 + (i) * 4) /* 60 registers */
#define TV_H_CHROMA(i) _MMIO(0x68200 + (i) * 4) /* 60 registers */
#define TV_V_LUMA(i) _MMIO(0x68300 + (i) * 4) /* 43 registers */
#define TV_V_CHROMA(i) _MMIO(0x68400 + (i) * 4) /* 43 registers */
 
/* Display Port */
#define DP_A 0x64000 /* eDP */
#define DP_B 0x64100
#define DP_C 0x64200
#define DP_D 0x64300
#define DP_A _MMIO(0x64000) /* eDP */
#define DP_B _MMIO(0x64100)
#define DP_C _MMIO(0x64200)
#define DP_D _MMIO(0x64300)
 
#define VLV_DP_B (VLV_DISPLAY_BASE + DP_B)
#define VLV_DP_C (VLV_DISPLAY_BASE + DP_C)
#define CHV_DP_D (VLV_DISPLAY_BASE + DP_D)
#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
 
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
4207,7 → 4266,7
 
/* eDP */
#define DP_PLL_FREQ_270MHZ (0 << 16)
#define DP_PLL_FREQ_160MHZ (1 << 16)
#define DP_PLL_FREQ_162MHZ (1 << 16)
#define DP_PLL_FREQ_MASK (3 << 16)
 
/* locked once port is enabled */
4240,34 → 4299,37
* is 20 bytes in each direction, hence the 5 fixed
* data registers
*/
#define DPA_AUX_CH_CTL 0x64010
#define DPA_AUX_CH_DATA1 0x64014
#define DPA_AUX_CH_DATA2 0x64018
#define DPA_AUX_CH_DATA3 0x6401c
#define DPA_AUX_CH_DATA4 0x64020
#define DPA_AUX_CH_DATA5 0x64024
#define _DPA_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64010)
#define _DPA_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64014)
#define _DPA_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64018)
#define _DPA_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6401c)
#define _DPA_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64020)
#define _DPA_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64024)
 
#define DPB_AUX_CH_CTL 0x64110
#define DPB_AUX_CH_DATA1 0x64114
#define DPB_AUX_CH_DATA2 0x64118
#define DPB_AUX_CH_DATA3 0x6411c
#define DPB_AUX_CH_DATA4 0x64120
#define DPB_AUX_CH_DATA5 0x64124
#define _DPB_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64110)
#define _DPB_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64114)
#define _DPB_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64118)
#define _DPB_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6411c)
#define _DPB_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64120)
#define _DPB_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64124)
 
#define DPC_AUX_CH_CTL 0x64210
#define DPC_AUX_CH_DATA1 0x64214
#define DPC_AUX_CH_DATA2 0x64218
#define DPC_AUX_CH_DATA3 0x6421c
#define DPC_AUX_CH_DATA4 0x64220
#define DPC_AUX_CH_DATA5 0x64224
#define _DPC_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64210)
#define _DPC_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64214)
#define _DPC_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64218)
#define _DPC_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6421c)
#define _DPC_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64220)
#define _DPC_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64224)
 
#define DPD_AUX_CH_CTL 0x64310
#define DPD_AUX_CH_DATA1 0x64314
#define DPD_AUX_CH_DATA2 0x64318
#define DPD_AUX_CH_DATA3 0x6431c
#define DPD_AUX_CH_DATA4 0x64320
#define DPD_AUX_CH_DATA5 0x64324
#define _DPD_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64310)
#define _DPD_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64314)
#define _DPD_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64318)
#define _DPD_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6431c)
#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
 
#define DP_AUX_CH_CTL(port) _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(port, i) _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
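Each DATA register carries four bytes, so a full 20-byte AUX message spans exactly the five registers DP_AUX_CH_DATA() indexes. A stand-alone sketch of that packing, modeled on the MSB-first order the i915 AUX path uses (register writes replaced by an array):

/* Sketch: pack a <= 20 byte AUX message into the 5 DATA registers.
 * MSB-first within each register; the MMIO write is replaced by
 * storing into regs[]. */
#include <stdint.h>

uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);	/* byte 0 -> bits 31:24 */
	return v;
}

void write_aux_msg(uint32_t regs[5], const uint8_t *msg, int len)
{
	int i;

	for (i = 0; i < len; i += 4)	/* one DATA register per 4 bytes */
		regs[i / 4] = pack_aux(msg + i, len - i);
}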
 
#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
#define DP_AUX_CH_CTL_DONE (1 << 30)
#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
4343,10 → 4405,10
#define _PIPEB_LINK_N_G4X 0x71064
#define PIPEA_DP_LINK_N_MASK (0xffffff)
 
#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
 
/* Display & cursor control */
 
4462,15 → 4524,15
*/
#define PIPE_EDP_OFFSET 0x7f000
 
#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \
#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
dev_priv->info.display_mmio_offset)
 
#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL)
#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)
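_MMIO_PIPE2() computes the address base-relatively: the register's pipe A offset, plus the distance from pipe A's block to the requested pipe's block, plus the platform's display MMIO base. A stand-alone sketch with illustrative values (0x180000 is the VLV-style display base; the pipe_offsets[] entries are made up):

/* Sketch of the _MMIO_PIPE2 address arithmetic, offsets illustrative. */
#include <stdint.h>
#include <stdio.h>

static const uint32_t pipe_offsets[3] = { 0x70000, 0x71000, 0x72000 };
static const uint32_t display_mmio_offset = 0x180000;

uint32_t mmio_pipe2(int pipe, uint32_t reg)
{
	return pipe_offsets[pipe] - pipe_offsets[0] + reg + display_mmio_offset;
}

int main(void)
{
	/* with _PIPEACONF at 0x70008, pipe B lands 0x1000 higher */
	printf("PIPECONF(B) = 0x%x\n", (unsigned)mmio_pipe2(1, 0x70008));
	return 0;
}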
 
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
4482,9 → 4544,9
#define PIPEMISC_DITHER_ENABLE (1<<4)
#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
#define PIPEMISC_DITHER_TYPE_SP (0<<2)
#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
 
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
4505,7 → 4567,7
#define SPRITEE_FLIPDONE_INT_EN (1<<9)
#define PLANEC_FLIPDONE_INT_EN (1<<8)
 
#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
#define PLANEC_INVALID_GTT_INT_EN (1<<25)
4535,7 → 4597,7
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
 
#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030)
#define DSPARB _MMIO(dev_priv->info.display_mmio_offset + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
#define DSPARB_BSTART_MASK (0x7f)
4550,7 → 4612,7
#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
#define DSPARB_SPRITED_SHIFT_VLV 24
#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
4563,7 → 4625,7
#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
#define DSPARB_SPRITEE_SHIFT_VLV 0
#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
#define DSPARB_SPRITEF_SHIFT_VLV 8
4570,7 → 4632,7
#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
 
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
4581,7 → 4643,7
#define DSPFW_PLANEA_SHIFT 0
#define DSPFW_PLANEA_MASK (0x7f<<0)
#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
#define DSPFW_FBC_SR_SHIFT 28
#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
4597,7 → 4659,7
#define DSPFW_SPRITEA_SHIFT 0
#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
#define DSPFW_CURSOR_SR_SHIFT 24
4608,7 → 4670,7
#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
 
/* vlv/chv */
#define DSPFW4 (VLV_DISPLAY_BASE + 0x70070)
#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
#define DSPFW_SPRITEB_WM1_SHIFT 16
#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
#define DSPFW_CURSORA_WM1_SHIFT 8
4615,7 → 4677,7
#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
#define DSPFW_SPRITEA_WM1_SHIFT 0
#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
#define DSPFW5 (VLV_DISPLAY_BASE + 0x70074)
#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
#define DSPFW_PLANEB_WM1_SHIFT 24
#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
#define DSPFW_PLANEA_WM1_SHIFT 16
4624,11 → 4686,11
#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
#define DSPFW_CURSOR_SR_WM1_SHIFT 0
#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
#define DSPFW6 (VLV_DISPLAY_BASE + 0x70078)
#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
#define DSPFW_SR_WM1_SHIFT 0
#define DSPFW_SR_WM1_MASK (0x1ff<<0)
#define DSPFW7 (VLV_DISPLAY_BASE + 0x7007c)
#define DSPFW7_CHV (VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
#define DSPFW_SPRITED_WM1_SHIFT 24
#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
#define DSPFW_SPRITED_SHIFT 16
4637,7 → 4699,7
#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEC_SHIFT 0
#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
#define DSPFW_SPRITEF_SHIFT 16
4646,7 → 4708,7
#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEE_SHIFT 0
#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
#define DSPFW_PLANEC_SHIFT 16
4657,7 → 4719,7
#define DSPFW_CURSORC_MASK (0x3f<<0)
 
/* vlv/chv high order bits */
#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
4678,7 → 4740,7
#define DSPFW_SPRITEA_HI_MASK (1<<4)
#define DSPFW_PLANEA_HI_SHIFT 0
#define DSPFW_PLANEA_HI_MASK (1<<0)
#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
4701,7 → 4763,7
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
 
/* drain latency register values */
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define DDL_CURSOR_SHIFT 24
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
#define DDL_PLANE_SHIFT 0
4709,7 → 4771,7
#define DDL_PRECISION_LOW (0<<7)
#define DRAIN_LATENCY_MASK 0x7f
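Each VLV_DDL register holds one 8-bit field per plane: primary at bits 7:0, the sprites at 8 + 8*sprite, cursor at 31:24, with 7 latency bits plus the precision bit in each field. A sketch of extracting one sprite's latency:

/* Sketch: pull a sprite's drain latency out of a VLV_DDL value. */
#include <stdint.h>

#define DDL_SPRITE_SHIFT(sprite)	(8 + 8 * (sprite))
#define DRAIN_LATENCY_MASK		0x7f

uint32_t ddl_sprite_latency(uint32_t ddl, int sprite)
{
	return (ddl >> DDL_SPRITE_SHIFT(sprite)) & DRAIN_LATENCY_MASK;
}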
 
#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400)
#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
#define CBR_PND_DEADLINE_DISABLE (1<<31)
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
 
4747,42 → 4809,42
#define I965_CURSOR_DFT_WM 8
 
/* Watermark register definitions for SKL */
#define CUR_WM_A_0 0x70140
#define CUR_WM_B_0 0x71140
#define PLANE_WM_1_A_0 0x70240
#define PLANE_WM_1_B_0 0x71240
#define PLANE_WM_2_A_0 0x70340
#define PLANE_WM_2_B_0 0x71340
#define PLANE_WM_TRANS_1_A_0 0x70268
#define PLANE_WM_TRANS_1_B_0 0x71268
#define PLANE_WM_TRANS_2_A_0 0x70368
#define PLANE_WM_TRANS_2_B_0 0x71368
#define CUR_WM_TRANS_A_0 0x70168
#define CUR_WM_TRANS_B_0 0x71168
#define _CUR_WM_A_0 0x70140
#define _CUR_WM_B_0 0x71140
#define _PLANE_WM_1_A_0 0x70240
#define _PLANE_WM_1_B_0 0x71240
#define _PLANE_WM_2_A_0 0x70340
#define _PLANE_WM_2_B_0 0x71340
#define _PLANE_WM_TRANS_1_A_0 0x70268
#define _PLANE_WM_TRANS_1_B_0 0x71268
#define _PLANE_WM_TRANS_2_A_0 0x70368
#define _PLANE_WM_TRANS_2_B_0 0x71368
#define _CUR_WM_TRANS_A_0 0x70168
#define _CUR_WM_TRANS_B_0 0x71168
#define PLANE_WM_EN (1 << 31)
#define PLANE_WM_LINES_SHIFT 14
#define PLANE_WM_LINES_MASK 0x1f
#define PLANE_WM_BLOCKS_MASK 0x3ff
 
#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0)
#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level)))
#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0)
#define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0)
#define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level)))
#define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A_0, _CUR_WM_TRANS_B_0)
 
#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
#define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0)
#define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
#define _PLANE_WM_BASE(pipe, plane) \
_PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
#define PLANE_WM(pipe, plane, level) \
(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
_MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
#define _PLANE_WM_TRANS_1(pipe) \
_PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0)
_PIPE(pipe, _PLANE_WM_TRANS_1_A_0, _PLANE_WM_TRANS_1_B_0)
#define _PLANE_WM_TRANS_2(pipe) \
_PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0)
_PIPE(pipe, _PLANE_WM_TRANS_2_A_0, _PLANE_WM_TRANS_2_B_0)
#define PLANE_WM_TRANS(pipe, plane) \
_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))
_MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)))
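PLANE_WM() dispatches twice before the final _MMIO wrap: _PIPE picks the A/B block, _PLANE picks plane 1 vs 2, and the level adds 4 bytes per step. A sanity-check sketch with the helpers restated in plain-offset form (no _MMIO, so raw offsets come out):

/* Sketch: PLANE_WM(pipe, plane, level) composition, raw offsets only. */
#include <assert.h>

#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define _PLANE(plane, a, b)	_PIPE(plane, a, b)

#define _PLANE_WM_1_A_0 0x70240
#define _PLANE_WM_1_B_0 0x71240
#define _PLANE_WM_2_A_0 0x70340
#define _PLANE_WM_2_B_0 0x71340

#define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0)
#define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
#define _PLANE_WM_BASE(pipe, plane) \
	_PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
#define PLANE_WM(pipe, plane, level) \
	(_PLANE_WM_BASE(pipe, plane) + 4 * (level))

int main(void)
{
	assert(PLANE_WM(1, 1, 3) == 0x7134c);	/* pipe B, plane 2, level 3 */
	return 0;
}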
 
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
#define WM0_PIPEA_ILK _MMIO(0x45100)
#define WM0_PIPE_PLANE_MASK (0xffff<<16)
#define WM0_PIPE_PLANE_SHIFT 16
#define WM0_PIPE_SPRITE_MASK (0xff<<8)
4789,9 → 4851,9
#define WM0_PIPE_SPRITE_SHIFT 8
#define WM0_PIPE_CURSOR_MASK (0xff)
 
#define WM0_PIPEB_ILK 0x45104
#define WM0_PIPEC_IVB 0x45200
#define WM1_LP_ILK 0x45108
#define WM0_PIPEB_ILK _MMIO(0x45104)
#define WM0_PIPEC_IVB _MMIO(0x45200)
#define WM1_LP_ILK _MMIO(0x45108)
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
#define WM1_LP_LATENCY_MASK (0x7f<<24)
4801,13 → 4863,13
#define WM1_LP_SR_MASK (0x7ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK 0x4510c
#define WM2_LP_ILK _MMIO(0x4510c)
#define WM2_LP_EN (1<<31)
#define WM3_LP_ILK 0x45110
#define WM3_LP_ILK _MMIO(0x45110)
#define WM3_LP_EN (1<<31)
#define WM1S_LP_ILK 0x45120
#define WM2S_LP_IVB 0x45124
#define WM3S_LP_IVB 0x45128
#define WM1S_LP_ILK _MMIO(0x45120)
#define WM2S_LP_IVB _MMIO(0x45124)
#define WM3S_LP_IVB _MMIO(0x45128)
#define WM1S_LP_EN (1<<31)
 
#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
4815,7 → 4877,7
((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
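The macro's first line (hidden behind the hunk marker above) supplies at least the latency field. A simplified restatement of the packing, with WM1_LP_FBC_SHIFT assumed to be 20 and any enable bit left to the caller:

/* Sketch: pack an LP watermark value; simplified, no enable bit. */
#include <stdint.h>

#define WM1_LP_LATENCY_SHIFT	24
#define WM1_LP_FBC_SHIFT	20	/* assumed */
#define WM1_LP_SR_SHIFT		8

#define WM_LP_VAL(lat, fbc, pri, cur) \
	(((uint32_t)(lat) << WM1_LP_LATENCY_SHIFT) | \
	 ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))

/* latency 12, fbc 2, primary 0x80, cursor 0x20 -> 0x0c208020 */
const uint32_t example_wm = WM_LP_VAL(12, 2, 0x80, 0x20);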
 
/* Memory latency timer register */
#define MLTR_ILK 0x11222
#define MLTR_ILK _MMIO(0x11222)
#define MLTR_WM1_SHIFT 0
#define MLTR_WM2_SHIFT 8
/* the unit of memory self-refresh latency time is 0.5us */
4823,7 → 4885,7
 
 
/* the address where we get all kinds of latency values */
#define SSKPD 0x5d10
#define SSKPD _MMIO(0x5d10)
#define SSKPD_WM_MASK 0x3f
#define SSKPD_WM0_SHIFT 0
#define SSKPD_WM1_SHIFT 8
4856,8 → 4918,8
/* GM45+ just has to be different */
#define _PIPEA_FRMCOUNT_G4X 0x70040
#define _PIPEA_FLIPCOUNT_G4X 0x70044
#define PIPE_FRMCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
#define PIPE_FLIPCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
 
/* Cursor A & B regs */
#define _CURACNTR 0x70080
4895,7 → 4957,7
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
#define CURSIZE _MMIO(0x700a0)
#define _CURBCNTR 0x700c0
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8
4904,7 → 4966,7
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
 
#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \
#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
dev_priv->info.display_mmio_offset)
 
4965,16 → 5027,16
#define _DSPAOFFSET 0x701A4 /* HSW */
#define _DSPASURFLIVE 0x701AC
 
#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
#define DSPPOS(plane) _MMIO_PIPE2(plane, _DSPAPOS)
#define DSPSIZE(plane) _MMIO_PIPE2(plane, _DSPASIZE)
#define DSPSURF(plane) _MMIO_PIPE2(plane, _DSPASURF)
#define DSPTILEOFF(plane) _MMIO_PIPE2(plane, _DSPATILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
 
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
4988,11 → 5050,11
#define _PRIMCNSTALPHA_A 0x60a10
#define PRIM_CONST_ALPHA_ENABLE (1<<31)
 
#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A)
#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A)
#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A)
#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A)
#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A)
#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
#define PRIMPOS(plane) _MMIO_TRANS2(plane, _PRIMPOS_A)
#define PRIMSIZE(plane) _MMIO_TRANS2(plane, _PRIMSIZE_A)
#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(plane, _PRIMCNSTALPHA_A)
 
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
5010,9 → 5072,10
* [10:1f] all
* [30:32] all
*/
#define SWF0(i) (dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
#define SWF1(i) (dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
#define SWF3(i) (dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
#define SWF0(i) _MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
#define SWF1(i) _MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
#define SWF3(i) _MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
 
/* Pipe B */
#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
5094,18 → 5157,18
#define _DVSBSCALE 0x73204
#define _DVSBGAMC 0x73300
 
#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS)
#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF)
#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE)
#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE)
#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
 
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
5168,20 → 5231,20
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
 
#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS)
#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
#define SP_ENABLE (1<<31)
5231,18 → 5294,18
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
 
#define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
#define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
#define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
#define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
#define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
#define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
#define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
#define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
#define SPCNTR(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
#define SPSTRIDE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
#define SPPOS(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
#define SPSIZE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
#define SPKEYMINVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
#define SPKEYMSK(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
#define SPSURF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
#define SPKEYMAXVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
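VLV sprites are indexed two per pipe, hence the (pipe) * 2 + (plane) argument; the A/B register pair gives _PIPE() its 0x100 stride and higher indices extrapolate from there. A sketch checking only the macro arithmetic (whether pipe B's sprites really sit at the extrapolated addresses is a property of the VLV layout, not shown here):

/* Sketch: VLV sprite indexing via (pipe) * 2 + (plane). */
#include <assert.h>

#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define _SPACNTR		0x72180	/* display base omitted */
#define _SPBCNTR		0x72280

#define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)

int main(void)
{
	assert(SPCNTR(0, 1) == 0x72280);	/* pipe A, second sprite */
	assert(SPCNTR(1, 0) == 0x72380);	/* pipe B, first sprite */
	return 0;
}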
 
/*
* CHV pipe B sprite CSC
5251,29 → 5314,29
* |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
* |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff|
*/
#define SPCSCYGOFF(sprite) (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
#define SPCSCCBOFF(sprite) (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
#define SPCSCCROFF(sprite) (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
#define SPCSCYGOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
#define SPCSCCBOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
#define SPCSCCROFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */
 
#define SPCSCC01(sprite) (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
#define SPCSCC23(sprite) (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
#define SPCSCC45(sprite) (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
#define SPCSCC67(sprite) (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
#define SPCSCC8(sprite) (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
#define SPCSCC01(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
#define SPCSCC23(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
#define SPCSCC45(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
#define SPCSCC67(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
#define SPCSCC8(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */
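Each SPCSCC* register packs two s3.12 coefficients, C0 in the low half and C1 in the high half. A sketch of the encoding, assuming the usual two's-complement truncation into the 15-bit field (to_s3_12() and pack_c01() are illustrative helpers, not kernel functions):

/* Sketch: pack two s3.12 CSC coefficients the way SPCSC_C0/C1 lay
 * them out. */
#include <stdint.h>

#define SPCSC_C1(x) (((x) & 0x7fff) << 16)	/* s3.12 */
#define SPCSC_C0(x) (((x) & 0x7fff) << 0)	/* s3.12 */

/* 12 fractional bits; caller keeps |v| < 4.0 so it fits in s3.12 */
int32_t to_s3_12(double v)
{
	return (int32_t)(v * 4096.0);
}

uint32_t pack_c01(double c0, double c1)
{
	/* e.g. pack_c01(1.0, 0.0) == 0x00001000 */
	return SPCSC_C0(to_s3_12(c0)) | SPCSC_C1(to_s3_12(c1));
}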
 
#define SPCSCYGICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
#define SPCSCCBICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
#define SPCSCCRICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
#define SPCSCYGICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
#define SPCSCCBICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
#define SPCSCCRICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */
 
#define SPCSCYGOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
#define SPCSCCBOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
#define SPCSCCROCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
#define SPCSCYGOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
#define SPCSCCBOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
#define SPCSCCROCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
#define SPCSC_OMIN(x) ((x) << 0) /* u10 */
 
5354,7 → 5417,7
#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
#define PLANE_CTL(pipe, plane) \
_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
_MMIO_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
 
#define _PLANE_STRIDE_1_B 0x71188
#define _PLANE_STRIDE_2_B 0x71288
5366,7 → 5429,7
#define _PLANE_STRIDE_3(pipe) \
_PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
#define PLANE_STRIDE(pipe, plane) \
_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
_MMIO_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
 
#define _PLANE_POS_1_B 0x7118c
#define _PLANE_POS_2_B 0x7128c
5375,7 → 5438,7
#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
#define PLANE_POS(pipe, plane) \
_PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
_MMIO_PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
 
#define _PLANE_SIZE_1_B 0x71190
#define _PLANE_SIZE_2_B 0x71290
5384,7 → 5447,7
#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
#define PLANE_SIZE(pipe, plane) \
_PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
_MMIO_PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
 
#define _PLANE_SURF_1_B 0x7119c
#define _PLANE_SURF_2_B 0x7129c
5393,7 → 5456,7
#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
#define PLANE_SURF(pipe, plane) \
_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
_MMIO_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
 
#define _PLANE_OFFSET_1_B 0x711a4
#define _PLANE_OFFSET_2_B 0x712a4
5400,7 → 5463,7
#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
#define PLANE_OFFSET(pipe, plane) \
_PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
_MMIO_PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
 
#define _PLANE_KEYVAL_1_B 0x71194
#define _PLANE_KEYVAL_2_B 0x71294
5407,7 → 5470,7
#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
#define PLANE_KEYVAL(pipe, plane) \
_PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
_MMIO_PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
 
#define _PLANE_KEYMSK_1_B 0x71198
#define _PLANE_KEYMSK_2_B 0x71298
5414,7 → 5477,7
#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
#define PLANE_KEYMSK(pipe, plane) \
_PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
_MMIO_PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
 
#define _PLANE_KEYMAX_1_B 0x711a0
#define _PLANE_KEYMAX_2_B 0x712a0
5421,7 → 5484,7
#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
#define PLANE_KEYMAX(pipe, plane) \
_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
_MMIO_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
 
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
5430,7 → 5493,7
#define _PLANE_BUF_CFG_2(pipe) \
_PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
#define PLANE_BUF_CFG(pipe, plane) \
_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
_MMIO_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
 
#define _PLANE_NV12_BUF_CFG_1_B 0x71278
#define _PLANE_NV12_BUF_CFG_2_B 0x71378
5439,26 → 5502,26
#define _PLANE_NV12_BUF_CFG_2(pipe) \
_PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B)
#define PLANE_NV12_BUF_CFG(pipe, plane) \
_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
_MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
 
/* SKL new cursor registers */
#define _CUR_BUF_CFG_A 0x7017c
#define _CUR_BUF_CFG_B 0x7117c
#define CUR_BUF_CFG(pipe) _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
#define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
 
/* VBIOS regs */
#define VGACNTRL 0x71400
#define VGACNTRL _MMIO(0x71400)
# define VGA_DISP_DISABLE (1 << 31)
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
 
#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)
#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
 
/* Ironlake */
 
#define CPU_VGACNTRL 0x41000
#define CPU_VGACNTRL _MMIO(0x41000)
 
#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
5471,26 → 5534,26
#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
 
/* refresh rate hardware control */
#define RR_HW_CTL 0x45300
#define RR_HW_CTL _MMIO(0x45300)
#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
 
#define FDI_PLL_BIOS_0 0x46000
#define FDI_PLL_BIOS_0 _MMIO(0x46000)
#define FDI_PLL_FB_CLOCK_MASK 0xff
#define FDI_PLL_BIOS_1 0x46004
#define FDI_PLL_BIOS_2 0x46008
#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
#define FDI_PLL_BIOS_1 _MMIO(0x46004)
#define FDI_PLL_BIOS_2 _MMIO(0x46008)
#define DISPLAY_PORT_PLL_BIOS_0 _MMIO(0x4600c)
#define DISPLAY_PORT_PLL_BIOS_1 _MMIO(0x46010)
#define DISPLAY_PORT_PLL_BIOS_2 _MMIO(0x46014)
 
#define PCH_3DCGDIS0 0x46020
#define PCH_3DCGDIS0 _MMIO(0x46020)
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
 
#define PCH_3DCGDIS1 0x46024
#define PCH_3DCGDIS1 _MMIO(0x46024)
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
 
#define FDI_PLL_FREQ_CTL 0x46030
#define FDI_PLL_FREQ_CTL _MMIO(0x46030)
#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
5527,14 → 5590,14
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
 
#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
#define PIPE_DATA_M1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M1)
#define PIPE_DATA_N1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N1)
#define PIPE_DATA_M2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M2)
#define PIPE_DATA_N2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N2)
#define PIPE_LINK_M1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M1)
#define PIPE_LINK_N1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N1)
#define PIPE_LINK_M2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M2)
#define PIPE_LINK_N2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N2)
 
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
5557,11 → 5620,11
#define _PFA_HSCALE 0x68090
#define _PFB_HSCALE 0x68890
 
#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
 
#define _PSA_CTL 0x68180
#define _PSB_CTL 0x68980
5571,9 → 5634,9
#define _PSA_WIN_POS 0x68170
#define _PSB_WIN_POS 0x68970
 
#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL)
#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
#define PS_CTL(pipe) _MMIO_PIPE(pipe, _PSA_CTL, _PSB_CTL)
#define PS_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
#define PS_WIN_POS(pipe) _MMIO_PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
 
/*
* Skylake scalers
5662,42 → 5725,42
#define _PS_ECC_STAT_1C 0x691D0
 
#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
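_ID() is the same linear-extrapolation trick as _PIPE(): id 0 yields a, id 1 yields b, and larger ids keep stepping by (b - a). A self-contained check with illustrative values:

/* Sketch: _ID() selects by linear extrapolation with stride (b - a). */
#define _ID(id, a, b) ((a) + (id) * ((b) - (a)))

_Static_assert(_ID(0, 0x100, 0x200) == 0x100, "id 0 selects a");
_Static_assert(_ID(1, 0x100, 0x200) == 0x200, "id 1 selects b");
_Static_assert(_ID(2, 0x100, 0x200) == 0x300, "id 2 extrapolates");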
#define SKL_PS_CTRL(pipe, id) _PIPE(pipe, \
#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
_ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
#define SKL_PS_PWR_GATE(pipe, id) _PIPE(pipe, \
#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
_ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
#define SKL_PS_WIN_POS(pipe, id) _PIPE(pipe, \
#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
_ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
#define SKL_PS_WIN_SZ(pipe, id) _PIPE(pipe, \
#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
_ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
#define SKL_PS_VSCALE(pipe, id) _PIPE(pipe, \
#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
_ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
#define SKL_PS_HSCALE(pipe, id) _PIPE(pipe, \
#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
_ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
#define SKL_PS_VPHASE(pipe, id) _PIPE(pipe, \
#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
_ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
#define SKL_PS_HPHASE(pipe, id) _PIPE(pipe, \
#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
_ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
#define SKL_PS_ECC_STAT(pipe, id) _PIPE(pipe, \
#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
_ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B)
_ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
 
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
#define LGC_PALETTE(pipe, i) (_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
 
#define _GAMMA_MODE_A 0x4a480
#define _GAMMA_MODE_B 0x4ac80
#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
#define GAMMA_MODE_MODE_MASK (3 << 0)
#define GAMMA_MODE_MODE_8BIT (0 << 0)
#define GAMMA_MODE_MODE_10BIT (1 << 0)
5704,6 → 5767,21
#define GAMMA_MODE_MODE_12BIT (2 << 0)
#define GAMMA_MODE_MODE_SPLIT (3 << 0)
 
/* DMC/CSR */
#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
#define CSR_HTP_ADDR_SKL 0x00500034
#define CSR_SSP_BASE _MMIO(0x8F074)
#define CSR_HTP_SKL _MMIO(0x8F004)
#define CSR_LAST_WRITE _MMIO(0x8F034)
#define CSR_LAST_WRITE_VALUE 0xc003b400
/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
#define CSR_MMIO_START_RANGE 0x80000
#define CSR_MMIO_END_RANGE 0x8FFFF
#define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
 
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
#define DE_SPRITEB_FLIP_DONE (1 << 29)
5755,20 → 5833,20
#define DE_PIPEA_VBLANK_IVB (1<<0)
#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
 
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1<<31)
 
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
#define DEIER 0x4400c
#define DEISR _MMIO(0x44000)
#define DEIMR _MMIO(0x44004)
#define DEIIR _MMIO(0x44008)
#define DEIER _MMIO(0x4400c)
 
#define GTISR 0x44010
#define GTIMR 0x44014
#define GTIIR 0x44018
#define GTIER 0x4401c
#define GTISR _MMIO(0x44010)
#define GTIMR _MMIO(0x44014)
#define GTIIR _MMIO(0x44018)
#define GTIER _MMIO(0x4401c)
 
#define GEN8_MASTER_IRQ 0x44200
#define GEN8_MASTER_IRQ _MMIO(0x44200)
#define GEN8_MASTER_IRQ_CONTROL (1<<31)
#define GEN8_PCU_IRQ (1<<30)
#define GEN8_DE_PCH_IRQ (1<<23)
5785,10 → 5863,10
#define GEN8_GT_BCS_IRQ (1<<1)
#define GEN8_GT_RCS_IRQ (1<<0)
 
#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
 
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
5797,10 → 5875,10
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16
 
#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
#define GEN8_DE_PIPE_ISR(pipe) _MMIO(0x44400 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
5833,10 → 5911,10
GEN9_PIPE_PLANE2_FAULT | \
GEN9_PIPE_PLANE1_FAULT)
 
#define GEN8_DE_PORT_ISR 0x44440
#define GEN8_DE_PORT_IMR 0x44444
#define GEN8_DE_PORT_IIR 0x44448
#define GEN8_DE_PORT_IER 0x4444c
#define GEN8_DE_PORT_ISR _MMIO(0x44440)
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25)
5850,23 → 5928,23
#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
 
#define GEN8_DE_MISC_ISR 0x44460
#define GEN8_DE_MISC_IMR 0x44464
#define GEN8_DE_MISC_IIR 0x44468
#define GEN8_DE_MISC_IER 0x4446c
#define GEN8_DE_MISC_ISR _MMIO(0x44460)
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
#define GEN8_DE_MISC_IIR _MMIO(0x44468)
#define GEN8_DE_MISC_IER _MMIO(0x4446c)
#define GEN8_DE_MISC_GSE (1 << 27)
 
#define GEN8_PCU_ISR 0x444e0
#define GEN8_PCU_IMR 0x444e4
#define GEN8_PCU_IIR 0x444e8
#define GEN8_PCU_IER 0x444ec
#define GEN8_PCU_ISR _MMIO(0x444e0)
#define GEN8_PCU_IMR _MMIO(0x444e4)
#define GEN8_PCU_IIR _MMIO(0x444e8)
#define GEN8_PCU_IER _MMIO(0x444ec)
 
#define ILK_DISPLAY_CHICKEN2 0x42004
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
#define FUSE_STRAP 0x42014
#define FUSE_STRAP _MMIO(0x42014)
#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
5875,7 → 5953,7
#define HSW_CDCLK_LIMIT (1 << 24)
#define ILK_DESKTOP (1 << 23)
 
#define ILK_DSPCLK_GATE_D 0x42020
#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
5882,11 → 5960,11
#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
 
#define IVB_CHICKEN3 0x4200c
#define IVB_CHICKEN3 _MMIO(0x4200c)
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
 
#define CHICKEN_PAR1_1 0x42080
#define CHICKEN_PAR1_1 _MMIO(0x42080)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
 
5894,23 → 5972,23
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
#define DISP_ARB_CTL 0x45000
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 0x45004
#define DISP_ARB_CTL2 _MMIO(0x45004)
#define DISP_DATA_PARTITION_5_6 (1<<6)
#define DBUF_CTL 0x45008
#define DBUF_CTL _MMIO(0x45008)
#define DBUF_POWER_REQUEST (1<<31)
#define DBUF_POWER_STATE (1<<30)
#define GEN7_MSG_CTL 0x45010
#define GEN7_MSG_CTL _MMIO(0x45010)
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
#define HSW_NDE_RSTWRN_OPT 0x46408
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
 
#define SKL_DFSM 0x51000
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
5917,47 → 5995,47
#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
 
#define FF_SLICE_CS_CHICKEN2 0x20e4
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
 
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 0x7014
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
 
#define HIZ_CHICKEN 0x7018
#define HIZ_CHICKEN _MMIO(0x7018)
# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
 
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
 
#define GEN7_L3SQCREG1 0xB010
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
 
#define GEN8_L3SQCREG1 0xB100
#define GEN8_L3SQCREG1 _MMIO(0xB100)
#define BDW_WA_L3SQCREG1_DEFAULT 0x784000
 
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3CNTLREG2 0xB020
#define GEN7_L3CNTLREG3 0xB024
#define GEN7_L3CNTLREG2 _MMIO(0xB020)
#define GEN7_L3CNTLREG3 _MMIO(0xB024)
 
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xB030)
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
 
#define GEN7_L3SQCREG4 0xb034
#define GEN7_L3SQCREG4 _MMIO(0xb034)
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
 
#define GEN8_L3SQCREG4 0xb118
#define GEN8_L3SQCREG4 _MMIO(0xb118)
#define GEN8_LQSC_RO_PERF_DIS (1<<27)
#define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21)
 
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
#define HDC_CHICKEN0 _MMIO(0x7300)
#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
5966,17 → 6044,17
#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
 
/* GEN9 chicken */
#define SLICE_ECO_CHICKEN0 0x7308
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
 
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
 
#define HSW_SCRATCH1 0xb038
#define HSW_SCRATCH1 _MMIO(0xb038)
#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
 
#define BDW_SCRATCH1 0xb11c
#define BDW_SCRATCH1 _MMIO(0xb11c)
#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
 
/* PCH */
6070,12 → 6148,12
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
 
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
#define SDEIIR 0xc4008
#define SDEIER 0xc400c
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
#define SDEIIR _MMIO(0xc4008)
#define SDEIER _MMIO(0xc400c)
 
#define SERR_INT 0xc4040
#define SERR_INT _MMIO(0xc4040)
#define SERR_INT_POISON (1<<31)
#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
6083,7 → 6161,7
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
 
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
6120,7 → 6198,7
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
 
#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 SPT+ */
#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
#define PORTE_HOTPLUG_ENABLE (1 << 4)
#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
6127,23 → 6205,23
#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
 
#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
#define PCH_GPIOC 0xc5018
#define PCH_GPIOD 0xc501c
#define PCH_GPIOE 0xc5020
#define PCH_GPIOF 0xc5024
#define PCH_GPIOA _MMIO(0xc5010)
#define PCH_GPIOB _MMIO(0xc5014)
#define PCH_GPIOC _MMIO(0xc5018)
#define PCH_GPIOD _MMIO(0xc501c)
#define PCH_GPIOE _MMIO(0xc5020)
#define PCH_GPIOF _MMIO(0xc5024)
 
#define PCH_GMBUS0 0xc5100
#define PCH_GMBUS1 0xc5104
#define PCH_GMBUS2 0xc5108
#define PCH_GMBUS3 0xc510c
#define PCH_GMBUS4 0xc5110
#define PCH_GMBUS5 0xc5120
#define PCH_GMBUS0 _MMIO(0xc5100)
#define PCH_GMBUS1 _MMIO(0xc5104)
#define PCH_GMBUS2 _MMIO(0xc5108)
#define PCH_GMBUS3 _MMIO(0xc510c)
#define PCH_GMBUS4 _MMIO(0xc5110)
#define PCH_GMBUS5 _MMIO(0xc5120)
 
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
 
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
6150,12 → 6228,12
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
#define PCH_DPLL_TEST 0xc606c
#define PCH_DPLL_TEST _MMIO(0xc606c)
 
#define PCH_DREF_CONTROL 0xC6200
#define PCH_DREF_CONTROL _MMIO(0xC6200)
#define DREF_CONTROL_MASK 0x7fc3
#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
6178,7 → 6256,7
#define DREF_SSC4_DISABLE (0)
#define DREF_SSC4_ENABLE (1)
 
#define PCH_RAWCLK_FREQ 0xc6204
#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
#define FDL_TP1_TIMER_SHIFT 12
#define FDL_TP1_TIMER_MASK (3<<12)
#define FDL_TP2_TIMER_SHIFT 10
6185,12 → 6263,12
#define FDL_TP2_TIMER_MASK (3<<10)
#define RAWCLK_FREQ_MASK 0x3ff
 
#define PCH_DPLL_TMR_CFG 0xc6208
#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
 
#define PCH_SSC4_PARMS 0xc6210
#define PCH_SSC4_AUX_PARMS 0xc6214
#define PCH_SSC4_PARMS _MMIO(0xc6210)
#define PCH_SSC4_AUX_PARMS _MMIO(0xc6214)
 
#define PCH_DPLL_SEL 0xc7000
#define PCH_DPLL_SEL _MMIO(0xc7000)
#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
#define TRANS_DPLLA_SEL(pipe) 0
#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
6238,79 → 6316,73
#define _VIDEO_DIP_DATA_B 0xe1208
#define _VIDEO_DIP_GCP_B 0xe1210
 
#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
/* Per-transcoder DIP controls (VLV) */
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
#define _VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define _VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
 
#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
#define _VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
#define _VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
 
#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
#define _CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
#define _CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
 
#define VLV_TVIDEO_DIP_CTL(pipe) \
_PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
_MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_CTL_A, \
_VLV_VIDEO_DIP_CTL_B, _CHV_VIDEO_DIP_CTL_C)
#define VLV_TVIDEO_DIP_DATA(pipe) \
_PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
_MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_DATA_A, \
_VLV_VIDEO_DIP_DATA_B, _CHV_VIDEO_DIP_DATA_C)
#define VLV_TVIDEO_DIP_GCP(pipe) \
_PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
_MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
_VLV_VIDEO_DIP_GDCP_PAYLOAD_B, _CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
 
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A 0x60200
#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
#define HSW_VIDEO_DIP_GCP_A 0x60210
 
#define HSW_VIDEO_DIP_CTL_B 0x61200
#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define HSW_VIDEO_DIP_GCP_B 0x61210
#define _HSW_VIDEO_DIP_CTL_A 0x60200
#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
#define _HSW_VIDEO_DIP_GCP_A 0x60210
 
#define HSW_TVIDEO_DIP_CTL(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) + (i) * 4)
#define HSW_TVIDEO_DIP_GCP(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) + (i) * 4)
#define _HSW_VIDEO_DIP_CTL_B 0x61200
#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define _HSW_VIDEO_DIP_GCP_B 0x61210
 
#define HSW_STEREO_3D_CTL_A 0x70020
#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
 
#define _HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1<<31)
#define HSW_STEREO_3D_CTL_B 0x71020
#define _HSW_STEREO_3D_CTL_B 0x71020
 
#define HSW_STEREO_3D_CTL(trans) \
_PIPE2(trans, HSW_STEREO_3D_CTL_A)
#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
 
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
6320,14 → 6392,13
#define _PCH_TRANS_VSYNC_B 0xe1014
#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
 
#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
_PCH_TRANS_VSYNCSHIFT_B)
#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
 
#define _PCH_TRANSB_DATA_M1 0xe1030
#define _PCH_TRANSB_DATA_N1 0xe1034
6338,19 → 6409,19
#define _PCH_TRANSB_LINK_M2 0xe1048
#define _PCH_TRANSB_LINK_N2 0xe104c
 
#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
 
#define _PCH_TRANSACONF 0xf0008
#define _PCH_TRANSBCONF 0xf1008
#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
#define LPT_TRANSCONF _PCH_TRANSACONF /* lpt has only one transcoder */
#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
6371,12 → 6442,12
 
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
6383,7 → 6454,7
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
 
#define SOUTH_CHICKEN1 0xc2000
#define SOUTH_CHICKEN1 _MMIO(0xc2000)
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
6390,7 → 6461,7
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SPT_PWM_GRANULARITY (1<<0)
#define SOUTH_CHICKEN2 0xc2004
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
#define LPT_PWM_GRANULARITY (1<<5)
6400,9 → 6471,9
#define _FDI_RXB_CHICKEN 0xc2010
#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
 
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
6411,7 → 6482,7
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
#define _FDI_TXB_CTL 0x61100
#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
6461,7 → 6532,7
/* FDI_RX: FDI_X is hard-wired to Transcoder_X */
#define _FDI_RXA_CTL 0xf000c
#define _FDI_RXB_CTL 0xf100c
#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
#define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
/* train, dp width same as FDI_TX */
#define FDI_FS_ERRC_ENABLE (1<<27)
6497,14 → 6568,14
#define FDI_RX_TP1_TO_TP2_48 (2<<20)
#define FDI_RX_TP1_TO_TP2_64 (3<<20)
#define FDI_RX_FDI_DELAY_90 (0x90<<0)
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
#define FDI_RX_TUSIZE1(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
 
/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN (1<<10)
6523,40 → 6594,37
#define _FDI_RXA_IMR 0xf0018
#define _FDI_RXB_IIR 0xf1014
#define _FDI_RXB_IMR 0xf1018
#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
#define FDI_RX_IIR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
#define FDI_RX_IMR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
 
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
#define FDI_PLL_CTL_1 _MMIO(0xfe000)
#define FDI_PLL_CTL_2 _MMIO(0xfe004)
 
#define PCH_LVDS 0xe1180
#define PCH_LVDS _MMIO(0xe1180)
#define LVDS_DETECTED (1 << 1)
 
/* vlv has 2 sets of panel control regs. */
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
#define _PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define _PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define _PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
#define _PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define _PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
 
#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
#define _PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
#define _PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
#define _PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
#define _PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
#define _PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
 
#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS)
#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL)
#define VLV_PIPE_PP_ON_DELAYS(pipe) \
_PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS)
#define VLV_PIPE_PP_OFF_DELAYS(pipe) \
_PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS)
#define VLV_PIPE_PP_DIVISOR(pipe) \
_PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR)
#define VLV_PIPE_PP_STATUS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_STATUS, _PIPEB_PP_STATUS)
#define VLV_PIPE_PP_CONTROL(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_CONTROL, _PIPEB_PP_CONTROL)
#define VLV_PIPE_PP_ON_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_ON_DELAYS, _PIPEB_PP_ON_DELAYS)
#define VLV_PIPE_PP_OFF_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_OFF_DELAYS, _PIPEB_PP_OFF_DELAYS)
#define VLV_PIPE_PP_DIVISOR(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_DIVISOR, _PIPEB_PP_DIVISOR)
 
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define _PCH_PP_STATUS 0xc7200
#define _PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define PANEL_UNLOCK_MASK (0xffff << 16)
#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0)
6566,7 → 6634,7
#define PANEL_POWER_RESET (1 << 1)
#define PANEL_POWER_OFF (0 << 0)
#define PANEL_POWER_ON (1 << 0)
#define PCH_PP_ON_DELAYS 0xc7208
#define _PCH_PP_ON_DELAYS 0xc7208
#define PANEL_PORT_SELECT_MASK (3 << 30)
#define PANEL_PORT_SELECT_LVDS (0 << 30)
#define PANEL_PORT_SELECT_DPA (1 << 30)
6577,18 → 6645,24
#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
 
#define PCH_PP_OFF_DELAYS 0xc720c
#define _PCH_PP_OFF_DELAYS 0xc720c
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
 
#define PCH_PP_DIVISOR 0xc7210
#define _PCH_PP_DIVISOR 0xc7210
#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
#define PP_REFERENCE_DIVIDER_SHIFT 8
#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
 
#define PCH_PP_STATUS _MMIO(_PCH_PP_STATUS)
#define PCH_PP_CONTROL _MMIO(_PCH_PP_CONTROL)
#define PCH_PP_ON_DELAYS _MMIO(_PCH_PP_ON_DELAYS)
#define PCH_PP_OFF_DELAYS _MMIO(_PCH_PP_OFF_DELAYS)
#define PCH_PP_DIVISOR _MMIO(_PCH_PP_DIVISOR)
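The panel-power fields above are plain mask/shift pairs, so decoding one is a single masked read. A minimal sketch, assuming dev_priv and the driver's I915_READ accessor are in scope (the unit of the raw field is not specified here):

/* Sketch: pull the backlight-on delay field out of PCH_PP_ON_DELAYS. */
u32 pp_on = I915_READ(PCH_PP_ON_DELAYS);
u32 light_on_delay =
	(pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> PANEL_LIGHT_ON_DELAY_SHIFT;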
 
/* BXT PPS changes - 2nd set of PPS registers */
#define _BXT_PP_STATUS2 0xc7300
#define _BXT_PP_CONTROL2 0xc7304
6595,35 → 6669,41
#define _BXT_PP_ON_DELAYS2 0xc7308
#define _BXT_PP_OFF_DELAYS2 0xc730c
 
#define BXT_PP_STATUS(n) _PIPE(n, PCH_PP_STATUS, _BXT_PP_STATUS2)
#define BXT_PP_CONTROL(n) _PIPE(n, PCH_PP_CONTROL, _BXT_PP_CONTROL2)
#define BXT_PP_ON_DELAYS(n) _PIPE(n, PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
#define BXT_PP_OFF_DELAYS(n) _PIPE(n, PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
#define BXT_PP_STATUS(n) _MMIO_PIPE(n, _PCH_PP_STATUS, _BXT_PP_STATUS2)
#define BXT_PP_CONTROL(n) _MMIO_PIPE(n, _PCH_PP_CONTROL, _BXT_PP_CONTROL2)
#define BXT_PP_ON_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
#define BXT_PP_OFF_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
 
#define PCH_DP_B 0xe4100
#define PCH_DPB_AUX_CH_CTL 0xe4110
#define PCH_DPB_AUX_CH_DATA1 0xe4114
#define PCH_DPB_AUX_CH_DATA2 0xe4118
#define PCH_DPB_AUX_CH_DATA3 0xe411c
#define PCH_DPB_AUX_CH_DATA4 0xe4120
#define PCH_DPB_AUX_CH_DATA5 0xe4124
#define _PCH_DP_B 0xe4100
#define PCH_DP_B _MMIO(_PCH_DP_B)
#define _PCH_DPB_AUX_CH_CTL 0xe4110
#define _PCH_DPB_AUX_CH_DATA1 0xe4114
#define _PCH_DPB_AUX_CH_DATA2 0xe4118
#define _PCH_DPB_AUX_CH_DATA3 0xe411c
#define _PCH_DPB_AUX_CH_DATA4 0xe4120
#define _PCH_DPB_AUX_CH_DATA5 0xe4124
 
#define PCH_DP_C 0xe4200
#define PCH_DPC_AUX_CH_CTL 0xe4210
#define PCH_DPC_AUX_CH_DATA1 0xe4214
#define PCH_DPC_AUX_CH_DATA2 0xe4218
#define PCH_DPC_AUX_CH_DATA3 0xe421c
#define PCH_DPC_AUX_CH_DATA4 0xe4220
#define PCH_DPC_AUX_CH_DATA5 0xe4224
#define _PCH_DP_C 0xe4200
#define PCH_DP_C _MMIO(_PCH_DP_C)
#define _PCH_DPC_AUX_CH_CTL 0xe4210
#define _PCH_DPC_AUX_CH_DATA1 0xe4214
#define _PCH_DPC_AUX_CH_DATA2 0xe4218
#define _PCH_DPC_AUX_CH_DATA3 0xe421c
#define _PCH_DPC_AUX_CH_DATA4 0xe4220
#define _PCH_DPC_AUX_CH_DATA5 0xe4224
 
#define PCH_DP_D 0xe4300
#define PCH_DPD_AUX_CH_CTL 0xe4310
#define PCH_DPD_AUX_CH_DATA1 0xe4314
#define PCH_DPD_AUX_CH_DATA2 0xe4318
#define PCH_DPD_AUX_CH_DATA3 0xe431c
#define PCH_DPD_AUX_CH_DATA4 0xe4320
#define PCH_DPD_AUX_CH_DATA5 0xe4324
#define _PCH_DP_D 0xe4300
#define PCH_DP_D _MMIO(_PCH_DP_D)
#define _PCH_DPD_AUX_CH_CTL 0xe4310
#define _PCH_DPD_AUX_CH_DATA1 0xe4314
#define _PCH_DPD_AUX_CH_DATA2 0xe4318
#define _PCH_DPD_AUX_CH_DATA3 0xe431c
#define _PCH_DPD_AUX_CH_DATA4 0xe4320
#define _PCH_DPD_AUX_CH_DATA5 0xe4324
 
#define PCH_DP_AUX_CH_CTL(port) _MMIO_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL)
#define PCH_DP_AUX_CH_DATA(port, i) _MMIO(_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
 
/* CPT */
#define PORT_TRANS_A_SEL_CPT 0
#define PORT_TRANS_B_SEL_CPT (1<<29)
6635,10 → 6715,10
#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
 
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
#define _TRANS_DP_CTL_A 0xe0300
#define _TRANS_DP_CTL_B 0xe1300
#define _TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
6691,40 → 6771,40
 
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
 
#define VLV_PMWGICZ 0x1300a4
#define VLV_PMWGICZ _MMIO(0x1300a4)
 
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
#define FORCEWAKE_MEDIA_VLV 0x1300b8
#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define VLV_GTLC_WAKE_CTRL 0x130090
#define FORCEWAKE _MMIO(0xA18C)
#define FORCEWAKE_VLV _MMIO(0x1300b0)
#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8)
#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc)
#define FORCEWAKE_ACK_HSW _MMIO(0x130044)
#define FORCEWAKE_ACK _MMIO(0x130090)
#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090)
#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
 
#define VLV_GTLC_PW_STATUS 0x130094
#define VLV_GTLC_PW_STATUS _MMIO(0x130094)
#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MEDIA_GEN9 0xa270
#define FORCEWAKE_RENDER_GEN9 0xa278
#define FORCEWAKE_BLITTER_GEN9 0xa188
#define FORCEWAKE_ACK_MEDIA_GEN9 0x0D88
#define FORCEWAKE_ACK_RENDER_GEN9 0x0D84
#define FORCEWAKE_ACK_BLITTER_GEN9 0x130044
#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */
#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
#define FORCEWAKE_BLITTER_GEN9 _MMIO(0xa188)
#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044)
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ACK _MMIO(0x130040)
#define ECOBUS _MMIO(0xa180)
#define FORCEWAKE_MT_ENABLE (1<<5)
#define VLV_SPAREG2H 0xA194
#define VLV_SPAREG2H _MMIO(0xA194)
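FORCEWAKE is a request/ack pair: the driver sets a request bit, then polls the matching ACK register before touching GT registers that need the hardware awake. A minimal sketch of the multi-threaded variant, assuming dev_priv, I915_READ/I915_WRITE and the driver's _MASKED_BIT_ENABLE helper are available; the in-tree code also bounds the wait and reference-counts per domain:

/* Sketch: request kernel forcewake and spin until the hardware acks.
 * FORCEWAKE_MT takes masked writes: the high 16 bits select which of
 * the low bits the write actually touches. */
static void forcewake_get_sketch(struct drm_i915_private *dev_priv)
{
	I915_WRITE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	while ((I915_READ(FORCEWAKE_MT_ACK) & FORCEWAKE_KERNEL) == 0)
		cpu_relax();
}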
 
#define GTFIFODBG 0x120000
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDROPERR (1<<6)
#define GT_FIFO_BLOBDROPERR (1<<5)
#define GT_FIFO_SB_READ_ABORTERR (1<<4)
6733,23 → 6813,23
#define GT_FIFO_IAWRERR (1<<1)
#define GT_FIFO_IARDERR (1<<0)
 
#define GTFIFOCTL 0x120008
#define GTFIFOCTL _MMIO(0x120008)
#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
 
#define HSW_IDICR 0x9008
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
#define HSW_EDRAM_PRESENT 0x120010
#define HSW_EDRAM_PRESENT _MMIO(0x120010)
#define EDRAM_ENABLED 0x1
 
#define GEN6_UCGCTL1 0x9400
#define GEN6_UCGCTL1 _MMIO(0x9400)
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
 
#define GEN6_UCGCTL2 0x9404
#define GEN6_UCGCTL2 _MMIO(0x9404)
# define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31)
# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
6757,22 → 6837,22
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
 
#define GEN6_UCGCTL3 0x9408
#define GEN6_UCGCTL3 _MMIO(0x9408)
 
#define GEN7_UCGCTL4 0x940c
#define GEN7_UCGCTL4 _MMIO(0x940c)
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
 
#define GEN6_RCGCTL1 0x9410
#define GEN6_RCGCTL2 0x9414
#define GEN6_RSTCTL 0x9420
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
#define GEN6_RSTCTL _MMIO(0x9420)
 
#define GEN8_UCGCTL6 0x9430
#define GEN8_UCGCTL6 _MMIO(0x9430)
#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
 
#define GEN6_GFXPAUSE 0xA000
#define GEN6_RPNSWREQ 0xA008
#define GEN6_GFXPAUSE _MMIO(0xA000)
#define GEN6_RPNSWREQ _MMIO(0xA008)
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
#define HSW_FREQUENCY(x) ((x)<<24)
6779,8 → 6859,8
#define GEN9_FREQUENCY(x) ((x)<<23)
#define GEN6_OFFSET(x) ((x)<<19)
#define GEN6_AGGRESSIVE_TURBO (0<<15)
#define GEN6_RC_VIDEO_FREQ 0xA00C
#define GEN6_RC_CONTROL 0xA090
#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
#define GEN6_RC_CONTROL _MMIO(0xA090)
#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
6790,9 → 6870,9
#define GEN7_RC_CTL_TO_MODE (1<<28)
#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
#define GEN6_RC_CTL_HW_ENABLE (1<<31)
#define GEN6_RP_DOWN_TIMEOUT 0xA010
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010)
#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014)
#define GEN6_RPSTAT1 _MMIO(0xA01C)
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
#define GEN9_CAGF_SHIFT 23
6799,7 → 6879,7
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_CONTROL _MMIO(0xA024)
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
6813,53 → 6893,53
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
#define GEN6_RP_CUR_UP_EI 0xA050
#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
#define GEN6_CURICONT_MASK 0xffffff
#define GEN6_RP_CUR_UP 0xA054
#define GEN6_RP_CUR_UP _MMIO(0xA054)
#define GEN6_CURBSYTAVG_MASK 0xffffff
#define GEN6_RP_PREV_UP 0xA058
#define GEN6_RP_CUR_DOWN_EI 0xA05C
#define GEN6_RP_PREV_UP _MMIO(0xA058)
#define GEN6_RP_CUR_DOWN_EI _MMIO(0xA05C)
#define GEN6_CURIAVG_MASK 0xffffff
#define GEN6_RP_CUR_DOWN 0xA060
#define GEN6_RP_PREV_DOWN 0xA064
#define GEN6_RP_UP_EI 0xA068
#define GEN6_RP_DOWN_EI 0xA06C
#define GEN6_RP_IDLE_HYSTERSIS 0xA070
#define GEN6_RPDEUHWTC 0xA080
#define GEN6_RPDEUC 0xA084
#define GEN6_RPDEUCSW 0xA088
#define GEN6_RC_STATE 0xA094
#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
#define GEN6_RC_SLEEP 0xA0B0
#define GEN6_RCUBMABDTMR 0xA0B0
#define GEN6_RC1e_THRESHOLD 0xA0B4
#define GEN6_RC6_THRESHOLD 0xA0B8
#define GEN6_RC6p_THRESHOLD 0xA0BC
#define VLV_RCEDATA 0xA0BC
#define GEN6_RC6pp_THRESHOLD 0xA0C0
#define GEN6_PMINTRMSK 0xA168
#define GEN6_RP_CUR_DOWN _MMIO(0xA060)
#define GEN6_RP_PREV_DOWN _MMIO(0xA064)
#define GEN6_RP_UP_EI _MMIO(0xA068)
#define GEN6_RP_DOWN_EI _MMIO(0xA06C)
#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xA070)
#define GEN6_RPDEUHWTC _MMIO(0xA080)
#define GEN6_RPDEUC _MMIO(0xA084)
#define GEN6_RPDEUCSW _MMIO(0xA088)
#define GEN6_RC_STATE _MMIO(0xA094)
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xA0A8)
#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xA0AC)
#define GEN6_RC_SLEEP _MMIO(0xA0B0)
#define GEN6_RCUBMABDTMR _MMIO(0xA0B0)
#define GEN6_RC1e_THRESHOLD _MMIO(0xA0B4)
#define GEN6_RC6_THRESHOLD _MMIO(0xA0B8)
#define GEN6_RC6p_THRESHOLD _MMIO(0xA0BC)
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL 0xA294
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS 0xA0C4
#define GEN9_RENDER_PG_IDLE_HYSTERESIS 0xA0C8
#define GEN9_PG_ENABLE 0xA210
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
#define GEN9_RENDER_PG_ENABLE (1<<0)
#define GEN9_MEDIA_PG_ENABLE (1<<1)
 
#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C)
#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
#define PIXEL_OVERLAP_CNT_SHIFT 30
 
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024 /* rps_lock */
#define GEN6_PMIIR 0x44028
#define GEN6_PMIER 0x4402C
#define GEN6_PMISR _MMIO(0x44020)
#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
#define GEN6_PMIIR _MMIO(0x44028)
#define GEN6_PMIER _MMIO(0x4402C)
#define GEN6_PM_MBOX_EVENT (1<<25)
#define GEN6_PM_THERMAL_EVENT (1<<24)
#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
6871,30 → 6951,30
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
 
#define GEN7_GT_SCRATCH(i) (0x4F100 + (i) * 4)
#define GEN7_GT_SCRATCH(i) _MMIO(0x4F100 + (i) * 4)
#define GEN7_GT_SCRATCH_REG_NUM 8
 
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
#define VLV_GFX_CLK_STATUS_BIT (1<<3)
#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
 
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
#define VLV_COUNTER_CONTROL _MMIO(0x138104)
#define VLV_COUNT_RANGE_HIGH (1<<15)
#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
#define VLV_RENDER_RC0_COUNT_EN (1<<4)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
#define VLV_GT_RENDER_RC6 0x138108
#define VLV_GT_MEDIA_RC6 0x13810C
#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
#define VLV_GT_MEDIA_RC6 _MMIO(0x13810C)
 
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
#define VLV_RENDER_C0_COUNT 0x138118
#define VLV_MEDIA_C0_COUNT 0x13811C
#define GEN6_GT_GFX_RC6p _MMIO(0x13810C)
#define GEN6_GT_GFX_RC6pp _MMIO(0x138110)
#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C)
 
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1<<31)
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
6917,12 → 6997,12
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 0x13812C
#define GEN6_PCODE_DATA1 _MMIO(0x13812C)
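GEN6_PCODE_MAILBOX and GEN6_PCODE_DATA implement a doorbell handshake with the PCU firmware: write the payload, ring the mailbox with GEN6_PCODE_READY plus a command, and wait for the hardware to clear READY. A minimal sketch, assuming dev_priv and the usual accessors; the in-tree helper additionally times out rather than spinning forever:

/* Sketch: one mailbox transaction against the PCU firmware. */
static int pcode_rw_sketch(struct drm_i915_private *dev_priv,
			   u32 mbox, u32 *val)
{
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;		/* previous request still pending */

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	while (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		cpu_relax();		/* hardware clears READY when done */

	*val = I915_READ(GEN6_PCODE_DATA);
	return 0;
}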
 
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
#define GEN6_RCn_MASK 7
#define GEN6_RC0 0
6930,26 → 7010,26
#define GEN6_RC6 3
#define GEN6_RC7 4
 
#define GEN8_GT_SLICE_INFO 0x138064
#define GEN8_GT_SLICE_INFO _MMIO(0x138064)
#define GEN8_LSLICESTAT_MASK 0x7
 
#define CHV_POWER_SS0_SIG1 0xa720
#define CHV_POWER_SS1_SIG1 0xa728
#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
#define CHV_SS_PG_ENABLE (1<<1)
#define CHV_EU08_PG_ENABLE (1<<9)
#define CHV_EU19_PG_ENABLE (1<<17)
#define CHV_EU210_PG_ENABLE (1<<25)
 
#define CHV_POWER_SS0_SIG2 0xa724
#define CHV_POWER_SS1_SIG2 0xa72c
#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
#define CHV_EU311_PG_ENABLE (1<<1)
 
#define GEN9_SLICE_PGCTL_ACK(slice) (0x804c + (slice)*0x4)
#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4)
#define GEN9_PGCTL_SLICE_ACK (1 << 0)
#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
 
#define GEN9_SS01_EU_PGCTL_ACK(slice) (0x805c + (slice)*0x8)
#define GEN9_SS23_EU_PGCTL_ACK(slice) (0x8060 + (slice)*0x8)
#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8)
#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8)
#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
6959,18 → 7039,17
#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
 
#define GEN7_MISCCPCTL (0x9424)
#define GEN7_MISCCPCTL _MMIO(0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6)
 
#define GEN8_GARBCNTL 0xB004
#define GEN8_GARBCNTL _MMIO(0xB004)
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
 
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
#define GEN7_PARITY_ERROR_VALID (1<<13)
#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
6983,119 → 7062,102
((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
#define GEN7_L3CDERRST1_ENABLE (1<<7)
 
#define GEN7_L3LOG_BASE 0xB070
#define HSW_L3LOG_BASE_SLICE1 0xB270
#define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
#define GEN7_L3LOG_SIZE 0x80
 
#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
 
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
 
#define GEN8_ROW_CHICKEN 0xe4f0
#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
 
#define GEN7_ROW_CHICKEN2 0xe4f4
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1<<0)
 
#define HSW_ROW_CHICKEN3 0xe49c
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
 
#define HALF_SLICE_CHICKEN2 0xe180
#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
#define GEN8_ST_PO_DISABLE (1<<13)
 
#define HALF_SLICE_CHICKEN3 0xe184
#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
 
#define GEN9_HALF_SLICE_CHICKEN7 0xe194
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
 
/* Audio */
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
 
#define G4X_AUD_CNTL_ST 0x620B4
#define G4X_AUD_CNTL_ST _MMIO(0x620B4)
#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
#define G4X_ELDV_DEVCTG (1 << 14)
#define G4X_ELD_ADDR_MASK (0xf << 5)
#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID 0x6210C
#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C)
 
#define _IBX_HDMIW_HDMIEDID_A 0xE2050
#define _IBX_HDMIW_HDMIEDID_B 0xE2150
#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
_IBX_HDMIW_HDMIEDID_A, \
#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
_IBX_HDMIW_HDMIEDID_B)
#define _IBX_AUD_CNTL_ST_A 0xE20B4
#define _IBX_AUD_CNTL_ST_B 0xE21B4
#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
_IBX_AUD_CNTL_ST_A, \
#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
_IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
#define IBX_AUD_CNTL_ST2 0xE20C0
#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0)
#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
 
#define _CPT_HDMIW_HDMIEDID_A 0xE5050
#define _CPT_HDMIW_HDMIEDID_B 0xE5150
#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
_CPT_HDMIW_HDMIEDID_A, \
_CPT_HDMIW_HDMIEDID_B)
#define CPT_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
#define _CPT_AUD_CNTL_ST_A 0xE50B4
#define _CPT_AUD_CNTL_ST_B 0xE51B4
#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
_CPT_AUD_CNTL_ST_A, \
_CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
#define CPT_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 _MMIO(0xE50C0)
 
#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
_VLV_HDMIW_HDMIEDID_A, \
_VLV_HDMIW_HDMIEDID_B)
#define VLV_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
_VLV_AUD_CNTL_ST_A, \
_VLV_AUD_CNTL_ST_B)
#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
#define VLV_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
#define VLV_AUD_CNTL_ST2 _MMIO(VLV_DISPLAY_BASE + 0x620C0)
 
/* These are the 4 32-bit write offset registers, one per stream
 * output buffer. Each determines the offset from its
 * 3DSTATE_SO_BUFFER at which the next streamed vertex output is written.
 */
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
#define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4)
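As a small usage illustration (a sketch, assuming dev_priv and I915_READ are in scope), snapshotting all four per-stream write offsets:

/* Sketch: capture the current streamout write offset of each stream. */
u32 so_offsets[4];
int n;

for (n = 0; n < 4; n++)
	so_offsets[n] = I915_READ(GEN7_SO_WRITE_OFFSET(n));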
 
#define _IBX_AUD_CONFIG_A 0xe2000
#define _IBX_AUD_CONFIG_B 0xe2100
#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
_IBX_AUD_CONFIG_A, \
_IBX_AUD_CONFIG_B)
#define IBX_AUD_CFG(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
#define _CPT_AUD_CONFIG_A 0xe5000
#define _CPT_AUD_CONFIG_B 0xe5100
#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
_CPT_AUD_CONFIG_A, \
_CPT_AUD_CONFIG_B)
#define CPT_AUD_CFG(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
_VLV_AUD_CONFIG_A, \
_VLV_AUD_CONFIG_B)
#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
 
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
7120,61 → 7182,51
/* HSW Audio */
#define _HSW_AUD_CONFIG_A 0x65000
#define _HSW_AUD_CONFIG_B 0x65100
#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
_HSW_AUD_CONFIG_A, \
_HSW_AUD_CONFIG_B)
#define HSW_AUD_CFG(pipe) _MMIO_PIPE(pipe, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
 
#define _HSW_AUD_MISC_CTRL_A 0x65010
#define _HSW_AUD_MISC_CTRL_B 0x65110
#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
_HSW_AUD_MISC_CTRL_A, \
_HSW_AUD_MISC_CTRL_B)
#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
 
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
_HSW_AUD_DIP_ELD_CTRL_ST_A, \
_HSW_AUD_DIP_ELD_CTRL_ST_B)
#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
 
/* Audio Digital Converter */
#define _HSW_AUD_DIG_CNVT_1 0x65080
#define _HSW_AUD_DIG_CNVT_2 0x65180
#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
_HSW_AUD_DIG_CNVT_1, \
_HSW_AUD_DIG_CNVT_2)
#define AUD_DIG_CNVT(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
#define DIP_PORT_SEL_MASK 0x3
 
#define _HSW_AUD_EDID_DATA_A 0x65050
#define _HSW_AUD_EDID_DATA_B 0x65150
#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
_HSW_AUD_EDID_DATA_A, \
_HSW_AUD_EDID_DATA_B)
#define HSW_AUD_EDID_DATA(pipe) _MMIO_PIPE(pipe, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
 
#define HSW_AUD_PIPE_CONV_CFG 0x6507c
#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0
#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
 
#define HSW_AUD_CHICKENBIT 0x65f10
#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
 
/* HSW Power Wells */
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
#define HSW_PWR_WELL_BIOS _MMIO(0x45400) /* CTL1 */
#define HSW_PWR_WELL_DRIVER _MMIO(0x45404) /* CTL2 */
#define HSW_PWR_WELL_KVMR _MMIO(0x45408) /* CTL3 */
#define HSW_PWR_WELL_DEBUG _MMIO(0x4540C) /* CTL4 */
#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
#define HSW_PWR_WELL_FORCE_ON (1<<19)
#define HSW_PWR_WELL_CTL6 0x45414
#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
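Each control register pairs a request bit with a status bit, so enabling a well is a write followed by a poll on the same register. A minimal sketch against the driver-owned control (CTL2), assuming dev_priv; the real code bounds the wait and handles the disable path symmetrically:

/* Sketch: request the power well, then wait for it to report enabled. */
I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE_REQUEST);

while ((I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
	cpu_relax();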
 
/* SKL Fuse Status */
#define SKL_FUSE_STATUS 0x42000
#define SKL_FUSE_STATUS _MMIO(0x42000)
#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
#define SKL_FUSE_PG0_DIST_STATUS (1<<27)
#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
7181,11 → 7233,11
#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
 
/* Per-pipe DDI Function Control */
#define TRANS_DDI_FUNC_CTL_A 0x60400
#define TRANS_DDI_FUNC_CTL_B 0x61400
#define TRANS_DDI_FUNC_CTL_C 0x62400
#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
#define _TRANS_DDI_FUNC_CTL_C 0x62400
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
 
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
7215,9 → 7267,9
#define TRANS_DDI_BFI_ENABLE (1<<4)
 
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
#define DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE (1<<31)
#define DP_TP_CTL_MODE_SST (0<<27)
#define DP_TP_CTL_MODE_MST (1<<27)
7233,9 → 7285,9
#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
 
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
#define DP_TP_STATUS_IDLE_DONE (1<<25)
#define DP_TP_STATUS_ACT_SENT (1<<24)
#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
7245,9 → 7297,9
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
 
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define _DDI_BUF_CTL_A 0x64000
#define _DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
#define DDI_BUF_EMP_MASK (0xf<<24)
7260,17 → 7312,17
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
 
/* DDI Buffer Translations */
#define DDI_BUF_TRANS_A 0x64E00
#define DDI_BUF_TRANS_B 0x64E60
#define DDI_BUF_TRANS_LO(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8)
#define DDI_BUF_TRANS_HI(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8 + 4)
#define _DDI_BUF_TRANS_A 0x64E00
#define _DDI_BUF_TRANS_B 0x64E60
#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
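Each translation entry is an eight-byte LO/HI pair, which is why both macros step by (i) * 8. A sketch of loading a table of n entries, where ddi_translations, trans1 and trans2 are hypothetical stand-ins for the driver's per-platform tables:

/* Sketch: program n voltage-swing/pre-emphasis entries on a port. */
for (i = 0; i < n; i++) {
	I915_WRITE(DDI_BUF_TRANS_LO(port, i), ddi_translations[i].trans1);
	I915_WRITE(DDI_BUF_TRANS_HI(port, i), ddi_translations[i].trans2);
}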
 
/* The Sideband Interface (SBI) is programmed indirectly: SBI_ADDR
 * holds the target register offset and SBI_DATA carries the
 * payload. */
#define SBI_ADDR 0xC6000
#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
#define SBI_ADDR _MMIO(0xC6000)
#define SBI_DATA _MMIO(0xC6004)
#define SBI_CTL_STAT _MMIO(0xC6008)
#define SBI_CTL_DEST_ICLK (0x0<<16)
#define SBI_CTL_DEST_MPHY (0x1<<16)
#define SBI_CTL_OP_IORD (0x2<<8)
7283,6 → 7335,7
#define SBI_READY (0x0<<0)
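Putting the control bits above together, a minimal sketch of an indirect SBI read, assuming dev_priv and the usual accessors; SBI_BUSY (the busy state of the low bit, whose idle state is SBI_READY) is assumed from parts of the header elided here, and the in-tree helper times out instead of spinning:

/* Sketch: read one register through the indirect sideband interface. */
static u32 sbi_read_sketch(struct drm_i915_private *dev_priv,
			   u16 offset, bool dest_mphy)
{
	/* The target register offset goes in the high half of SBI_ADDR. */
	I915_WRITE(SBI_ADDR, (u32)offset << 16);
	I915_WRITE(SBI_CTL_STAT,
		   (dest_mphy ? SBI_CTL_DEST_MPHY : SBI_CTL_DEST_ICLK) |
		   SBI_CTL_OP_IORD | SBI_BUSY);

	while (I915_READ(SBI_CTL_STAT) & SBI_BUSY)
		cpu_relax();	/* transaction completes when busy clears */

	return I915_READ(SBI_DATA);
}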
 
/* SBI offsets */
#define SBI_SSCDIVINTPHASE 0x0200
#define SBI_SSCDIVINTPHASE6 0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
7290,6 → 7343,7
#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
#define SBI_SSCDITHPHASE 0x0204
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
#define SBI_SSCCTL_PATHALT (1<<3)
7301,12 → 7355,12
#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
 
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
#define PIXCLK_GATE _MMIO(0xC6020)
#define PIXCLK_GATE_UNGATE (1<<0)
#define PIXCLK_GATE_GATE (0<<0)
 
/* SPLL */
#define SPLL_CTL 0x46020
#define SPLL_CTL _MMIO(0x46020)
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SSC (1<<28)
#define SPLL_PLL_NON_SSC (2<<28)
7318,9 → 7372,9
#define SPLL_PLL_FREQ_MASK (3<<26)
 
/* WRPLL */
#define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
#define _WRPLL_CTL1 0x46040
#define _WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SSC (1<<28)
#define WRPLL_PLL_NON_SSC (2<<28)
7337,9 → 7391,9
#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
 
/* Port clock selection */
#define PORT_CLK_SEL_A 0x46100
#define PORT_CLK_SEL_B 0x46104
#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
#define _PORT_CLK_SEL_A 0x46100
#define _PORT_CLK_SEL_B 0x46104
#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
7351,20 → 7405,20
#define PORT_CLK_SEL_MASK (7<<29)
 
/* Transcoder clock selection */
#define TRANS_CLK_SEL_A 0x46140
#define TRANS_CLK_SEL_B 0x46144
#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
#define _TRANS_CLK_SEL_A 0x46140
#define _TRANS_CLK_SEL_B 0x46144
#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
/* For each transcoder, we need to select the corresponding port clock */
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
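For example (a sketch, assuming dev_priv and I915_WRITE), routing a transcoder to its port clock on enable and gating it again on disable:

/* Sketch: route transcoder 'tran' to the clock of DDI port 'port'. */
I915_WRITE(TRANS_CLK_SEL(tran), TRANS_CLK_SEL_PORT(port));

/* ...and on the disable path, gate the transcoder clock again. */
I915_WRITE(TRANS_CLK_SEL(tran), TRANS_CLK_SEL_DISABLED);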
 
#define CDCLK_FREQ 0x46200
#define CDCLK_FREQ _MMIO(0x46200)
 
#define TRANSA_MSA_MISC 0x60410
#define TRANSB_MSA_MISC 0x61410
#define TRANSC_MSA_MISC 0x62410
#define TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
#define _TRANSA_MSA_MISC 0x60410
#define _TRANSB_MSA_MISC 0x61410
#define _TRANSC_MSA_MISC 0x62410
#define _TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
 
#define TRANS_MSA_SYNC_CLK (1<<0)
#define TRANS_MSA_6_BPC (0<<5)
7374,7 → 7428,7
#define TRANS_MSA_16_BPC (4<<5)
 
/* LCPLL Control */
#define LCPLL_CTL 0x130040
#define LCPLL_CTL _MMIO(0x130040)
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
#define LCPLL_CLK_FREQ_MASK (3<<26)
7394,7 → 7448,7
*/
 
/* CDCLK_CTL */
#define CDCLK_CTL 0x46000
#define CDCLK_CTL _MMIO(0x46000)
#define CDCLK_FREQ_SEL_MASK (3<<26)
#define CDCLK_FREQ_450_432 (0<<26)
#define CDCLK_FREQ_540 (1<<26)
7410,12 → 7464,12
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
 
/* LCPLL_CTL */
#define LCPLL1_CTL 0x46010
#define LCPLL2_CTL 0x46014
#define LCPLL1_CTL _MMIO(0x46010)
#define LCPLL2_CTL _MMIO(0x46014)
#define LCPLL_PLL_ENABLE (1<<31)
 
/* DPLL control1 */
#define DPLL_CTRL1 0x6C058
#define DPLL_CTRL1 _MMIO(0x6C058)
#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
7430,7 → 7484,7
#define DPLL_CTRL1_LINK_RATE_2160 5
 
/* DPLL control2 */
#define DPLL_CTRL2 0x6C05C
#define DPLL_CTRL2 _MMIO(0x6C05C)
#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
7438,21 → 7492,21
#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
 
/* DPLL Status */
#define DPLL_STATUS 0x6C060
#define DPLL_STATUS _MMIO(0x6C060)
#define DPLL_LOCK(id) (1<<((id)*8))
 
/* DPLL cfg */
#define DPLL1_CFGCR1 0x6C040
#define DPLL2_CFGCR1 0x6C048
#define DPLL3_CFGCR1 0x6C050
#define _DPLL1_CFGCR1 0x6C040
#define _DPLL2_CFGCR1 0x6C048
#define _DPLL3_CFGCR1 0x6C050
#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
 
#define DPLL1_CFGCR2 0x6C044
#define DPLL2_CFGCR2 0x6C04C
#define DPLL3_CFGCR2 0x6C054
#define _DPLL1_CFGCR2 0x6C044
#define _DPLL2_CFGCR2 0x6C04C
#define _DPLL3_CFGCR2 0x6C054
#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
7470,41 → 7524,41
#define DPLL_CFGCR2_PDIV_7 (4<<2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
 
#define DPLL_CFGCR1(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
#define DPLL_CFGCR2(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
 
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL 0x6d000
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
#define BXT_DE_PLL_RATIO_MASK 0xff
 
#define BXT_DE_PLL_ENABLE 0x46070
#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
#define BXT_DE_PLL_LOCK (1 << 30)
 
/* GEN9 DC */
#define DC_STATE_EN 0x45504
#define DC_STATE_EN _MMIO(0x45504)
#define DC_STATE_DISABLE 0
#define DC_STATE_EN_UPTO_DC5 (1<<0)
#define DC_STATE_EN_DC9 (1<<3)
#define DC_STATE_EN_UPTO_DC6 (2<<0)
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
 
#define DC_STATE_DEBUG 0x45520
#define DC_STATE_DEBUG _MMIO(0x45520)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
 
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW 0x138144
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
#define D_COMP_COMP_FORCE (1<<8)
#define D_COMP_COMP_DISABLE (1<<0)
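A sketch of the hsw_read_dcomp()/hsw_write_dcomp() split the comment above refers to, assuming the upstream pcode helpers (locking omitted for brevity):

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	/* On HSW the register is only writable through the pcode
	 * mailbox; on BDW it is ordinary MMIO. */
	if (IS_HASWELL(dev_priv->dev))
		sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val);
	else
		I915_WRITE(D_COMP_BDW, val);
}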
 
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
#define PIPE_WM_LINETIME_B 0x45274
#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
PIPE_WM_LINETIME_B)
#define _PIPE_WM_LINETIME_A 0x45270
#define _PIPE_WM_LINETIME_B 0x45274
#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
#define PIPE_WM_LINETIME_MASK (0x1ff)
#define PIPE_WM_LINETIME_TIME(x) ((x))
#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
7511,17 → 7565,18
#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
 
/* SFUSE_STRAP */
#define SFUSE_STRAP 0xc2014
#define SFUSE_STRAP _MMIO(0xc2014)
#define SFUSE_STRAP_FUSE_LOCK (1<<13)
#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
#define SFUSE_STRAP_CRT_DISABLED (1<<6)
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
 
#define WM_MISC 0x45260
#define WM_MISC _MMIO(0x45260)
#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
 
#define WM_DBG 0x45280
#define WM_DBG _MMIO(0x45280)
#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
#define WM_DBG_DISALLOW_SPRITE (1<<2)
7558,28 → 7613,29
#define _PIPE_B_CSC_POSTOFF_ME 0x49144
#define _PIPE_B_CSC_POSTOFF_LO 0x49148
 
#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
#define PIPE_CSC_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
#define PIPE_CSC_COEFF_BY(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
#define PIPE_CSC_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
#define PIPE_CSC_COEFF_BU(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
#define PIPE_CSC_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
#define PIPE_CSC_COEFF_BV(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
#define PIPE_CSC_MODE(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
#define PIPE_CSC_PREOFF_HI(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
#define PIPE_CSC_PREOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
#define PIPE_CSC_PREOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
#define PIPE_CSC_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
 
/* MIPI DSI registers */
 
#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
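_PORT3 selects one of three addresses by index, which is why a dummy 0 stands in for the nonexistent MIPI port B above. A sketch of the generic helper it is assumed to build on:

/* Index into a compound-literal array of register offsets. */
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
#define _PORT3(port, ...) _PICK(port, __VA_ARGS__)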
 
/* BXT MIPI clock controls */
#define BXT_MAX_VAR_OUTPUT_KHZ 39500
 
#define BXT_MIPI_CLOCK_CTL 0x46090
#define BXT_MIPI_CLOCK_CTL _MMIO(0x46090)
#define BXT_MIPI1_DIV_SHIFT 26
#define BXT_MIPI2_DIV_SHIFT 10
#define BXT_MIPI_DIV_SHIFT(port) \
7641,20 → 7697,20
/* BXT MIPI mode configure */
#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
#define BXT_MIPI_TRANS_HACTIVE(tc) _MIPI_PORT(tc, \
#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
_BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
 
#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
#define BXT_MIPI_TRANS_VACTIVE(tc) _MIPI_PORT(tc, \
#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
_BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
 
#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
#define BXT_MIPI_TRANS_VTOTAL(tc) _MIPI_PORT(tc, \
#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
_BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
 
#define BXT_DSI_PLL_CTL 0x161000
#define BXT_DSI_PLL_CTL _MMIO(0x161000)
#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
7670,21 → 7726,20
#define BXT_DSI_PLL_RATIO_MAX 0x7D
#define BXT_DSI_PLL_RATIO_MIN 0x22
#define BXT_DSI_PLL_RATIO_MASK 0xFF
#define BXT_REF_CLOCK_KHZ 19500
#define BXT_REF_CLOCK_KHZ 19200
 
#define BXT_DSI_PLL_ENABLE 0x46080
#define BXT_DSI_PLL_ENABLE _MMIO(0x46080)
#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
#define BXT_DSI_PLL_LOCKED (1 << 30)
 
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
 
/* BXT port control */
#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
#define BXT_MIPI_PORT_CTRL(tc) _MIPI_PORT(tc, _BXT_MIPIA_PORT_CTRL, \
_BXT_MIPIC_PORT_CTRL)
#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
 
#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
7728,8 → 7783,7
 
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
#define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, \
_MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
 
7740,8 → 7794,7
 
#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
#define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, \
_MIPIC_DEVICE_READY)
#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
7751,12 → 7804,10
 
#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
#define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, \
_MIPIC_INTR_STAT)
#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
#define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, \
_MIPIC_INTR_EN)
#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
7792,8 → 7843,7
 
#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
#define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \
_MIPIC_DSI_FUNC_PRG)
#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
7816,32 → 7866,27
 
#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
#define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \
_MIPIC_HS_TX_TIMEOUT)
#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
 
#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
#define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \
_MIPIC_LP_RX_TIMEOUT)
#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
 
#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, \
_MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
 
#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \
_MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
 
#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
#define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \
_MIPIC_DPI_RESOLUTION)
#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
7849,8 → 7894,7
 
#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, \
_MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
7858,50 → 7902,41
/* regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
_MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
 
#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
#define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, \
_MIPIC_HBP_COUNT)
#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
 
#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
#define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, \
_MIPIC_HFP_COUNT)
#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
 
#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \
_MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
 
#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
_MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
 
#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
#define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, \
_MIPIC_VBP_COUNT)
#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
 
#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
#define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, \
_MIPIC_VFP_COUNT)
#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
 
#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, \
_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
 
/* regs above are bits 15:0 */
 
#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
#define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \
_MIPIC_DPI_CONTROL)
#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
7912,21 → 7947,19
 
#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
#define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, \
_MIPIC_DPI_DATA)
#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
 
#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
#define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, \
_MIPIC_INIT_COUNT)
#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
 
#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \
#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
7933,8 → 7966,7
 
#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, \
_MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
7944,8 → 7976,7
 
#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
#define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \
_MIPIC_EOT_DISABLE)
#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
7957,8 → 7988,7
 
#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
#define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \
_MIPIC_LP_BYTECLK)
#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
 
7965,23 → 7995,19
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
#define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \
_MIPIC_LP_GEN_DATA)
#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
 
/* bits 31:0 */
#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
#define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \
_MIPIC_HS_GEN_DATA)
#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
 
#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
#define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \
_MIPIC_LP_GEN_CTRL)
#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
#define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \
_MIPIC_HS_GEN_CTRL)
#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
7994,8 → 8020,7
 
#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
#define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \
_MIPIC_GEN_FIFO_STAT)
#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
8013,8 → 8038,7
 
#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, \
_MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
8021,8 → 8045,7
 
#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
#define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \
_MIPIC_DPHY_PARAM)
#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
8035,15 → 8058,11
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
#define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \
_MIPIC_DBI_BW_CTRL)
#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
 
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb088)
#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, \
_MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088)
#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
8051,19 → 8070,16
 
#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
#define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, \
_MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
 
#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
#define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, \
_MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
#define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \
_MIPIC_INTR_EN_REG_1)
#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
 
/* XXX: only pipe A ?!? */
8083,8 → 8099,7
 
#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
#define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, \
_MIPIC_CTRL)
#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
8097,14 → 8112,11
#define RGB_FLIP_TO_BGR (1 << 2)
 
#define BXT_PIPE_SELECT_MASK (7 << 7)
#define BXT_PIPE_SELECT_C (2 << 7)
#define BXT_PIPE_SELECT_B (1 << 7)
#define BXT_PIPE_SELECT_A (0 << 7)
#define BXT_PIPE_SELECT(pipe) ((pipe) << 7)
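With the parameterized form, DSI pipe routing reduces to a mask-and-set. A usage sketch (tmp, port, and pipe are assumed locals, following the upstream DSI code):

tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
tmp |= BXT_PIPE_SELECT(pipe);
I915_WRITE(MIPI_CTRL(port), tmp);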
 
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
_MIPIC_DATA_ADDRESS)
#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
8111,15 → 8123,13
 
#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
#define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \
_MIPIC_DATA_LENGTH)
#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
 
#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
#define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, \
_MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
8128,21 → 8138,17
 
#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
#define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \
_MIPIC_COMMAND_LENGTH)
#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
 
#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
#define MIPI_READ_DATA_RETURN(port, n) \
(_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \
+ 4 * (n)) /* n: 0...7 */
#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
 
#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
#define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, \
_MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
 
/* For UMS only (deprecated): */
8150,12 → 8156,12
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
 
/* MOCS (Memory Object Control State) registers */
#define GEN9_LNCFCMOCS0 0xb020 /* L3 Cache Control base */
#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
 
#define GEN9_GFX_MOCS_0 0xc800 /* Graphics MOCS base register*/
#define GEN9_MFX0_MOCS_0 0xc900 /* Media 0 MOCS base register*/
#define GEN9_MFX1_MOCS_0 0xca00 /* Media 1 MOCS base register*/
#define GEN9_VEBOX_MOCS_0 0xcb00 /* Video MOCS base register*/
#define GEN9_BLT_MOCS_0 0xcc00 /* Blitter MOCS base register*/
#define GEN9_GFX_MOCS(i) _MMIO(0xc800 + (i) * 4) /* Graphics MOCS registers */
#define GEN9_MFX0_MOCS(i) _MMIO(0xc900 + (i) * 4) /* Media 0 MOCS registers */
#define GEN9_MFX1_MOCS(i) _MMIO(0xca00 + (i) * 4) /* Media 1 MOCS registers */
#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
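The indexed forms replace open-coded base-plus-offset arithmetic at the call sites. A usage sketch (the table layout is an assumption, not part of this diff):

/* Program one MOCS control word per index. */
for (index = 0; index < table->size; index++)
	I915_WRITE(GEN9_GFX_MOCS(index),
		   table->table[index].control_value);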
 
#endif /* _I915_REG_H_ */
/drivers/video/drm/i915/i915_vgpu.c
69,13 → 69,13
if (!IS_HASWELL(dev))
return;
 
magic = readq(dev_priv->regs + vgtif_reg(magic));
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
if (magic != VGT_MAGIC)
return;
 
version = INTEL_VGT_IF_VERSION_ENCODE(
readw(dev_priv->regs + vgtif_reg(version_major)),
readw(dev_priv->regs + vgtif_reg(version_minor)));
__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
if (version != INTEL_VGT_IF_VERSION) {
DRM_INFO("VGT interface version mismatch!\n");
return;
/drivers/video/drm/i915/i915_vgpu.h
92,14 → 92,10
uint32_t g2v_notify;
uint32_t rsv6[7];
 
uint32_t pdp0_lo;
uint32_t pdp0_hi;
uint32_t pdp1_lo;
uint32_t pdp1_hi;
uint32_t pdp2_lo;
uint32_t pdp2_hi;
uint32_t pdp3_lo;
uint32_t pdp3_hi;
struct {
uint32_t lo;
uint32_t hi;
} pdp[4];
 
uint32_t execlist_context_descriptor_lo;
uint32_t execlist_context_descriptor_hi;
108,7 → 104,7
} __packed;
 
#define vgtif_reg(x) \
(VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)
_MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
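The NULL-pointer cast is an open-coded offsetof(); with the standard macro the definition is equivalent to:

#define vgtif_reg(x) \
	_MMIO(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))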
 
/* vGPU display status to be used by the host side */
#define VGT_DRV_DISPLAY_NOT_READY 0
/drivers/video/drm/i915/intel_atomic.c
94,6 → 94,10
__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
 
crtc_state->update_pipe = false;
crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
 
return &crtc_state->base;
}
205,8 → 209,6
* but since this plane is unchanged just do the
* minimum required validation.
*/
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
intel_crtc->atomic.wait_for_flips = true;
crtc_state->base.planes_changed = true;
}
 
/drivers/video/drm/i915/intel_atomic_plane.c
84,6 → 84,7
state = &intel_state->base;
 
__drm_atomic_helper_plane_duplicate_state(plane, state);
intel_state->wait_req = NULL;
 
return state;
}
100,6 → 101,7
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
WARN_ON(state && to_intel_plane_state(state)->wait_req);
drm_atomic_helper_plane_destroy_state(plane, state);
}
 
/drivers/video/drm/i915/intel_audio.c
161,9 → 161,9
}
 
static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda,
int reg_edid)
i915_reg_t reg_eldv, uint32_t bits_eldv,
i915_reg_t reg_elda, uint32_t bits_elda,
i915_reg_t reg_edid)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
364,8 → 364,7
enum port port = intel_dig_port->port;
enum pipe pipe = intel_crtc->pipe;
uint32_t tmp, eldv;
int aud_config;
int aud_cntrl_st2;
i915_reg_t aud_config, aud_cntrl_st2;
 
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
376,7 → 375,7
if (HAS_PCH_IBX(dev_priv->dev)) {
aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(dev_priv)) {
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
aud_config = VLV_AUD_CFG(pipe);
aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
} else {
416,10 → 415,7
uint32_t eldv;
uint32_t tmp;
int len, i;
int hdmiw_hdmiedid;
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
 
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), drm_eld_size(eld));
439,7 → 435,8
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(connector->dev)) {
} else if (IS_VALLEYVIEW(connector->dev) ||
IS_CHERRYVIEW(connector->dev)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
525,6 → 522,12
dev_priv->display.audio_codec_enable(connector, intel_encoder,
adjusted_mode);
 
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = connector;
/* referenced in audio callbacks */
dev_priv->dig_port_map[port] = intel_encoder;
mutex_unlock(&dev_priv->av_mutex);
 
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
}
548,6 → 551,11
if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(intel_encoder);
 
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = NULL;
dev_priv->dig_port_map[port] = NULL;
mutex_unlock(&dev_priv->av_mutex);
 
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
}
591,7 → 599,7
struct drm_i915_private *dev_priv = dev_to_i915(dev);
u32 tmp;
 
if (!IS_SKYLAKE(dev_priv))
if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
return;
 
/*
632,18 → 640,18
int port, int rate)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
struct drm_device *drm_dev = dev_priv->dev;
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
struct intel_crtc *crtc;
struct drm_display_mode *mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
enum pipe pipe = -1;
enum pipe pipe = INVALID_PIPE;
u32 tmp;
int n;
int err = 0;
 
/* HSW, BDW SKL need this fix */
/* HSW, BDW, SKL, KBL need this fix */
if (!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv) &&
!IS_BROADWELL(dev_priv) &&
!IS_HASWELL(dev_priv))
return 0;
650,26 → 658,22
 
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
for_each_intel_encoder(drm_dev, intel_encoder) {
if (intel_encoder->type != INTEL_OUTPUT_HDMI)
continue;
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
if (port == intel_dig_port->port) {
intel_encoder = dev_priv->dig_port_map[port];
/* intel_encoder might be NULL for DP MST */
if (!intel_encoder || !intel_encoder->base.crtc ||
intel_encoder->type != INTEL_OUTPUT_HDMI) {
DRM_DEBUG_KMS("no valid port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
crtc = to_intel_crtc(intel_encoder->base.crtc);
if (!crtc) {
DRM_DEBUG_KMS("%s: crtc is NULL\n", __func__);
continue;
}
pipe = crtc->pipe;
break;
}
}
 
if (pipe == INVALID_PIPE) {
DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
mutex_unlock(&dev_priv->av_mutex);
return -ENODEV;
err = -ENODEV;
goto unlock;
}
 
DRM_DEBUG_KMS("pipe %c connects port %c\n",
pipe_name(pipe), port_name(port));
mode = &crtc->config->base.adjusted_mode;
682,8 → 686,7
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
mutex_unlock(&dev_priv->av_mutex);
return 0;
goto unlock;
}
 
n = audio_config_get_n(mode, rate);
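	/*
	 * ACR sketch: the N looked up here satisfies the HDMI audio
	 * clock regeneration relation fs = f_TMDS * N / (128 * CTS);
	 * e.g. the recommended N for 48 kHz audio is 6144.
	 */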
693,8 → 696,7
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
mutex_unlock(&dev_priv->av_mutex);
return 0;
goto unlock;
}
 
/* 3. set the N/CTS/M */
702,10 → 704,39
tmp = audio_config_setup_n_reg(n, tmp);
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
unlock:
mutex_unlock(&dev_priv->av_mutex);
return 0;
return err;
}
 
static int i915_audio_component_get_eld(struct device *dev, int port,
bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
const u8 *eld;
int ret = -EINVAL;
 
mutex_lock(&dev_priv->av_mutex);
intel_encoder = dev_priv->dig_port_map[port];
/* intel_encoder might be NULL for DP MST */
if (intel_encoder) {
ret = 0;
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
*enabled = intel_dig_port->audio_connector != NULL;
if (*enabled) {
eld = intel_dig_port->audio_connector->eld;
ret = drm_eld_size(eld);
memcpy(buf, eld, min(max_bytes, ret));
}
}
 
mutex_unlock(&dev_priv->av_mutex);
return ret;
}
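For context, a consumer-side sketch of this op; the component pointer, port, and buffer handling are assumptions, not part of this diff:

unsigned char buf[128];	/* connector ELDs are at most 128 bytes */
bool enabled;
int size;

size = acomp->ops->get_eld(acomp->dev, port, &enabled, buf, sizeof(buf));
if (size > 0 && enabled) {
	/* buf now holds min(size, sizeof(buf)) valid ELD bytes */
}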
 
static const struct i915_audio_component_ops i915_audio_component_ops = {
.owner = THIS_MODULE,
.get_power = i915_audio_component_get_power,
713,6 → 744,7
.codec_wake_override = i915_audio_component_codec_wake_override,
.get_cdclk_freq = i915_audio_component_get_cdclk_freq,
.sync_audio_rate = i915_audio_component_sync_audio_rate,
.get_eld = i915_audio_component_get_eld,
};
 
static int i915_audio_component_bind(struct device *i915_dev,
773,6 → 805,16
*/
void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
 
// ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
// if (ret < 0) {
// DRM_ERROR("failed to add audio component (%d)\n", ret);
// /* continue with reduced functionality */
// return;
// }
 
dev_priv->audio_component_registered = true;
}
 
/**
784,4 → 826,9
*/
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
if (!dev_priv->audio_component_registered)
return;
 
// component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
dev_priv->audio_component_registered = false;
}
/drivers/video/drm/i915/intel_bios.c
24,7 → 24,7
* Eric Anholt <eric@anholt.net>
*
*/
#include <linux/dmi.h>
 
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
332,10 → 332,10
drm_mode_debug_printmodeline(panel_fixed_mode);
}
 
static int intel_bios_ssc_frequency(struct drm_device *dev,
static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
bool alternate)
{
switch (INTEL_INFO(dev)->gen) {
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
return alternate ? 66667 : 48000;
case 3:
350,16 → 350,20
parse_general_features(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
struct drm_device *dev = dev_priv->dev;
const struct bdb_general_features *general;
 
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
if (!general)
return;
 
dev_priv->vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
if (bdb->version >= 155 &&
(HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
dev_priv->vbt.int_crt_support = general->int_crt_support;
dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
dev_priv->vbt.lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
370,7 → 374,6
dev_priv->vbt.display_clock_mode,
dev_priv->vbt.fdi_rx_polarity_inverted);
}
}
 
static void
parse_general_definitions(struct drm_i915_private *dev_priv,
1054,10 → 1057,9
static void parse_ddi_ports(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
struct drm_device *dev = dev_priv->dev;
enum port port;
 
if (!HAS_DDI(dev))
if (!HAS_DDI(dev_priv))
return;
 
if (!dev_priv->vbt.child_dev_num)
1170,7 → 1172,6
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
enum port port;
 
dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
1195,8 → 1196,8
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS.
*/
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
!HAS_PCH_SPLIT(dev));
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
!HAS_PCH_SPLIT(dev_priv));
DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
 
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
1211,88 → 1212,79
}
}
 
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
{
DRM_DEBUG_KMS("Falling back to manually reading VBT from "
"VBIOS ROM for %s\n",
id->ident);
return 1;
const void *_vbt = vbt;
 
return _vbt + vbt->bdb_offset;
}
 
static const struct dmi_system_id intel_no_opregion_vbt[] = {
/**
* intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
* @buf: pointer to a buffer to validate
* @size: size of the buffer
*
* Returns true on valid VBT.
*/
bool intel_bios_is_valid_vbt(const void *buf, size_t size)
{
.callback = intel_no_opregion_vbt_callback,
.ident = "ThinkCentre A57",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
},
},
{ }
};
 
static const struct bdb_header *validate_vbt(const void *base,
size_t size,
const void *_vbt,
const char *source)
{
size_t offset = _vbt - base;
const struct vbt_header *vbt = _vbt;
const struct vbt_header *vbt = buf;
const struct bdb_header *bdb;
 
if (offset + sizeof(struct vbt_header) > size) {
if (!vbt)
return false;
 
if (sizeof(struct vbt_header) > size) {
DRM_DEBUG_DRIVER("VBT header incomplete\n");
return NULL;
return false;
}
 
if (memcmp(vbt->signature, "$VBT", 4)) {
DRM_DEBUG_DRIVER("VBT invalid signature\n");
return NULL;
return false;
}
 
offset += vbt->bdb_offset;
if (offset + sizeof(struct bdb_header) > size) {
if (vbt->bdb_offset + sizeof(struct bdb_header) > size) {
DRM_DEBUG_DRIVER("BDB header incomplete\n");
return NULL;
return false;
}
 
bdb = base + offset;
if (offset + bdb->bdb_size > size) {
bdb = get_bdb_header(vbt);
if (vbt->bdb_offset + bdb->bdb_size > size) {
DRM_DEBUG_DRIVER("BDB incomplete\n");
return NULL;
return false;
}
 
DRM_DEBUG_KMS("Using VBT from %s: %20s\n",
source, vbt->signature);
return bdb;
return true;
}
 
static const struct bdb_header *find_vbt(void __iomem *bios, size_t size)
static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
{
const struct bdb_header *bdb = NULL;
size_t i;
 
/* Scour memory looking for the VBT signature. */
for (i = 0; i + 4 < size; i++) {
if (ioread32(bios + i) == *((const u32 *) "$VBT")) {
void *vbt;
 
if (ioread32(bios + i) != *((const u32 *) "$VBT"))
continue;
 
/*
* This is the one place where we explicitly discard the
* address space (__iomem) of the BIOS/VBT. From now on
* everything is based on 'base', and treated as regular
* memory.
* This is the one place where we explicitly discard the address
* space (__iomem) of the BIOS/VBT.
*/
void *_bios = (void __force *) bios;
vbt = (void __force *) bios + i;
if (intel_bios_is_valid_vbt(vbt, size - i))
return vbt;
 
bdb = validate_vbt(_bios, size, _bios + i, "PCI ROM");
break;
}
}
 
return bdb;
return NULL;
}
 
/**
* intel_parse_bios - find VBT and initialize settings from the BIOS
* intel_bios_init - find VBT and initialize settings from the BIOS
* @dev_priv: i915 device instance
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
1301,24 → 1293,19
* Returns 0 on success, nonzero on failure.
*/
int
intel_parse_bios(struct drm_device *dev)
intel_bios_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
const struct bdb_header *bdb = NULL;
struct pci_dev *pdev = dev_priv->dev->pdev;
const struct vbt_header *vbt = dev_priv->opregion.vbt;
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
 
if (HAS_PCH_NOP(dev))
if (HAS_PCH_NOP(dev_priv))
return -ENODEV;
 
init_vbt_defaults(dev_priv);
 
/* XXX Should this validation be moved to intel_opregion.c? */
if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
bdb = validate_vbt(dev_priv->opregion.header, OPREGION_SIZE,
dev_priv->opregion.vbt, "OpRegion");
 
if (bdb == NULL) {
if (!vbt) {
size_t size;
 
bios = pci_map_rom(pdev, &size);
1325,13 → 1312,20
if (!bios)
return -1;
 
bdb = find_vbt(bios, size);
if (!bdb) {
vbt = find_vbt(bios, size);
if (!vbt) {
pci_unmap_rom(pdev, bios);
return -1;
}
 
DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
}
 
bdb = get_bdb_header(vbt);
 
DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n",
(int)sizeof(vbt->signature), vbt->signature, bdb->version);
 
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
parse_general_definitions(dev_priv, bdb);
1351,42 → 1345,3
 
return 0;
}
 
/**
* intel_bios_is_port_present - is the specified digital port present
* @dev_priv: i915 device instance
* @port: port to check
*
* Return true if the device in %port is present.
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
static const struct {
u16 dp, hdmi;
} port_mapping[] = {
[PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
};
int i;
 
/* FIXME maybe deal with port A as well? */
if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
 
if (!dev_priv->vbt.child_dev_num)
return false;
 
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
if ((p_child->common.dvo_port == port_mapping[port].dp ||
p_child->common.dvo_port == port_mapping[port].hdmi) &&
(p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
return true;
}
 
return false;
}
/drivers/video/drm/i915/intel_bios.h
28,8 → 28,6
#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_
 
#include <drm/drmP.h>
 
struct vbt_header {
u8 signature[20]; /**< Always starts with '$VBT' */
u16 version; /**< decimal */
588,8 → 586,6
struct psr_table psr_table[16];
} __packed;
 
int intel_parse_bios(struct drm_device *dev);
 
/*
* Driver<->VBIOS interaction occurs through scratch bits in
* GR18 & SWF*.
/drivers/video/drm/i915/intel_crt.c
50,7 → 50,7
* encoder's enable/disable callbacks */
struct intel_connector *connector;
bool force_hotplug_required;
u32 adpa_reg;
i915_reg_t adpa_reg;
};
 
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
71,15 → 71,18
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
ret = false;
 
tmp = I915_READ(crt->adpa_reg);
 
if (!(tmp & ADPA_DAC_ENABLE))
return false;
goto out;
 
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
86,7 → 89,11
else
*pipe = PORT_TO_PIPE(tmp);
 
return true;
ret = true;
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
445,7 → 452,6
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
struct edid *edid;
struct i2c_adapter *i2c;
bool ret = false;
 
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
462,10 → 468,10
*/
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
ret = true;
} else {
return true;
}
 
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
} else {
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
472,7 → 478,7
 
kfree(edid);
 
return ret;
return false;
}
 
static enum drm_connector_status
487,12 → 493,8
uint32_t vsample;
uint32_t vblank, vblank_start, vblank_end;
uint32_t dsl;
uint32_t bclrpat_reg;
uint32_t vtotal_reg;
uint32_t vblank_reg;
uint32_t vsync_reg;
uint32_t pipeconf_reg;
uint32_t pipe_dsl_reg;
i915_reg_t bclrpat_reg, vtotal_reg,
vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
uint8_t st00;
enum drm_connector_status status;
 
525,7 → 527,7
/* Wait for next Vblank to substitute
* border color for Color info */
intel_wait_for_vblank(dev, pipe);
st00 = I915_READ8(VGA_MSR_WRITE);
st00 = I915_READ8(_VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
connector_status_disconnected;
570,7 → 572,7
do {
count++;
/* Read the ST00 VGA status register */
st00 = I915_READ8(VGA_MSR_WRITE);
st00 = I915_READ8(_VGA_MSR_WRITE);
if (st00 & (1 << 4))
detect++;
} while ((I915_READ(pipe_dsl_reg) == dsl));
788,11 → 790,37
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
i915_reg_t adpa_reg;
u32 adpa;
 
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
return;
 
if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
else if (IS_VALLEYVIEW(dev))
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
 
adpa = I915_READ(adpa_reg);
if ((adpa & ADPA_DAC_ENABLE) == 0) {
/*
* On some machines (some IVB at least) CRT can be
* fused off, but there's no known fuse bit to
* indicate that. On these machines the ADPA register
* works normally, except the DAC enable bit won't
* take. So the only way to tell is to attempt to enable
* it and see what happens.
*/
I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
return;
I915_WRITE(adpa_reg, adpa);
}
 
crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
if (!crt)
return;
809,7 → 837,7
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
DRM_MODE_ENCODER_DAC, NULL);
 
intel_connector_attach_encoder(intel_connector, &crt->base);
 
826,15 → 854,10
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
 
if (HAS_PCH_SPLIT(dev))
crt->adpa_reg = PCH_ADPA;
else if (IS_VALLEYVIEW(dev))
crt->adpa_reg = VLV_ADPA;
else
crt->adpa_reg = ADPA;
crt->adpa_reg = adpa_reg;
 
crt->base.compute_config = intel_crt_compute_config;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {
if (HAS_PCH_SPLIT(dev)) {
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
} else {
/drivers/video/drm/i915/intel_csr.c
47,21 → 47,10
MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
 
/*
* SKL CSR registers for DC5 and DC6
*/
#define CSR_PROGRAM(i) (0x80000 + (i) * 4)
#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
#define CSR_HTP_ADDR_SKL 0x00500034
#define CSR_SSP_BASE 0x8F074
#define CSR_HTP_SKL 0x8F004
#define CSR_LAST_WRITE 0x8F034
#define CSR_LAST_WRITE_VALUE 0xc003b400
/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
 
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
#define CSR_MMIO_START_RANGE 0x80000
#define CSR_MMIO_END_RANGE 0x8FFFF
 
struct intel_css_header {
/* 0x09 for DMC */
177,6 → 166,14
char substepping;
};
 
/*
* Kabylake is derived from Skylake H0, so SKL H0
* is the right firmware for KBL A0 (revid 0).
*/
static const struct stepping_info kbl_stepping_info[] = {
{'H', '0'}, {'I', '0'}
};
 
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
184,98 → 181,58
{'J', '0'}, {'K', '0'}
};
 
static struct stepping_info bxt_stepping_info[] = {
static const struct stepping_info bxt_stepping_info[] = {
{'A', '0'}, {'A', '1'}, {'A', '2'},
{'B', '0'}, {'B', '1'}, {'B', '2'}
};
 
static char intel_get_stepping(struct drm_device *dev)
static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
{
if (IS_SKYLAKE(dev) && (dev->pdev->revision <
ARRAY_SIZE(skl_stepping_info)))
return skl_stepping_info[dev->pdev->revision].stepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision <
ARRAY_SIZE(bxt_stepping_info)))
return bxt_stepping_info[dev->pdev->revision].stepping;
else
return -ENODATA;
}
const struct stepping_info *si;
unsigned int size;
 
static char intel_get_substepping(struct drm_device *dev)
{
if (IS_SKYLAKE(dev) && (dev->pdev->revision <
ARRAY_SIZE(skl_stepping_info)))
return skl_stepping_info[dev->pdev->revision].substepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision <
ARRAY_SIZE(bxt_stepping_info)))
return bxt_stepping_info[dev->pdev->revision].substepping;
else
return -ENODATA;
if (IS_KABYLAKE(dev)) {
size = ARRAY_SIZE(kbl_stepping_info);
si = kbl_stepping_info;
} else if (IS_SKYLAKE(dev)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev)) {
size = ARRAY_SIZE(bxt_stepping_info);
si = bxt_stepping_info;
} else {
return NULL;
}
 
/**
* intel_csr_load_status_get() - to get firmware loading status.
* @dev_priv: i915 device.
*
* This function helps to get the firmware loading status.
*
* Return: Firmware loading status.
*/
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
{
enum csr_state state;
if (INTEL_REVID(dev) < size)
return si + INTEL_REVID(dev);
 
mutex_lock(&dev_priv->csr_lock);
state = dev_priv->csr.state;
mutex_unlock(&dev_priv->csr_lock);
 
return state;
return NULL;
}
 
/**
* intel_csr_load_status_set() - help to set firmware loading status.
* @dev_priv: i915 device.
* @state: enumeration of firmware loading status.
*
* Set the firmware loading status.
*/
void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
enum csr_state state)
{
mutex_lock(&dev_priv->csr_lock);
dev_priv->csr.state = state;
mutex_unlock(&dev_priv->csr_lock);
}
 
/**
* intel_csr_load_program() - write the firmware from memory to registers.
* @dev: drm device.
* @dev_priv: i915 drm device.
*
* CSR firmware is read from a .bin file and kept in internal memory one time.
* Every time the display comes back from a low power state this function is called to
* copy the firmware from internal memory to registers.
*/
void intel_csr_load_program(struct drm_device *dev)
void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
 
if (!IS_GEN9(dev)) {
if (!IS_GEN9(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
return;
}
 
/*
* FIXME: Firmware gets lost on S3/S4, but not when entering system
* standby or suspend-to-idle (which is just like forced runtime pm).
* Unfortunately the ACPI subsystem doesn't yet give us a way to
* differentiate this, hence figure it out with this hack.
*/
if (I915_READ(CSR_PROGRAM(0)))
if (!dev_priv->csr.dmc_payload) {
DRM_ERROR("Tried to program CSR with empty payload\n");
return;
}
 
mutex_lock(&dev_priv->csr_lock);
fw_size = dev_priv->csr.dmc_fw_size;
for (i = 0; i < fw_size; i++)
I915_WRITE(CSR_PROGRAM(i), payload[i]);
285,35 → 242,34
dev_priv->csr.mmiodata[i]);
}
 
dev_priv->csr.state = FW_LOADED;
mutex_unlock(&dev_priv->csr_lock);
dev_priv->csr.dc_state = 0;
}
 
static void finish_csr_load(const struct firmware *fw, void *context)
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
const struct firmware *fw)
{
struct drm_i915_private *dev_priv = context;
struct drm_device *dev = dev_priv->dev;
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header *dmc_header;
struct intel_csr *csr = &dev_priv->csr;
char stepping = intel_get_stepping(dev);
char substepping = intel_get_substepping(dev);
const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
char stepping, substepping;
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
uint32_t *dmc_payload;
bool fw_loaded = false;
 
if (!fw) {
i915_firmware_load_error_print(csr->fw_path, 0);
goto out;
}
if (!fw)
return NULL;
 
if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
if (!stepping_info) {
DRM_ERROR("Unknown stepping info, firmware loading failed\n");
goto out;
return NULL;
}
 
stepping = stepping_info->stepping;
substepping = stepping_info->substepping;
 
/* Extract CSS Header information*/
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
320,8 → 276,22
(css_header->header_len * 4)) {
DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
(css_header->header_len * 4));
goto out;
return NULL;
}
 
csr->version = css_header->version;
 
if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
" [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
return NULL;
}
 
readcount += sizeof(struct intel_css_header);
 
/* Extract Package Header information*/
331,7 → 301,7
(package_header->header_len * 4)) {
DRM_ERROR("Firmware has wrong package header length %u bytes\n",
(package_header->header_len * 4));
goto out;
return NULL;
}
readcount += sizeof(struct intel_package_header);
 
351,7 → 321,7
}
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
goto out;
return NULL;
}
readcount += dmc_offset;
 
360,7 → 330,7
if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
(dmc_header->header_len));
goto out;
return NULL;
}
readcount += sizeof(struct intel_dmc_header);
 
368,7 → 338,7
if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
DRM_ERROR("Firmware has wrong mmio count %u\n",
dmc_header->mmio_count);
goto out;
return NULL;
}
csr->mmio_count = dmc_header->mmio_count;
for (i = 0; i < dmc_header->mmio_count; i++) {
376,9 → 346,9
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
dmc_header->mmioaddr[i]);
goto out;
return NULL;
}
csr->mmioaddr[i] = dmc_header->mmioaddr[i];
csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
csr->mmiodata[i] = dmc_header->mmiodata[i];
}
 
386,59 → 356,79
nbytes = dmc_header->fw_size * 4;
if (nbytes > CSR_MAX_FW_SIZE) {
DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
goto out;
return NULL;
}
csr->dmc_fw_size = dmc_header->fw_size;
 
csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
if (!csr->dmc_payload) {
dmc_payload = kmalloc(nbytes, GFP_KERNEL);
if (!dmc_payload) {
DRM_ERROR("Memory allocation failed for dmc payload\n");
goto out;
return NULL;
}
 
dmc_payload = csr->dmc_payload;
memcpy(dmc_payload, &fw->data[readcount], nbytes);
 
return dmc_payload;
}
 
static void csr_load_work_fn(struct drm_i915_private *dev_priv)
{
struct intel_csr *csr;
const struct firmware *fw;
int ret;
 
csr = &dev_priv->csr;
 
ret = request_firmware(&fw, dev_priv->csr.fw_path,
&dev_priv->dev->pdev->dev);
if (!fw)
goto out;
 
dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
if (!dev_priv->csr.dmc_payload)
goto out;
 
/* load csr program during system boot, as needed for DC states */
intel_csr_load_program(dev);
fw_loaded = true;
intel_csr_load_program(dev_priv);
 
DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
out:
if (fw_loaded)
intel_runtime_pm_put(dev_priv);
else
intel_csr_load_status_set(dev_priv, FW_FAILED);
if (dev_priv->csr.dmc_payload) {
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
DRM_INFO("Finished loading %s (v%u.%u)\n",
dev_priv->csr.fw_path,
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
}
 
release_firmware(fw);
}
 
/**
* intel_csr_ucode_init() - initialize the firmware loading.
* @dev: drm device.
* @dev_priv: i915 drm device.
*
* This function is called when the display driver loads, to read the
* firmware from a .bin file and copy it into internal memory.
*/
void intel_csr_ucode_init(struct drm_device *dev)
void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_csr *csr = &dev_priv->csr;
int ret;
 
if (!HAS_CSR(dev))
if (!HAS_CSR(dev_priv))
return;
 
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
else {
DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
intel_csr_load_status_set(dev_priv, FW_FAILED);
return;
}
#if 0
 
DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
 
/*
445,44 → 435,23
* Obtain a runtime pm reference, until CSR is loaded,
* to avoid entering runtime-suspend.
*/
intel_runtime_pm_get(dev_priv);
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
/* CSR supported for platform, load firmware */
ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
&dev_priv->dev->pdev->dev,
GFP_KERNEL, dev_priv,
finish_csr_load);
if (ret) {
i915_firmware_load_error_print(csr->fw_path, ret);
intel_csr_load_status_set(dev_priv, FW_FAILED);
csr_load_work_fn(dev_priv);
}
#endif
}
 
/**
* intel_csr_ucode_fini() - unload the CSR firmware.
* @dev: drm device.
* @dev_priv: i915 drm device.
*
* Firmware unloading includes freeing the internal memory and resetting
* the firmware loading status.
*/
void intel_csr_ucode_fini(struct drm_device *dev)
void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!HAS_CSR(dev))
if (!HAS_CSR(dev_priv))
return;
 
intel_csr_load_status_set(dev_priv, FW_FAILED);
 
kfree(dev_priv->csr.dmc_payload);
}
 
void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
"CSR is not loaded.\n");
WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
"CSR program storage start is NULL\n");
WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
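/*
 * For reference, a minimal sketch of the version encoding that the
 * SKL_CSR_VERSION_REQUIRED check above relies on. The macros below
 * follow the i915 convention of packing major/minor into one 32-bit
 * value; treat the exact field widths as an assumption:
 *
 *	#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
 *	#define CSR_VERSION_MAJOR(version)	((version) >> 16)
 *	#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
 */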
/drivers/video/drm/i915/intel_ddi.c
133,12 → 133,12
{ 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 },
{ 0x00007011, 0x00000088, 0x0 },
{ 0x00009010, 0x000000C7, 0x0 },
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00002016, 0x0000009B, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00002016, 0x000000DF, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
};
 
/* Skylake U */
146,12 → 146,12
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x0000201B, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00002016, 0x00000088, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
};
 
/* Skylake Y */
159,12 → 159,12
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x00000018, 0x00000088, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
};
 
/*
345,7 → 345,7
static bool
intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
{
return intel_dig_port->hdmi.hdmi_reg;
return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
}
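/*
 * Context for the i915_reg_t conversions throughout these hunks:
 * register offsets move from bare u32 into a wrapper struct so the
 * compiler rejects mixing MMIO offsets with ordinary integers. A
 * self-contained sketch of the type and the helpers used above (an
 * assumption; the real definitions live in i915_reg.h):
 */
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	uint32_t reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

static inline bool i915_mmio_reg_valid(i915_reg_t reg)
{
	/* zero-initialized registers count as "not present" */
	return reg.reg != 0;
}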
 
static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
353,10 → 353,10
{
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKL_ULX(dev)) {
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
} else if (IS_SKL_ULT(dev)) {
} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
ddi_translations = skl_u_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
} else {
373,7 → 373,7
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKL_ULX(dev)) {
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_y_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
381,7 → 381,7
ddi_translations = skl_y_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
}
} else if (IS_SKL_ULT(dev)) {
} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_u_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
408,7 → 408,7
{
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKL_ULX(dev)) {
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
} else {
448,7 → 448,7
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
INTEL_OUTPUT_HDMI);
return;
} else if (IS_SKYLAKE(dev)) {
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev, &n_dp_entries);
584,7 → 584,7
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
uint32_t reg = DDI_BUF_CTL(port);
i915_reg_t reg = DDI_BUF_CTL(port);
int i;
 
for (i = 0; i < 16; i++) {
683,15 → 683,16
temp = I915_READ(DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
break;
}
 
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_LINK_TRAIN_NORMAL |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_ENABLE);
 
return;
/*
* Leave things enabled even if we failed to train FDI.
* Results in fewer fireworks from the state checker.
*/
if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) {
DRM_ERROR("FDI link training failed!\n");
break;
}
 
temp = I915_READ(DDI_BUF_CTL(PORT_E));
720,7 → 721,12
POSTING_READ(FDI_RX_MISC(PIPE_A));
}
 
DRM_ERROR("FDI link training failed!\n");
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_LINK_TRAIN_NORMAL |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_ENABLE);
}
 
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
939,7 → 945,8
/* Otherwise a < c && b >= d, do nothing */
}
 
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
int refclk = LC_FREQ;
int n, p, r;
975,7 → 982,7
static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t dpll)
{
uint32_t cfgcr1_reg, cfgcr2_reg;
i915_reg_t cfgcr1_reg, cfgcr2_reg;
uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq;
 
1120,10 → 1127,10
link_clock = 270000;
break;
case PORT_CLK_SEL_WRPLL1:
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
break;
case PORT_CLK_SEL_WRPLL2:
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
break;
case PORT_CLK_SEL_SPLL:
pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
1192,7 → 1199,7
 
if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_SKYLAKE(dev))
else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_BROXTON(dev))
bxt_ddi_clock_get(encoder, pipe_config);
1789,7 → 1796,7
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
 
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else if (IS_BROXTON(dev))
1951,7 → 1958,7
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
 
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
1970,13 → 1977,16
enum transcoder cpu_transcoder;
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
 
power_domain = intel_display_port_power_domain(intel_encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
return false;
if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
ret = false;
goto out;
}
 
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
1988,23 → 1998,33
switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
case TRANS_DDI_MODE_SELECT_DVI:
return (type == DRM_MODE_CONNECTOR_HDMIA);
ret = type == DRM_MODE_CONNECTOR_HDMIA;
break;
 
case TRANS_DDI_MODE_SELECT_DP_SST:
if (type == DRM_MODE_CONNECTOR_eDP)
return true;
return (type == DRM_MODE_CONNECTOR_DisplayPort);
ret = type == DRM_MODE_CONNECTOR_eDP ||
type == DRM_MODE_CONNECTOR_DisplayPort;
break;
 
case TRANS_DDI_MODE_SELECT_DP_MST:
/* if the transcoder is in MST state then
* connector isn't connected */
return false;
ret = false;
break;
 
case TRANS_DDI_MODE_SELECT_FDI:
return (type == DRM_MODE_CONNECTOR_VGA);
ret = type == DRM_MODE_CONNECTOR_VGA;
break;
 
default:
return false;
ret = false;
break;
}
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2016,15 → 2036,18
enum intel_display_power_domain power_domain;
u32 tmp;
int i;
bool ret;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
ret = false;
 
tmp = I915_READ(DDI_BUF_CTL(port));
 
if (!(tmp & DDI_BUF_CTL_ENABLE))
return false;
goto out;
 
if (port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
2042,25 → 2065,32
break;
}
 
return true;
} else {
ret = true;
 
goto out;
}
 
for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
 
if ((tmp & TRANS_DDI_PORT_MASK)
== TRANS_DDI_SELECT_PORT(port)) {
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
return false;
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
TRANS_DDI_MODE_SELECT_DP_MST)
goto out;
 
*pipe = i;
return true;
ret = true;
 
goto out;
}
}
}
 
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
 
return false;
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
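/*
 * The recurring change above swaps intel_display_power_is_enabled()
 * for a get_if_enabled()/put() pair, so the power well cannot be
 * switched off between the check and the register reads. The pattern,
 * as a sketch:
 *
 *	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 *		return false;	// domain is off, nothing to read
 *	... read hardware state, set ret ...
 *	out:
 *	intel_display_power_put(dev_priv, power_domain);
 *	return ret;
 */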
 
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
2106,7 → 2136,7
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
iboost = ddi_translations[port].i_boost;
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
if (dp_iboost) {
2113,7 → 2143,7
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
iboost = ddi_translations[port].i_boost;
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_HDMI) {
if (hdmi_iboost) {
2120,7 → 2150,7
iboost = hdmi_iboost;
} else {
ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
iboost = ddi_translations[port].i_boost;
iboost = ddi_translations[level].i_boost;
}
} else {
return;
2272,7 → 2302,7
 
level = translate_signal_level(signal_levels);
 
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_set_iboost(dev, level, port, encoder->type);
else if (IS_BROXTON(dev))
bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
2280,23 → 2310,14
return DDI_BUF_TRANS_SELECT(level);
}
 
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
int hdmi_level;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
 
if (IS_SKYLAKE(dev)) {
uint32_t dpll = crtc->config->ddi_pll_sel;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
uint32_t dpll = pipe_config->ddi_pll_sel;
uint32_t val;
 
/*
2303,7 → 2324,7
* DPLL0 is used for eDP and is the only "private" DPLL (as
* opposed to shared) on SKL
*/
if (type == INTEL_OUTPUT_EDP) {
if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(dpll != SKL_DPLL0);
 
val = I915_READ(DPLL_CTRL1);
2311,7 → 2332,7
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6);
 
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
2327,11 → 2348,29
 
I915_WRITE(DPLL_CTRL2, val);
 
} else if (INTEL_INFO(dev)->gen < 9) {
WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
} else if (INTEL_INFO(dev_priv)->gen < 9) {
WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
}
}
 
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
int hdmi_level;
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
 
intel_ddi_clk_select(intel_encoder, crtc->config);
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
2390,7 → 2429,7
intel_edp_panel_off(intel_dp);
}
 
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9)
2500,12 → 2539,14
{
uint32_t val;
 
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(WRPLL_CTL(pll->id));
hw_state->wrpll = val;
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 
return val & WRPLL_PLL_ENABLE;
}
 
2515,12 → 2556,14
{
uint32_t val;
 
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 
return val & SPLL_PLL_ENABLE;
}
 
2562,7 → 2605,7
};
 
struct skl_dpll_regs {
u32 ctl, cfgcr1, cfgcr2;
i915_reg_t ctl, cfgcr1, cfgcr2;
};
 
/* this array is indexed by the *shared* pll id */
2575,13 → 2618,13
},
{
/* DPLL 2 */
.ctl = WRPLL_CTL1,
.ctl = WRPLL_CTL(0),
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
},
{
/* DPLL 3 */
.ctl = WRPLL_CTL2,
.ctl = WRPLL_CTL(1),
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
},
2637,16 → 2680,19
uint32_t val;
unsigned int dpll;
const struct skl_dpll_regs *regs = skl_dpll_regs;
bool ret;
 
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
ret = false;
 
/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
dpll = pll->id + 1;
 
val = I915_READ(regs[pll->id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
return false;
goto out;
 
val = I915_READ(DPLL_CTRL1);
hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
2656,8 → 2702,12
hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
}
ret = true;
 
return true;
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 
return ret;
}
 
static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
2924,13 → 2974,16
{
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
bool ret;
 
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
ret = false;
 
val = I915_READ(BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
return false;
goto out;
 
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2977,7 → 3030,12
I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
return true;
ret = true;
 
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 
return ret;
}
 
static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
3001,7 → 3059,7
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
 
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_shared_dplls_init(dev_priv);
else if (IS_BROXTON(dev))
bxt_shared_dplls_init(dev_priv);
3008,15 → 3066,15
else
hsw_shared_dplls_init(dev_priv);
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
int cdclk_freq;
 
cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
dev_priv->skl_boot_cdclk = cdclk_freq;
if (skl_sanitize_cdclk(dev_priv))
DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
else
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
} else if (IS_BROXTON(dev)) {
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
3035,11 → 3093,11
}
}
 
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->port;
uint32_t val;
bool wait = false;
3150,7 → 3208,7
pipe_config->has_hdmi_sink = true;
intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
if (intel_hdmi->infoframe_enabled(&encoder->base))
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
break;
case TRANS_DDI_MODE_SELECT_DVI:
3278,7 → 3336,7
encoder = &intel_encoder->base;
 
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS);
DRM_MODE_ENCODER_TMDS, NULL);
 
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
3294,6 → 3352,20
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
 
/*
* Bspec says that DDI_A_4_LANES is the only supported configuration
* for Broxton. Yet some BIOS fail to set this bit on port A if eDP
* wasn't lit up at boot. Force this bit on in our internal
* configuration so that we use the proper lane count for our
* calculations.
*/
if (IS_BROXTON(dev) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
}
}
 
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
3307,8 → 3379,7
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
&& port == PORT_B)
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
/drivers/video/drm/i915/intel_display.c
44,6 → 44,8
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/dma-buf.h>
 
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
186,7 → 188,7
uint32_t clkcfg;
 
/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
return 200;
 
clkcfg = I915_READ(CLKCFG);
214,7 → 216,7
 
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
if (!IS_VALLEYVIEW(dev_priv))
if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;
 
dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
715,11 → 717,12
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
 
if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
!IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
 
if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
1096,7 → 1099,7
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPEDSL(pipe);
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
 
1136,7 → 1139,7
enum pipe pipe = crtc->pipe;
 
if (INTEL_INFO(dev)->gen >= 4) {
int reg = PIPECONF(cpu_transcoder);
i915_reg_t reg = PIPECONF(cpu_transcoder);
 
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1286,7 → 1289,7
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int pp_reg;
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
bool locked = true;
1304,7 → 1307,7
I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
/* XXX: else fix for eDP */
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = VLV_PIPE_PP_CONTROL(pipe);
panel_pipe = pipe;
1348,6 → 1351,7
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum intel_display_power_domain power_domain;
 
/* if we need the pipe quirk it must be always on */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1354,12 → 1358,14
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
 
intel_display_power_put(dev_priv, power_domain);
} else {
cur_state = false;
}
 
I915_STATE_WARN(cur_state != state,
1422,7 → 1428,7
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(SPCNTR(pipe, sprite));
I915_STATE_WARN(val & SP_ENABLE,
1481,8 → 1487,7
return false;
 
if (HAS_PCH_CPT(dev_priv->dev)) {
u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
return false;
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1546,12 → 1551,13
}
 
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg, u32 port_sel)
enum pipe pipe, i915_reg_t reg,
u32 port_sel)
{
u32 val = I915_READ(reg);
I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
i915_mmio_reg_offset(reg), pipe_name(pipe));
 
I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
&& (val & DP_PIPEB_SELECT),
1559,12 → 1565,12
}
 
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
enum pipe pipe, i915_reg_t reg)
{
u32 val = I915_READ(reg);
I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
i915_mmio_reg_offset(reg), pipe_name(pipe));
 
I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
1600,14 → 1606,11
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = pipe_config->dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
/* No really, not for ILK+ */
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev))
assert_panel_unlocked(dev_priv, crtc->pipe);
1645,8 → 1648,6
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
 
mutex_lock(&dev_priv->sb_lock);
 
/* Enable back the 10bit clock to display controller */
1689,7 → 1690,7
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
1838,7 → 1839,7
unsigned int expected_mask)
{
u32 port_mask;
int dpll_reg;
i915_reg_t dpll_reg;
 
switch (dport->port) {
case PORT_B:
1963,7 → 1964,8
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t reg, val, pipeconf_val;
i915_reg_t reg;
uint32_t val, pipeconf_val;
 
/* PCH only available on ILK+ */
BUG_ON(!HAS_PCH_SPLIT(dev));
2052,7 → 2054,8
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
uint32_t reg, val;
i915_reg_t reg;
uint32_t val;
 
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
2069,7 → 2072,7
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
 
if (!HAS_PCH_IBX(dev)) {
if (HAS_PCH_CPT(dev)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
2107,10 → 2110,9
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pch_transcoder;
int reg;
i915_reg_t reg;
u32 val;
 
DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2130,7 → 2132,7
* need the check.
*/
if (HAS_GMCH_DISPLAY(dev_priv->dev))
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
if (crtc->config->has_dsi_encoder)
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
2171,7 → 2173,7
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
int reg;
i915_reg_t reg;
u32 val;
 
DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2270,20 → 2272,20
fb_format_modifier, 0));
}
 
static int
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
struct intel_rotation_info *info = &view->rotation_info;
struct intel_rotation_info *info = &view->params.rotation_info;
unsigned int tile_height, tile_pitch;
 
*view = i915_ggtt_view_normal;
 
if (!plane_state)
return 0;
return;
 
if (!intel_rotation_90_or_270(plane_state->rotation))
return 0;
return;
 
*view = i915_ggtt_view_rotated;
 
2310,8 → 2312,6
info->size_uv = info->width_pages_uv * info->height_pages_uv *
PAGE_SIZE;
}
 
return 0;
}
 
static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2319,7 → 2319,7
if (INTEL_INFO(dev_priv)->gen >= 9)
return 256 * 1024;
else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
IS_VALLEYVIEW(dev_priv))
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return 128 * 1024;
else if (INTEL_INFO(dev_priv)->gen >= 4)
return 4 * 1024;
2330,9 → 2330,7
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request)
const struct drm_plane_state *plane_state)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
2367,9 → 2365,7
return -EINVAL;
}
 
ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
if (ret)
return ret;
intel_fill_fb_ggtt_view(&view, fb, plane_state);
 
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
2388,11 → 2384,10
*/
intel_runtime_pm_get(dev_priv);
 
dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
pipelined_request, &view);
ret = i915_gem_object_pin_to_display_plane(obj, alignment,
&view);
if (ret)
goto err_interruptible;
goto err_pm;
 
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
2418,14 → 2413,12
i915_gem_object_pin_fence(obj);
}
 
dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
return 0;
 
err_unpin:
i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
dev_priv->mm.interruptible = true;
err_pm:
intel_runtime_pm_put(dev_priv);
return ret;
}
2435,12 → 2428,10
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
int ret;
 
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
WARN_ONCE(ret, "Couldn't get view from plane state!");
intel_fill_fb_ggtt_view(&view, fb, plane_state);
 
if (view.type == I915_GGTT_VIEW_NORMAL)
i915_gem_object_unpin_fence(obj);
2695,7 → 2686,7
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg = DSPCNTR(plane);
i915_reg_t reg = DSPCNTR(plane);
int pixel_size;
 
if (!visible || !fb) {
2825,7 → 2816,7
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg = DSPCNTR(plane);
i915_reg_t reg = DSPCNTR(plane);
int pixel_size;
 
if (!visible || !fb) {
2954,22 → 2945,22
struct drm_i915_gem_object *obj,
unsigned int plane)
{
const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
struct i915_ggtt_view view;
struct i915_vma *vma;
u64 offset;
 
if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
view = &i915_ggtt_view_rotated;
intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
intel_plane->base.state);
 
vma = i915_gem_obj_to_ggtt_view(obj, view);
vma = i915_gem_obj_to_ggtt_view(obj, &view);
if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
view->type))
view.type))
return -1;
 
offset = vma->node.start;
 
if (plane == 1) {
offset += vma->ggtt_view.rotation_info.uv_start_page *
offset += vma->ggtt_view.params.rotation_info.uv_start_page *
PAGE_SIZE;
}
 
3199,8 → 3190,8
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->fbc.disable_fbc)
dev_priv->fbc.disable_fbc(dev_priv);
if (dev_priv->fbc.deactivate)
dev_priv->fbc.deactivate(dev_priv);
 
dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
3229,10 → 3220,9
struct intel_plane_state *plane_state;
 
drm_modeset_lock_crtc(crtc, &plane->base);
 
plane_state = to_intel_plane_state(plane->base.state);
 
if (plane_state->base.fb)
if (crtc->state->active && plane_state->base.fb)
plane->commit_plane(&plane->base, plane_state);
 
drm_modeset_unlock_crtc(crtc);
3308,32 → 3298,6
drm_modeset_unlock_all(dev);
}
 
static void
intel_finish_fb(struct drm_framebuffer *old_fb)
{
struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
 
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer. Note that we rely on userspace rendering
* into the buffer attached to the pipe they are waiting
* on. If not, userspace generates a GPU hang with IPEHR
* point to the MI_WAIT_FOR_EVENT.
*
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
dev_priv->mm.interruptible = false;
ret = i915_gem_object_wait_rendering(obj, true);
dev_priv->mm.interruptible = was_interruptible;
 
WARN_ON(ret);
}
 
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3403,7 → 3367,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
i915_reg_t reg;
u32 temp;
 
/* enable normal train */
reg = FDI_TX_CTL(pipe);
3445,7 → 3410,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, tries;
i915_reg_t reg;
u32 temp, tries;
 
/* FDI needs bits from pipe first */
assert_pipe_enabled(dev_priv, pipe);
3545,7 → 3511,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i, retry;
i915_reg_t reg;
u32 temp, i, retry;
 
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
3677,7 → 3644,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i, j;
i915_reg_t reg;
u32 temp, i, j;
 
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
3794,9 → 3762,9
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = intel_crtc->pipe;
u32 reg, temp;
i915_reg_t reg;
u32 temp;
 
 
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
3831,7 → 3799,8
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = intel_crtc->pipe;
u32 reg, temp;
i915_reg_t reg;
u32 temp;
 
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
3861,7 → 3830,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
i915_reg_t reg;
u32 temp;
 
/* disable CPU FDI tx and PCH FDI rx */
reg = FDI_TX_CTL(pipe);
3948,21 → 3918,29
drm_crtc_vblank_put(&intel_crtc->base);
 
wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->work);
 
trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);
 
queue_work(dev_priv->wq, &work->work);
}
 
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
long ret;
 
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
 
ret = wait_event_interruptible_timeout(
dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc),
60*HZ) == 0)) {
60*HZ);
 
if (ret < 0)
return ret;
 
if (ret == 0) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
spin_lock_irq(&dev->event_lock);
3973,11 → 3951,22
spin_unlock_irq(&dev->event_lock);
}
 
if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
intel_finish_fb(crtc->primary->fb);
mutex_unlock(&dev->struct_mutex);
return 0;
}
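/*
 * Since the wait above is now interruptible and returns an int,
 * callers are expected to propagate the error instead of blocking
 * uninterruptibly. A hypothetical call site (sketch):
 *
 *	ret = intel_crtc_wait_for_pending_flips(crtc);
 *	if (ret)	// -ERESTARTSYS if a signal arrived
 *		return ret;
 */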
 
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;
 
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
 
mutex_lock(&dev_priv->sb_lock);
 
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
 
mutex_unlock(&dev_priv->sb_lock);
}
 
/* Program iCLKIP clock to the desired frequency */
3989,19 → 3978,8
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
 
mutex_lock(&dev_priv->sb_lock);
lpt_disable_iclkip(dev_priv);
 
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
*/
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
 
/* Disable SSCCTL */
intel_sbi_write(dev_priv, SBI_SSCCTL6,
intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
SBI_SSCCTL_DISABLE,
SBI_ICLK);
 
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
if (clock == 20000) {
auxdiv = 1;
4018,7 → 3996,7
u32 iclk_pi_range = 64;
u32 desired_divisor, msb_divisor_value, pi_value;
 
desired_divisor = (iclk_virtual_root_freq / clock);
desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
msb_divisor_value = desired_divisor / iclk_pi_range;
pi_value = desired_divisor % iclk_pi_range;
 
4040,6 → 4018,8
phasedir,
phaseinc);
 
mutex_lock(&dev_priv->sb_lock);
 
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4061,12 → 4041,12
temp &= ~SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
 
mutex_unlock(&dev_priv->sb_lock);
 
/* Wait for initialization time */
udelay(24);
 
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
 
mutex_unlock(&dev_priv->sb_lock);
}
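/*
 * Why DIV_ROUND_CLOSEST above: with truncating division the programmed
 * divisor can be off by almost a full step. Worked example (the root
 * frequency is an assumption taken from the usual iCLKIP setup,
 * iclk_virtual_root_freq = 172800000): for clock = 148500 the exact
 * ratio is 1163.64, so truncation programs 1163 while rounding
 * programs 1164, bounding the error at half a step instead of nearly
 * a whole one.
 */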
 
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4137,6 → 4117,22
}
}
 
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
encoder->type == INTEL_OUTPUT_EDP)
return enc_to_dig_port(&encoder->base)->port;
}
 
return -1;
}
 
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
4151,7 → 4147,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
u32 temp;
 
assert_pch_transcoder_disabled(dev_priv, pipe);
 
4163,6 → 4159,12
I915_WRITE(FDI_RX_TUSIZE1(pipe),
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
/*
* Sometimes spurious CPU pipe underruns happen during FDI
* training, at least with VGA+HDMI cloning. Suppress them.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
 
4196,10 → 4198,14
 
intel_fdi_normal_train(crtc);
 
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
i915_reg_t reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
4207,19 → 4213,19
temp |= TRANS_DP_OUTPUT_ENABLE;
temp |= bpc << 9; /* same format but at 11:9 */
 
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
 
switch (intel_trans_dp_port_sel(crtc)) {
case PCH_DP_B:
case PORT_B:
temp |= TRANS_DP_PORT_SEL_B;
break;
case PCH_DP_C:
case PORT_C:
temp |= TRANS_DP_PORT_SEL_C;
break;
case PCH_DP_D:
case PORT_D:
temp |= TRANS_DP_PORT_SEL_D;
break;
default:
4359,7 → 4365,7
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dslreg = PIPEDSL(pipe);
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
 
temp = I915_READ(dslreg);
4652,7 → 4658,7
return;
 
if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
if (intel_crtc->config->has_dsi_encoder)
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
4669,7 → 4675,7
}
 
for (i = 0; i < 256; i++) {
u32 palreg;
i915_reg_t palreg;
 
if (HAS_GMCH_DISPLAY(dev))
palreg = PALETTE(pipe, i);
4723,14 → 4729,6
int pipe = intel_crtc->pipe;
 
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (IS_BROADWELL(dev))
intel_wait_for_vblank(dev, pipe);
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
4748,9 → 4746,9
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
/* Underruns don't raise interrupts, so check manually. */
if (HAS_GMCH_DISPLAY(dev))
i9xx_check_fifo_underruns(dev_priv);
/* Underruns don't always raise interrupts, so check manually. */
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
}
 
/**
4807,9 → 4805,9
static void intel_post_plane_update(struct intel_crtc *crtc)
{
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_plane *plane;
 
if (atomic->wait_vblank)
intel_wait_for_vblank(dev, crtc->pipe);
4816,22 → 4814,17
 
intel_frontbuffer_flip(dev, atomic->fb_bits);
 
if (atomic->disable_cxsr)
crtc->wm.cxsr_allowed = true;
 
if (crtc->atomic.update_wm_post)
if (pipe_config->update_wm_post && pipe_config->base.active)
intel_update_watermarks(&crtc->base);
 
if (atomic->update_fbc)
intel_fbc_update(dev_priv);
intel_fbc_update(crtc);
 
if (atomic->post_enable_primary)
intel_post_enable_primary(&crtc->base);
 
drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
intel_update_sprite_watermarks(plane, &crtc->base,
0, 0, 0, false, false);
 
memset(atomic, 0, sizeof(*atomic));
}
 
4840,23 → 4833,11
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct drm_plane *p;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
 
/* Track fb's for any planes being disabled */
drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
struct intel_plane *plane = to_intel_plane(p);
 
mutex_lock(&dev->struct_mutex);
i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
plane->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
}
 
if (atomic->wait_for_flips)
intel_crtc_wait_for_pending_flips(&crtc->base);
 
if (atomic->disable_fbc)
intel_fbc_disable_crtc(crtc);
intel_fbc_deactivate(crtc);
 
if (crtc->atomic.disable_ips)
hsw_disable_ips(crtc);
4864,10 → 4845,13
if (atomic->pre_disable_primary)
intel_pre_disable_primary(&crtc->base);
 
if (atomic->disable_cxsr) {
if (pipe_config->disable_cxsr) {
crtc->wm.cxsr_allowed = false;
intel_set_memory_cxsr(dev_priv, false);
}
 
if (!needs_modeset(&pipe_config->base) && pipe_config->update_wm_pre)
intel_update_watermarks(&crtc->base);
}
 
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4902,6 → 4886,9
return;
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
 
if (intel_crtc->config->has_dp_encoder)
4919,7 → 4906,6
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
4957,6 → 4943,13
 
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
 
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
if (intel_crtc->config->has_pch_encoder)
intel_wait_for_vblank(dev, pipe);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
intel_fbc_enable(intel_crtc);
}
 
/* IPS only exists on ULT machines and is tied to pipe A. */
4974,11 → 4967,14
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
if (WARN_ON(intel_crtc->active))
return;
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
 
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_enable_shared_dpll(intel_crtc);
 
5003,21 → 4999,20
 
intel_crtc->active = true;
 
if (intel_crtc->config->has_pch_encoder)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
else
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
if (encoder->pre_enable)
encoder->pre_enable(encoder);
}
 
if (intel_crtc->config->has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
if (intel_crtc->config->has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
}
 
if (!is_dsi)
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_enable_pipe_clock(intel_crtc);
 
if (INTEL_INFO(dev)->gen >= 9)
5032,7 → 5027,7
intel_crtc_load_lut(crtc);
 
intel_ddi_set_pipe_settings(crtc);
if (!is_dsi)
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_enable_transcoder_func(crtc);
 
intel_update_watermarks(crtc);
5041,7 → 5036,7
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
 
if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, true);
 
assert_vblank_disabled(crtc);
5052,6 → 5047,14
intel_opregion_notify_encoder(encoder, true);
}
 
if (intel_crtc->config->has_pch_encoder) {
intel_wait_for_vblank(dev, pipe);
intel_wait_for_vblank(dev, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
}
 
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
5059,6 → 5062,8
intel_wait_for_vblank(dev, hsw_workaround_pipe);
intel_wait_for_vblank(dev, hsw_workaround_pipe);
}
 
intel_fbc_enable(intel_crtc);
}
 
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5083,8 → 5088,10
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 reg, temp;
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
5091,15 → 5098,22
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
/*
* Sometimes spurious CPU pipe underruns happen when the
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_disable_pipe(intel_crtc);
 
ironlake_pfit_disable(intel_crtc, false);
 
if (intel_crtc->config->has_pch_encoder)
if (intel_crtc->config->has_pch_encoder) {
ironlake_fdi_disable(crtc);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
5109,6 → 5123,9
ironlake_disable_pch_transcoder(dev_priv, pipe);
 
if (HAS_PCH_CPT(dev)) {
i915_reg_t reg;
u32 temp;
 
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
5125,6 → 5142,10
 
ironlake_fdi_pll_disable(intel_crtc);
}
 
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
intel_fbc_disable_crtc(intel_crtc);
}
 
static void haswell_crtc_disable(struct drm_crtc *crtc)
5134,8 → 5155,11
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
5144,15 → 5168,12
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
intel_disable_pipe(intel_crtc);
 
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
 
if (!is_dsi)
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
if (INTEL_INFO(dev)->gen >= 9)
5160,17 → 5181,23
else
ironlake_pfit_disable(intel_crtc, false);
 
if (!is_dsi)
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_disable_pipe_clock(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (intel_crtc->config->has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
lpt_disable_iclkip(dev_priv);
intel_ddi_fdi_disable(crtc);
 
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
intel_fbc_disable_crtc(intel_crtc);
}
 
static void i9xx_pfit_enable(struct intel_crtc *crtc)
5201,15 → 5228,15
{
switch (port) {
case PORT_A:
return POWER_DOMAIN_PORT_DDI_A_4_LANES;
return POWER_DOMAIN_PORT_DDI_A_LANES;
case PORT_B:
return POWER_DOMAIN_PORT_DDI_B_4_LANES;
return POWER_DOMAIN_PORT_DDI_B_LANES;
case PORT_C:
return POWER_DOMAIN_PORT_DDI_C_4_LANES;
return POWER_DOMAIN_PORT_DDI_C_LANES;
case PORT_D:
return POWER_DOMAIN_PORT_DDI_D_4_LANES;
return POWER_DOMAIN_PORT_DDI_D_LANES;
case PORT_E:
return POWER_DOMAIN_PORT_DDI_E_2_LANES;
return POWER_DOMAIN_PORT_DDI_E_LANES;
default:
MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
5236,10 → 5263,6
}
}
 
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
 
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
5304,13 → 5327,11
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
unsigned long mask;
enum transcoder transcoder;
enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
 
if (!crtc->state->active)
return 0;
 
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (intel_crtc->config->pch_pfit.enabled ||
5397,7 → 5418,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
 
if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5454,7 → 5475,7
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* Program the gmbus_freq based on the cdclk frequency.
* BSpec erroneously claims we should aim for 4MHz, but
5814,32 → 5835,16
if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
DRM_ERROR("DBuf power disable timeout\n");
 
/*
* DMC assumes ownership of LCPLL and will get confused if we touch it.
*/
if (dev_priv->csr.dmc_payload) {
/* disable DPLL0 */
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
~LCPLL_PLL_ENABLE);
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
DRM_ERROR("Couldn't disable DPLL0\n");
}
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
 
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
u32 val;
unsigned int required_vco;
 
/* enable PCH reset handshake */
val = I915_READ(HSW_NDE_RSTWRN_OPT);
I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
 
/* enable PG1 and Misc I/O */
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 
/* DPLL0 not enabled (happens on early BIOS versions) */
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
/* enable DPLL0 */
5860,6 → 5865,45
DRM_ERROR("DBuf power enable timeout\n");
}
 
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
uint32_t cdctl = I915_READ(CDCLK_CTL);
int freq = dev_priv->skl_boot_cdclk;
 
/*
 * Check if the pre-os initialized the display.
 * There is a SWF18 scratchpad register defined which is set by the
 * pre-os, and which OS drivers can use to check the status.
 */
if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
 
/* Is PLL enabled and locked ? */
if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
goto sanitize;
 
/* DPLL okay; verify the cdclock
 *
 * In some instances the frequency selection is correct but the
 * decimal part is programmed wrong by the BIOS when the pre-os does
 * not enable the display. Verify that as well.
 */
if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
/* All well; nothing to sanitize */
return false;
sanitize:
/*
 * As of now, initialize with the max cdclk until we get dynamic
 * cdclk support.
 */
dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
skl_init_cdclk(dev_priv);
 
/* we did have to sanitize */
return true;
}
 
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
6141,13 → 6185,10
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
bool is_dsi;
 
if (WARN_ON(intel_crtc->active))
return;
 
is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
 
6170,7 → 6211,7
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
if (!is_dsi) {
if (!intel_crtc->config->has_dsi_encoder) {
if (IS_CHERRYVIEW(dev)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
6188,6 → 6229,7
 
intel_crtc_load_lut(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
 
assert_vblank_disabled(crtc);
6249,6 → 6291,8
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
intel_fbc_enable(intel_crtc);
}
 
static void i9xx_pfit_disable(struct intel_crtc *crtc)
6296,7 → 6340,7
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
if (!intel_crtc->config->has_dsi_encoder) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
6311,6 → 6355,8
 
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_fbc_disable_crtc(intel_crtc);
}
 
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6324,7 → 6370,8
return;
 
if (to_intel_plane_state(crtc->primary->state)->visible) {
intel_crtc_wait_for_pending_flips(crtc);
WARN_ON(intel_crtc->unpin_work);
 
intel_pre_disable_primary(crtc);
 
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6449,13 → 6496,11
 
int intel_connector_init(struct intel_connector *connector)
{
struct drm_connector_state *connector_state;
drm_atomic_helper_connector_reset(&connector->base);
 
connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
if (!connector_state)
if (!connector->base.state)
return -ENOMEM;
 
connector->base.state = connector_state;
return 0;
}
 
6644,6 → 6689,15
pipe_config_supports_ips(dev_priv, pipe_config);
}
 
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
/* GDG double wide on either pipe, otherwise pipe A only */
return INTEL_INFO(dev_priv)->gen < 4 &&
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
 
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
6653,24 → 6707,25
 
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
int clock_limit = dev_priv->max_cdclk_freq;
int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
 
/*
* Enable pixel doubling when the dot clock
* Enable double wide mode when the dot clock
* is > 90% of the (display) core speed.
*
* GDG double wide on either pipe,
* otherwise pipe A only.
*/
if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
if (intel_crtc_supports_double_wide(crtc) &&
adjusted_mode->crtc_clock > clock_limit) {
clock_limit *= 2;
pipe_config->double_wide = true;
}
 
if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
if (adjusted_mode->crtc_clock > clock_limit) {
DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
adjusted_mode->crtc_clock, clock_limit,
yesno(pipe_config->double_wide));
return -EINVAL;
}
}
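/*
 * Illustrative numbers for the limit above (assumed values, not from
 * the source): with max_cdclk_freq = 320000 kHz the single wide limit
 * is 288000 kHz (90%), and double wide mode doubles it to 576000 kHz.
 */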
 
/*
* Pipe horizontal size must be even in:
7146,7 → 7201,7
 
WARN_ON(!crtc_state->base.state);
 
if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
refclk = 100000;
} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7434,7 → 7489,7
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
int dpll_reg = DPLL(crtc->pipe);
i915_reg_t dpll_reg = DPLL(crtc->pipe);
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7845,7 → 7900,7
pipeconf |= PIPECONF_DOUBLE_WIDE;
 
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
7885,7 → 7940,8
} else
pipeconf |= PIPECONF_PROGRESSIVE;
 
if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
 
I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7900,8 → 7956,6
int refclk, num_connectors = 0;
intel_clock_t clock;
bool ok;
bool is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector *connector;
7911,26 → 7965,14
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
 
if (crtc_state->has_dsi_encoder)
return 0;
 
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
 
encoder = to_intel_encoder(connector_state->best_encoder);
 
switch (encoder->type) {
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
default:
break;
}
 
if (connector_state->crtc == &crtc->base)
num_connectors++;
}
 
if (is_dsi)
return 0;
 
if (!crtc_state->clock_set) {
refclk = i9xx_get_refclk(crtc_state, num_connectors);
 
8133,20 → 8175,24
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
ret = false;
 
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
return false;
goto out;
 
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
pipe_config->pipe_bpp = 18;
8162,7 → 8208,8
}
}
 
if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
 
if (INTEL_INFO(dev)->gen < 4)
8190,7 → 8237,7
pipe_config->pixel_multiplier = 1;
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev)) {
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
/*
* DPLL_DVO_2X_MODE must be enabled for both DPLLs
* on 830. Filter it out here so that we don't
8223,7 → 8270,12
pipe_config->base.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
 
return true;
ret = true;
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static void ironlake_init_pch_refclk(struct drm_device *dev)
8230,7 → 8282,6
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
int i;
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
8237,7 → 8288,6
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
bool using_ssc_source = false;
 
/* We need to take the global config into account */
for_each_intel_encoder(dev, encoder) {
8264,23 → 8314,9
can_ssc = true;
}
 
/* Check if any DPLLs are using the SSC source */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
u32 temp = I915_READ(PCH_DPLL(i));
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
has_panel, has_lvds, has_ck505);
 
if (!(temp & DPLL_VCO_ENABLE))
continue;
 
if ((temp & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
using_ssc_source = true;
break;
}
}
 
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
has_panel, has_lvds, has_ck505, using_ssc_source);
 
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
* PCH B stepping, previous chipset stepping should be
8316,9 → 8352,9
final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
} else if (using_ssc_source) {
final |= DREF_SSC_SOURCE_ENABLE;
final |= DREF_SSC1_ENABLE;
} else {
final |= DREF_SSC_SOURCE_DISABLE;
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
}
 
if (final == val)
8364,7 → 8400,7
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
DRM_DEBUG_KMS("Disabling CPU source output\n");
DRM_DEBUG_KMS("Disabling SSC entirely\n");
 
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
8375,9 → 8411,6
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
 
if (!using_ssc_source) {
DRM_DEBUG_KMS("Disabling SSC source\n");
 
/* Turn off the SSC source */
val &= ~DREF_SSC_SOURCE_MASK;
val |= DREF_SSC_SOURCE_DISABLE;
8389,7 → 8422,6
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
}
 
BUG_ON(val != final);
}
8562,6 → 8594,67
mutex_unlock(&dev_priv->sb_lock);
}
 
#define BEND_IDX(steps) ((50 + (steps)) / 5)
 
static const uint16_t sscdivintphase[] = {
[BEND_IDX( 50)] = 0x3B23,
[BEND_IDX( 45)] = 0x3B23,
[BEND_IDX( 40)] = 0x3C23,
[BEND_IDX( 35)] = 0x3C23,
[BEND_IDX( 30)] = 0x3D23,
[BEND_IDX( 25)] = 0x3D23,
[BEND_IDX( 20)] = 0x3E23,
[BEND_IDX( 15)] = 0x3E23,
[BEND_IDX( 10)] = 0x3F23,
[BEND_IDX( 5)] = 0x3F23,
[BEND_IDX( 0)] = 0x0025,
[BEND_IDX( -5)] = 0x0025,
[BEND_IDX(-10)] = 0x0125,
[BEND_IDX(-15)] = 0x0125,
[BEND_IDX(-20)] = 0x0225,
[BEND_IDX(-25)] = 0x0225,
[BEND_IDX(-30)] = 0x0325,
[BEND_IDX(-35)] = 0x0325,
[BEND_IDX(-40)] = 0x0425,
[BEND_IDX(-45)] = 0x0425,
[BEND_IDX(-50)] = 0x0525,
};
 
/*
* Bend CLKOUT_DP
* steps -50 to 50 inclusive, in steps of 5
* < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135 MHz)
* change in clock period = -(steps / 10) * 5.787 ps
*/
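/*
 * Worked example of the formula above: steps = +10 gives
 * BEND_IDX(10) == 12, i.e. the 0x3F23 entry of sscdivintphase[], and
 * shortens the clock period by (10 / 10) * 5.787 ps = 5.787 ps.
 */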
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
uint32_t tmp;
int idx = BEND_IDX(steps);
 
if (WARN_ON(steps % 5 != 0))
return;
 
if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
return;
 
mutex_lock(&dev_priv->sb_lock);
 
if (steps % 10 != 0)
tmp = 0xAAAAAAAB;
else
tmp = 0x00000000;
intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
 
tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
tmp &= 0xffff0000;
tmp |= sscdivintphase[idx];
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
 
mutex_unlock(&dev_priv->sb_lock);
}
 
#undef BEND_IDX
 
static void lpt_init_pch_refclk(struct drm_device *dev)
{
struct intel_encoder *encoder;
8577,11 → 8670,13
}
}
 
if (has_vga)
if (has_vga) {
lpt_bend_clkout_dp(to_i915(dev), 0);
lpt_enable_clkout_dp(dev, true, true);
else
} else {
lpt_disable_clkout_dp(dev);
}
}
 
/*
* Initialize reference clocks when the driver loads
8943,7 → 9038,7
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
 
is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
 
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
9284,18 → 9379,21
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
return false;
goto out;
 
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
9358,7 → 9456,12
 
ironlake_get_pfit_config(crtc, pipe_config);
 
return true;
ret = true;
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9372,8 → 9475,8
 
I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
9719,14 → 9822,10
else
cdclk = 337500;
 
/*
* FIXME move the cdclk calculation to
* compute_config() so we can fail gracefully.
*/
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
cdclk, dev_priv->max_cdclk_freq);
cdclk = dev_priv->max_cdclk_freq;
return -EINVAL;
}
 
to_intel_atomic_state(state)->cdclk = cdclk;
9821,6 → 9920,7
break;
case PORT_CLK_SEL_SPLL:
pipe_config->shared_dpll = DPLL_ID_SPLL;
break;
}
}
 
9837,7 → 9937,7
 
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_BROXTON(dev))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
9873,13 → 9973,18
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain pfit_domain;
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
uint32_t tmp;
bool ret;
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
power_domain_mask = BIT(power_domain);
 
ret = false;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
9905,13 → 10010,14
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
 
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
goto out;
power_domain_mask |= BIT(power_domain);
 
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
if (!(tmp & PIPECONF_ENABLE))
return false;
goto out;
 
haswell_get_ddi_port_state(crtc, pipe_config);
 
9921,14 → 10027,14
skl_init_scalers(dev, crtc, pipe_config);
}
 
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
 
if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
 
if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT(power_domain);
if (INTEL_INFO(dev)->gen >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
9946,7 → 10052,13
pipe_config->pixel_multiplier = 1;
}
 
return true;
ret = true;
 
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
10178,10 → 10290,8
int ret;
 
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
drm_gem_object_unreference(&obj->base);
if (!intel_fb)
return ERR_PTR(-ENOMEM);
}
 
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret)
10188,10 → 10298,9
goto err;
 
return &intel_fb->base;
 
err:
drm_gem_object_unreference(&obj->base);
kfree(intel_fb);
 
return ERR_PTR(ret);
}
 
10231,6 → 10340,7
struct drm_display_mode *mode,
int depth, int bpp)
{
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
10245,7 → 10355,11
bpp);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
 
return intel_framebuffer_create(dev, &mode_cmd, obj);
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
drm_gem_object_unreference_unlocked(&obj->base);
 
return fb;
}
 
static struct drm_framebuffer *
10780,7 → 10894,7
spin_unlock_irq(&dev->event_lock);
 
if (work) {
cancel_work_sync(&work->work);
// cancel_work_sync(&work->work);
kfree(work);
}
 
11148,7 → 11262,7
*/
if (ring->id == RCS) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, DERRMR);
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
11158,7 → 11272,7
else
intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
if (IS_GEN8(dev)) {
intel_ring_emit(ring, 0);
11198,11 → 11312,16
return true;
else if (i915.enable_execlists)
return true;
// else if (obj->base.dma_buf &&
// !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
// false))
// return true;
else
return ring != i915_gem_request_get_ring(obj->last_write_req);
}
 
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
unsigned int rotation,
struct intel_unpin_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
11209,7 → 11328,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
const enum pipe pipe = intel_crtc->pipe;
u32 ctl, stride;
u32 ctl, stride, tile_height;
 
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
11233,9 → 11352,16
* The stride is either expressed as a multiple of 64-byte chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0], 0);
stride = DIV_ROUND_UP(fb->height, tile_height);
} else {
stride = fb->pitches[0] /
intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
}
 
/*
* Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11256,10 → 11382,9
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
i915_reg_t reg = DSPCNTR(intel_crtc->plane);
u32 dspcntr;
u32 reg;
 
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
 
if (obj->tiling_mode != I915_TILING_NONE)
11293,7 → 11418,7
intel_pipe_update_start(crtc);
 
if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
skl_do_mmio_flip(crtc, work);
skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
else
/* use_mmio_flip() restricts MMIO flips to ilk+ */
ilk_do_mmio_flip(crtc, work);
11305,6 → 11430,9
{
struct intel_mmio_flip *mmio_flip =
container_of(work, struct intel_mmio_flip, work);
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
 
if (mmio_flip->req) {
WARN_ON(__i915_wait_request(mmio_flip->req,
11314,6 → 11442,12
i915_gem_request_unreference__unlocked(mmio_flip->req);
}
 
/* For framebuffer backed by dmabuf, wait for fence */
// if (obj->base.dma_buf)
// WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
// false, false,
// MAX_SCHEDULE_TIMEOUT) < 0);
 
intel_do_mmio_flip(mmio_flip);
kfree(mmio_flip);
}
11320,10 → 11454,7
 
static int intel_queue_mmio_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
struct drm_i915_gem_object *obj)
{
struct intel_mmio_flip *mmio_flip;
 
11334,6 → 11465,7
mmio_flip->i915 = to_i915(dev);
mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
mmio_flip->crtc = to_intel_crtc(crtc);
mmio_flip->rotation = crtc->primary->state->rotation;
 
INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
schedule_work(&mmio_flip->work);
11400,6 → 11532,8
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
 
WARN_ON(!in_interrupt());
 
if (crtc == NULL)
return;
 
11515,7 → 11649,7
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
11537,9 → 11671,14
* synchronisation, so all we want here is to pin the framebuffer
* into the display plane and skip any waits.
*/
if (!mmio_flip) {
ret = i915_gem_object_sync(obj, ring, &request);
if (ret)
goto cleanup_pending;
}
 
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
crtc->primary->state,
mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
crtc->primary->state);
if (ret)
goto cleanup_pending;
 
11548,8 → 11687,7
work->gtt_offset += intel_crtc->dspaddr_offset;
 
if (mmio_flip) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
ret = intel_queue_mmio_flip(dev, crtc, obj);
if (ret)
goto cleanup_unpin;
 
11580,7 → 11718,7
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
 
intel_fbc_disable_crtc(intel_crtc);
intel_fbc_deactivate(intel_crtc);
intel_frontbuffer_flip_prepare(dev,
to_intel_plane(primary)->frontbuffer_bit);
 
11663,21 → 11801,41
static bool intel_wm_need_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
/* Update watermarks on tiling changes. */
if (!plane->state->fb || !state->fb ||
plane->state->fb->modifier[0] != state->fb->modifier[0] ||
plane->state->rotation != state->rotation)
struct intel_plane_state *new = to_intel_plane_state(state);
struct intel_plane_state *cur = to_intel_plane_state(plane->state);
 
/* Update watermarks on tiling or size changes. */
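/*
 * (Only widths and heights are compared below, so moving a plane
 * without resizing it does not by itself force an update.)
 */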
if (new->visible != cur->visible)
return true;
 
if (plane->state->crtc_w != state->crtc_w)
if (!cur->base.fb || !new->base.fb)
return false;
 
if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
cur->base.rotation != new->base.rotation ||
drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
return true;
 
return false;
}
 
static bool needs_scaling(struct intel_plane_state *state)
{
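/* src is in 16.16 fixed point; shift down to integer pixels */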
int src_w = drm_rect_width(&state->src) >> 16;
int src_h = drm_rect_height(&state->src) >> 16;
int dst_w = drm_rect_width(&state->dst);
int dst_h = drm_rect_height(&state->dst);
 
return (src_w != dst_w || src_h != dst_h);
}
 
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
struct drm_crtc *crtc = crtc_state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *plane = plane_state->plane;
11690,7 → 11848,6
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
 
bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;
 
11703,14 → 11860,6
return ret;
}
 
/*
* Disabling a plane is always okay; we just need to update
* fb tracking in a special way since cleanup_fb() won't
* get called by the plane helpers.
*/
if (old_plane_state->base.fb && !fb)
intel_crtc->atomic.disabled_planes |= 1 << i;
 
was_visible = old_plane_state->visible;
visible = to_intel_plane_state(plane_state)->visible;
 
11734,24 → 11883,24
turn_off, turn_on, mode_changed);
 
if (turn_on) {
intel_crtc->atomic.update_wm_pre = true;
pipe_config->update_wm_pre = true;
 
/* must disable cxsr around plane enable/disable */
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
intel_crtc->atomic.disable_cxsr = true;
/* to potentially re-enable cxsr */
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.update_wm_post = true;
}
if (plane->type != DRM_PLANE_TYPE_CURSOR)
pipe_config->disable_cxsr = true;
} else if (turn_off) {
intel_crtc->atomic.update_wm_post = true;
pipe_config->update_wm_post = true;
 
/* must disable cxsr around plane enable/disable */
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
if (is_crtc_enabled)
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.disable_cxsr = true;
pipe_config->disable_cxsr = true;
}
} else if (intel_wm_need_update(plane, plane_state)) {
intel_crtc->atomic.update_wm_pre = true;
/* FIXME bollocks */
pipe_config->update_wm_pre = true;
pipe_config->update_wm_post = true;
}
 
if (visible || was_visible)
11760,7 → 11909,6
 
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
intel_crtc->atomic.wait_for_flips = true;
intel_crtc->atomic.pre_disable_primary = turn_off;
intel_crtc->atomic.post_enable_primary = turn_on;
 
11808,11 → 11956,23
case DRM_PLANE_TYPE_CURSOR:
break;
case DRM_PLANE_TYPE_OVERLAY:
if (turn_off && !mode_changed) {
/*
* WaCxSRDisabledForSpriteScaling:ivb
*
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
if (IS_IVYBRIDGE(dev) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state)) {
to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
} else if (turn_off && !mode_changed) {
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.update_sprite_watermarks |=
1 << i;
}
 
break;
}
return 0;
}
11885,7 → 12045,7
}
 
if (mode_changed && !crtc_state->active)
intel_crtc->atomic.update_wm_post = true;
pipe_config->update_wm_post = true;
 
if (mode_changed && crtc_state->enable &&
dev_priv->display.crtc_compute_clock &&
11897,6 → 12057,12
}
 
ret = 0;
if (dev_priv->display.compute_pipe_wm) {
ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
if (ret)
return ret;
}
 
if (INTEL_INFO(dev)->gen >= 9) {
if (mode_changed)
ret = skl_update_scaler_crtc(pipe_config);
11952,13 → 12118,23
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
 
/* Clamp bpp to 8 on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0 && bpp > 24) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
bpp);
pipe_config->pipe_bpp = 24;
/* Clamp bpp to default limit on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0) {
int type = connector->base.connector_type;
int clamp_bpp = 24;
 
/* Fall back to 18 bpp when DP sink capability is unknown. */
if (type == DRM_MODE_CONNECTOR_DisplayPort ||
type == DRM_MODE_CONNECTOR_eDP)
clamp_bpp = 18;
 
if (bpp > clamp_bpp) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
bpp, clamp_bpp);
pipe_config->pipe_bpp = clamp_bpp;
}
}
}
 
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11970,7 → 12146,7
struct drm_connector_state *connector_state;
int bpp, i;
 
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)))
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
bpp = 10*3;
else if (INTEL_INFO(dev)->gen >= 5)
bpp = 12*3;
12086,7 → 12262,7
pipe_config->dpll_hw_state.pll9,
pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
} else if (IS_SKYLAKE(dev)) {
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
pipe_config->ddi_pll_sel,
12344,8 → 12520,20
crtc->hwmode = crtc->state->adjusted_mode;
else
crtc->hwmode.crtc_clock = 0;
 
/*
* Update legacy state to satisfy fbc code. This can
* be removed when fbc uses the atomic state.
*/
if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
struct drm_plane_state *plane_state = crtc->primary->state;
 
crtc->primary->fb = plane_state->fb;
crtc->x = plane_state->src_x >> 16;
crtc->y = plane_state->src_y >> 16;
}
}
}
 
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
12369,7 → 12557,7
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
base.head) \
if (mask & (1 <<(intel_crtc)->pipe))
for_each_if (mask & (1 <<(intel_crtc)->pipe))
 
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
12553,6 → 12741,8
} else
PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
PIPE_CONF_CHECK_I(has_dsi_encoder);
 
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12570,7 → 12760,7
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
IS_VALLEYVIEW(dev))
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
PIPE_CONF_CHECK_I(limited_color_range);
PIPE_CONF_CHECK_I(has_infoframe);
 
13107,6 → 13297,45
return 0;
}
 
/*
* Handle calculation of various watermark data at the end of the atomic check
* phase. The code here should be run after the per-crtc and per-plane 'check'
* handlers to ensure that all derived state has been updated.
*/
static void calc_watermark_data(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct drm_plane *plane;
struct drm_plane_state *pstate;
 
/*
* Calculate watermark configuration details now that derived
* plane/crtc state is all properly updated.
*/
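/*
 * Note: "x ?: y" below is the GNU short-hand for "x ? x : y",
 * i.e. prefer the state tracked in this atomic transaction and
 * fall back to the object's current state when it is not part
 * of the transaction.
 */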
drm_for_each_crtc(crtc, dev) {
cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
crtc->state;
 
if (cstate->active)
intel_state->wm_config.num_pipes_active++;
}
drm_for_each_legacy_plane(plane, dev) {
pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
plane->state;
 
if (!to_intel_plane_state(pstate)->visible)
continue;
 
intel_state->wm_config.sprites_enabled = true;
if (pstate->crtc_w != pstate->src_w >> 16 ||
pstate->crtc_h != pstate->src_h >> 16)
intel_state->wm_config.sprites_scaled = true;
}
}
 
/**
* intel_atomic_check - validate state object
* @dev: drm device
13115,6 → 13344,7
static int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int ret, i;
13182,12 → 13412,86
if (ret)
return ret;
} else
to_intel_atomic_state(state)->cdclk =
to_i915(state->dev)->cdclk_freq;
intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
 
return drm_atomic_helper_check_planes(state->dev, state);
ret = drm_atomic_helper_check_planes(state->dev, state);
if (ret)
return ret;
 
calc_watermark_data(state);
 
return 0;
}
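/*
 * Sketch of the prepare step implemented below: wait for pending
 * page flips, pin the new framebuffers via
 * drm_atomic_helper_prepare_planes(), then wait, with struct_mutex
 * dropped, for outstanding rendering on each plane's framebuffer so
 * the commit itself never has to block on the GPU.
 */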
 
static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
 
if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
return -EINVAL;
}
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (state->legacy_cursor_update)
continue;
 
ret = intel_crtc_wait_for_pending_flips(crtc);
if (ret)
return ret;
 
// if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
// flush_workqueue(dev_priv->wq);
}
 
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
 
ret = drm_atomic_helper_prepare_planes(dev, state);
if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
u32 reset_counter;
 
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
 
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
 
if (!intel_plane_state->wait_req)
continue;
 
ret = __i915_wait_request(intel_plane_state->wait_req,
reset_counter, true,
NULL, NULL);
 
/* Swallow -EIO errors to allow updates during hw lockup. */
if (ret == -EIO)
ret = 0;
 
if (ret)
break;
}
 
if (!ret)
return 0;
 
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
}
 
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
/**
* intel_atomic_commit - commit validated state object
* @dev: DRM device
13209,22 → 13513,20
bool async)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int ret = 0;
int i;
bool any_ms = false;
 
if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
return -EINVAL;
ret = intel_atomic_prepare_commit(dev, state, async);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
return ret;
}
 
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
 
drm_atomic_helper_swap_state(dev, state);
dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13240,6 → 13542,16
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_disable_shared_dpll(intel_crtc);
 
/*
* Underruns don't always raise
* interrupts, so check manually.
*/
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
 
if (!crtc->state->active)
intel_update_watermarks(crtc);
}
}
 
13262,6 → 13574,9
to_intel_crtc_state(crtc->state)->update_pipe;
unsigned long put_domains = 0;
 
if (modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
if (modeset && crtc->state->active) {
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
13277,6 → 13592,8
if (!modeset)
intel_pre_plane_update(intel_crtc);
 
if (crtc->state->active &&
(crtc->state->planes_changed || update_pipe))
drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 
if (put_domains)
13283,12 → 13600,18
modeset_put_power_domains(dev_priv, put_domains);
 
intel_post_plane_update(intel_crtc);
 
if (modeset)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
}
 
/* FIXME: add subpixel order */
 
drm_atomic_helper_wait_for_vblanks(dev, state);
 
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
 
if (any_ms)
intel_modeset_check_state(dev, state);
13353,7 → 13676,7
{
uint32_t val;
 
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(PCH_DPLL(pll->id));
13361,6 → 13684,8
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 
return val & DPLL_VCO_ENABLE;
}
 
13457,6 → 13782,8
* bits. Some older platforms need special physical address handling for
* cursor planes.
*
* Must be called with struct_mutex held.
*
* Returns 0 on success, negative error code on failure.
*/
int
13467,29 → 13794,61
struct drm_framebuffer *fb = new_state->fb;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret = 0;
 
if (!obj)
if (!obj && !old_obj)
return 0;
 
mutex_lock(&dev->struct_mutex);
if (old_obj) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
 
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer. Note that we rely on userspace rendering
* into the buffer attached to the pipe they are waiting
* on. If not, userspace generates a GPU hang with IPEHR
* pointing to the MI_WAIT_FOR_EVENT.
*
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
if (needs_modeset(crtc_state))
ret = i915_gem_object_wait_rendering(old_obj, true);
 
/* Swallow -EIO errors to allow updates during hw lockup. */
if (ret && ret != -EIO)
return ret;
}
 
/* For framebuffer backed by dmabuf, wait for fence */
 
if (!obj) {
ret = 0;
} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
INTEL_INFO(dev)->cursor_needs_physical) {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = 1;
ret = i915_gem_object_attach_phys(obj, align);
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
}
 
if (ret == 0)
if (ret == 0) {
if (obj) {
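/*
 * Stash the most recent GPU write request for this object;
 * intel_atomic_prepare_commit() waits on it, without holding
 * struct_mutex, before the planes are committed.
 */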
struct intel_plane_state *plane_state =
to_intel_plane_state(new_state);
 
i915_gem_request_assign(&plane_state->wait_req,
obj->last_write_req);
}
 
i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
}
 
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
13499,6 → 13858,8
* @fb: old framebuffer that was on plane
*
* Cleans up a framebuffer that has just been removed from a plane.
*
* Must be called with struct_mutex held.
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
13505,18 → 13866,28
const struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *old_intel_state;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
 
if (!obj)
old_intel_state = to_intel_plane_state(old_state);
 
if (!obj && !old_obj)
return;
 
if (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical) {
mutex_lock(&dev->struct_mutex);
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state);
mutex_unlock(&dev->struct_mutex);
 
/* prepare_fb aborted? */
if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
(obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
 
i915_gem_request_assign(&old_intel_state->wait_req, NULL);
 
}
}
 
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13534,7 → 13905,7
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
 
if (!crtc_clock || !cdclk)
if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
return DRM_PLANE_HELPER_NO_SCALING;
 
/*
13583,19 → 13954,9
struct drm_framebuffer *fb = state->base.fb;
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
struct drm_rect *src = &state->src;
 
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
 
plane->fb = fb;
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
 
if (!crtc->state->active)
return;
 
dev_priv->display.update_primary_plane(crtc, fb,
state->src.x1 >> 16,
state->src.y1 >> 16);
13620,11 → 13981,7
to_intel_crtc_state(old_crtc_state);
bool modeset = needs_modeset(crtc->state);
 
if (intel_crtc->atomic.update_wm_pre)
intel_update_watermarks(crtc);
 
/* Perform vblank evasion around commit operation */
if (crtc->state->active)
intel_pipe_update_start(intel_crtc);
 
if (modeset)
13641,7 → 13998,6
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (crtc->state->active)
intel_pipe_update_end(intel_crtc);
}
 
13719,7 → 14075,7
drm_universal_plane_init(dev, &primary->base, 0,
&intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
DRM_PLANE_TYPE_PRIMARY, NULL);
 
if (INTEL_INFO(dev)->gen >= 4)
intel_create_rotation_property(dev, primary);
13871,7 → 14227,7
&intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
DRM_PLANE_TYPE_CURSOR, NULL);
 
if (INTEL_INFO(dev)->gen >= 4) {
if (!dev->mode_config.rotation_property)
13948,7 → 14304,7
goto fail;
 
ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
cursor, &intel_crtc_funcs);
cursor, &intel_crtc_funcs, NULL);
if (ret)
goto fail;
 
14074,9 → 14430,16
if (IS_CHERRYVIEW(dev))
return false;
 
if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
 
/* DDI E can't be used if DDI A requires 4 lanes */
if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
 
if (!dev_priv->vbt.int_crt_support)
return false;
 
return true;
}
 
14110,7 → 14473,7
*/
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
if (found || IS_SKYLAKE(dev))
if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
intel_ddi_init(dev, PORT_A);
 
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
14126,7 → 14489,7
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
if (IS_SKYLAKE(dev) &&
if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14141,7 → 14504,7
 
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB, true);
found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
if (!found)
intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14159,9 → 14522,7
 
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
bool has_edp, has_port;
 
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
14170,37 → 14531,27
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
*
* Sadly the straps seem to be missing sometimes even for HDMI
* ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
* and VBT for the presence of the port. Additionally we can't
* trust the port type the VBT declares, as we've seen at least
* HDMI ports that the VBT claims are DP or eDP.
*/
has_edp = intel_dp_is_edp(dev, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_B))
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_B))
intel_dp_init(dev, VLV_DP_B, PORT_B);
 
has_edp = intel_dp_is_edp(dev, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_C))
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_C))
intel_dp_init(dev, VLV_DP_C, PORT_C);
 
if (IS_CHERRYVIEW(dev)) {
/*
* eDP not supported on port D,
* so no need to worry about it
*/
has_port = intel_bios_is_port_present(dev_priv, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
/* eDP not supported on port D, so don't check VBT */
if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev, CHV_HDMID, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED)
intel_dp_init(dev, CHV_DP_D, PORT_D);
if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}
 
intel_dsi_init(dev);
14209,7 → 14560,7
 
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, GEN3_SDVOB, true);
found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
if (!found && IS_G4X(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14223,7 → 14574,7
 
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
found = intel_sdvo_init(dev, GEN3_SDVOC, false);
found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
}
 
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14242,6 → 14593,9
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
 
// if (SUPPORTS_TV(dev))
// intel_tv_init(dev);
 
intel_psr_init(dev);
 
for_each_intel_encoder(dev, encoder) {
14317,7 → 14671,7
* pixels and 32K bytes."
*/
return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
} else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
return 32*1024;
} else if (gen >= 4) {
if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14421,7 → 14775,8
}
break;
case DRM_FORMAT_ABGR8888:
if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
INTEL_INFO(dev)->gen < 9) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
14437,7 → 14792,7
}
break;
case DRM_FORMAT_ABGR2101010:
if (!IS_VALLEYVIEW(dev)) {
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
14486,8 → 14841,9
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd2 *user_mode_cmd)
const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
14496,7 → 14852,11
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
 
return intel_framebuffer_create(dev, &mode_cmd, obj);
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
drm_gem_object_unreference_unlocked(&obj->base);
 
return fb;
}
 
#ifndef CONFIG_DRM_FBDEV_EMULATION
14560,7 → 14920,7
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
14581,7 → 14941,7
}
 
/* Returns the core display clock speed */
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
else if (IS_BROXTON(dev))
14593,7 → 14953,7
else if (IS_HASWELL(dev))
dev_priv->display.get_display_clock_speed =
haswell_get_display_clock_speed;
else if (IS_VALLEYVIEW(dev))
else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
dev_priv->display.get_display_clock_speed =
valleyview_get_display_clock_speed;
else if (IS_GEN5(dev))
14621,9 → 14981,6
else if (IS_I945GM(dev) || IS_845G(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_PINEVIEW(dev))
dev_priv->display.get_display_clock_speed =
pnv_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
14654,7 → 15011,7
dev_priv->display.modeset_calc_cdclk =
broadwell_modeset_calc_cdclk;
}
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->display.modeset_commit_cdclk =
valleyview_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
14870,7 → 15227,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
u32 vga_reg = i915_vgacntrl_reg(dev);
i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
// vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
14982,9 → 15339,6
i915_disable_vga(dev);
intel_setup_outputs(dev);
 
/* Just in case the BIOS is doing something questionable. */
intel_fbc_disable(dev_priv);
 
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev);
drm_modeset_unlock_all(dev);
15071,10 → 15425,9
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
 
/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->config->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
/* restore vblank interrupts to correct state */
15141,6 → 15494,7
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
crtc->base.state->connector_mask = 0;
 
/* Because we only establish the connector -> encoder ->
* crtc links if something is active, this means the
15228,7 → 15582,7
void i915_redisable_vga_power_on(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15247,10 → 15601,12
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
 
i915_redisable_vga_power_on(dev);
 
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
 
static bool primary_get_hw_state(struct intel_plane *plane)
15267,7 → 15623,7
struct intel_plane_state *plane_state =
to_intel_plane_state(primary->state);
 
plane_state->visible =
plane_state->visible = crtc->active &&
primary_get_hw_state(to_intel_plane(primary));
 
if (plane_state->visible)
15343,7 → 15699,21
for_each_intel_connector(dev, connector) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
connector->base.encoder = &connector->encoder->base;
 
encoder = connector->encoder;
connector->base.encoder = &encoder->base;
 
if (encoder->base.crtc &&
encoder->base.crtc->state->active) {
/*
* This has to be done during hardware readout
* because anything calling .crtc_disable may
* rely on the connector_mask being accurate.
*/
encoder->base.crtc->state->connector_mask |=
1 << drm_connector_index(&connector->base);
}
 
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
15428,7 → 15798,7
pll->on = false;
}
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_wm_get_hw_state(dev);
else if (IS_GEN9(dev))
skl_wm_get_hw_state(dev);
15524,8 → 15894,7
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(c->primary,
c->primary->fb,
c->primary->state,
NULL, NULL);
c->primary->state);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
15553,7 → 15922,7
{
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_connector *connector;
 
intel_disable_gt_powersave(dev);
 
15580,13 → 15949,9
flush_scheduled_work();
 
/* destroy the backlight and sysfs files before encoders/connectors */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct intel_connector *intel_connector;
for_each_intel_connector(dev, connector)
connector->unregister(connector);
 
intel_connector = to_intel_connector(connector);
intel_connector->unregister(intel_connector);
}
 
drm_mode_config_cleanup(dev);
 
intel_cleanup_overlay(dev);
/drivers/video/drm/i915/intel_dp.c
28,6 → 28,7
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
387,8 → 388,7
* We don't have a power sequencer currently.
* Pick one that's not used by other ports.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
struct intel_dp *tmp;
 
if (encoder->type != INTEL_OUTPUT_EDP)
515,7 → 515,7
struct drm_device *dev = dev_priv->dev;
struct intel_encoder *encoder;
 
if (WARN_ON(!IS_VALLEYVIEW(dev)))
if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
return;
 
/*
528,7 → 528,7
* should use them always.
*/
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
for_each_intel_encoder(dev, encoder) {
struct intel_dp *intel_dp;
 
if (encoder->type != INTEL_OUTPUT_EDP)
539,7 → 539,8
}
}
 
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
551,7 → 552,8
return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
 
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
579,9 → 581,9
 
pps_lock(intel_dp);
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
u32 pp_ctrl_reg, pp_div_reg;
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
 
pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
608,7 → 610,7
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (IS_VALLEYVIEW(dev) &&
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
 
622,7 → 624,7
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (IS_VALLEYVIEW(dev) &&
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
 
652,7 → 654,7
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t status;
bool done;
 
679,7 → 681,7
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2 and use that
*/
return index ? 0 : intel_hrawclk(dev) / 2;
return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}
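/*
 * Worked example, assuming intel_hrawclk() reports the raw clock
 * in MHz: a 200 MHz hrawclk yields a divider of 100 and thus the
 * desired 2MHz AUX clock.
 */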
 
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
692,10 → 694,10
return 0;
 
if (intel_dig_port->port == PORT_A) {
return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 
} else {
return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
}
}
 
709,7 → 711,7
if (index)
return 0;
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
} else if (HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
717,7 → 719,7
default: return 0;
}
} else {
return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
}
}
 
750,7 → 752,7
else
precharge = 5;
 
if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
789,8 → 791,7
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t ch_data = ch_ctl + 4;
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
853,7 → 854,7
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
I915_WRITE(ch_data + i,
I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
intel_dp_pack_aux(send + i,
send_bytes - i));
 
913,11 → 914,32
/* Unload any bytes sent back from the other side */
recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
 
/*
* By BSpec: "Message sizes of 0 or >20 are not allowed."
* We have no idea what happened, so we return -EBUSY so the
* drm layer takes care of the necessary retries.
*/
if (recv_bytes == 0 || recv_bytes > 20) {
DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
recv_bytes);
/*
* FIXME: This patch was created on top of a series that
* organizes the retries at the drm level. There, EBUSY should
* also take care of the 1 ms wait before retrying.
* That aux-retry reorganization is still needed, and once it is
* merged we will remove this sleep from here.
*/
usleep_range(1000, 1500);
ret = -EBUSY;
goto out;
}
 
if (recv_bytes > recv_size)
recv_bytes = recv_size;
 
for (i = 0; i < recv_bytes; i += 4)
intel_dp_unpack_aux(I915_READ(ch_data + i),
intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
recv + i, recv_bytes - i);
 
ret = recv_bytes;
1003,87 → 1025,193
return ret;
}
 
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
const char *name = NULL;
uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
int ret;
switch (port) {
case PORT_B:
case PORT_C:
case PORT_D:
return DP_AUX_CH_CTL(port);
default:
MISSING_CASE(port);
return DP_AUX_CH_CTL(PORT_B);
}
}
 
/* On SKL we don't have Aux for port E so we rely on VBT to set
* a proper alternate aux channel.
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
switch (port) {
case PORT_B:
case PORT_C:
case PORT_D:
return DP_AUX_CH_DATA(port, index);
default:
MISSING_CASE(port);
return DP_AUX_CH_DATA(PORT_B, index);
}
}
 
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
switch (port) {
case PORT_A:
return DP_AUX_CH_CTL(port);
case PORT_B:
case PORT_C:
case PORT_D:
return PCH_DP_AUX_CH_CTL(port);
default:
MISSING_CASE(port);
return DP_AUX_CH_CTL(PORT_A);
}
}
 
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
switch (port) {
case PORT_A:
return DP_AUX_CH_DATA(port, index);
case PORT_B:
case PORT_C:
case PORT_D:
return PCH_DP_AUX_CH_DATA(port, index);
default:
MISSING_CASE(port);
return DP_AUX_CH_DATA(PORT_A, index);
}
}
 
/*
* On SKL we don't have Aux for port E so we rely
* on VBT to set a proper alternate aux channel.
*/
if (IS_SKYLAKE(dev) && port == PORT_E) {
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[PORT_E];
 
switch (info->alternate_aux_channel) {
case DP_AUX_A:
return PORT_A;
case DP_AUX_B:
porte_aux_ctl_reg = DPB_AUX_CH_CTL;
break;
return PORT_B;
case DP_AUX_C:
porte_aux_ctl_reg = DPC_AUX_CH_CTL;
break;
return PORT_C;
case DP_AUX_D:
porte_aux_ctl_reg = DPD_AUX_CH_CTL;
break;
case DP_AUX_A:
return PORT_D;
default:
porte_aux_ctl_reg = DPA_AUX_CH_CTL;
MISSING_CASE(info->alternate_aux_channel);
return PORT_A;
}
}
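/*
* For example, a SKL board that wires port E to the port C AUX pins
* would set alternate_aux_channel = DP_AUX_C in its VBT, and the
* helper above then folds every PORT_E lookup onto PORT_C.
*/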
 
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
if (port == PORT_E)
port = skl_porte_aux_port(dev_priv);
 
switch (port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
name = "DPDDC-A";
break;
case PORT_B:
intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
name = "DPDDC-B";
break;
case PORT_C:
intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
name = "DPDDC-C";
break;
case PORT_D:
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
name = "DPDDC-D";
break;
case PORT_E:
intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
name = "DPDDC-E";
break;
return DP_AUX_CH_CTL(port);
default:
BUG();
MISSING_CASE(port);
return DP_AUX_CH_CTL(PORT_A);
}
}
 
/*
* The AUX_CTL register is usually DP_CTL + 0x10.
*
* On Haswell and Broadwell though:
* - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
* - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
*
* Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
*/
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
if (port == PORT_E)
port = skl_porte_aux_port(dev_priv);
 
intel_dp->aux.name = name;
switch (port) {
case PORT_A:
case PORT_B:
case PORT_C:
case PORT_D:
return DP_AUX_CH_DATA(port, index);
default:
MISSING_CASE(port);
return DP_AUX_CH_DATA(PORT_A, index);
}
}
 
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_ctl_reg(dev_priv, port);
else if (HAS_PCH_SPLIT(dev_priv))
return ilk_aux_ctl_reg(dev_priv, port);
else
return g4x_aux_ctl_reg(dev_priv, port);
}
 
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_data_reg(dev_priv, port, index);
else if (HAS_PCH_SPLIT(dev_priv))
return ilk_aux_data_reg(dev_priv, port, index);
else
return g4x_aux_data_reg(dev_priv, port, index);
}
 
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
int i;
 
intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
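/*
* A minimal sketch (not part of the driver) of how the lookup above
* resolves: the example function and its debug message are hypothetical,
* while intel_aux_ctl_reg() and i915_mmio_reg_offset() are used as
* declared elsewhere in this file.
*/
#if 0
static void example_aux_reg_lookup(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
i915_reg_t ctl = intel_aux_ctl_reg(dev_priv, port);

/* gen9+ takes the skl_*() path, PCH platforms the ilk_*() one */
DRM_DEBUG_KMS("AUX_CH_CTL for port %c is at 0x%x\n",
port_name(port), i915_mmio_reg_offset(ctl));
}
#endif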
 
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
drm_dp_aux_unregister(&intel_dp->aux);
kfree(intel_dp->aux.name);
}
 
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
int ret;
 
intel_aux_reg_init(intel_dp);
 
intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
if (!intel_dp->aux.name)
return -ENOMEM;
 
intel_dp->aux.dev = dev->dev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
 
DRM_DEBUG_KMS("registering %s bus for %s\n", name,
"");
DRM_DEBUG_KMS("registering %s bus for %s\n",
intel_dp->aux.name,
connector->base.kdev->kobj.name);
 
ret = drm_dp_aux_register(&intel_dp->aux);
if (ret < 0) {
DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
name, ret);
return;
intel_dp->aux.name, ret);
kfree(intel_dp->aux.name);
return ret;
}
 
ret = sysfs_create_link(&connector->base.kdev->kobj,
1090,9 → 1218,13
&intel_dp->aux.ddc.dev.kobj,
intel_dp->aux.ddc.dev.kobj.name);
if (ret < 0) {
DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
drm_dp_aux_unregister(&intel_dp->aux);
DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
intel_dp->aux.name, ret);
intel_dp_aux_fini(intel_dp);
return ret;
}
 
return 0;
}
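/*
* Note that on success the AUX channel owns the kasprintf()'d name:
* intel_dp_aux_fini() above both unregisters the channel and frees the
* name, so callers need only that single call on teardown.
*/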
 
static void
1184,10 → 1316,13
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
 
static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
 
/* WaDisableHBR2:skl */
if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
return false;
 
if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1198,14 → 1333,16
}
 
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
int size;
 
if (IS_BROXTON(dev)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev)) {
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
1214,7 → 1351,7
}
 
/* This depends on the fact that 5.4 is last value in the array */
if (!intel_dp_source_supports_hbr2(dev))
if (!intel_dp_source_supports_hbr2(intel_dp))
size--;
 
return size;
1279,12 → 1416,11
static int intel_dp_common_rates(struct intel_dp *intel_dp,
int *common_rates)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
const int *source_rates, *sink_rates;
int source_len, sink_len;
 
sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
source_len = intel_dp_source_rates(dev, &source_rates);
source_len = intel_dp_source_rates(intel_dp, &source_rates);
 
return intersect_rates(source_rates, source_len,
sink_rates, sink_len,
1309,7 → 1445,6
 
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
const int *source_rates, *sink_rates;
int source_len, sink_len, common_len;
int common_rates[DP_MAX_SUPPORTED_RATES];
1318,7 → 1453,7
if ((drm_debug & DRM_UT_KMS) == 0)
return;
 
source_len = intel_dp_source_rates(dev, &source_rates);
source_len = intel_dp_source_rates(intel_dp, &source_rates);
snprintf_int_array(str, sizeof(str), source_rates, source_len);
DRM_DEBUG_KMS("source rates: %s\n", str);
 
1360,7 → 1495,7
return rate_to_index(rate, intel_dp->sink_rates);
}
 
static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select)
{
if (intel_dp->num_sink_rates) {
1421,7 → 1556,7
return ret;
}
 
if (!HAS_PCH_SPLIT(dev))
if (HAS_GMCH_DISPLAY(dev))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
1525,7 → 1660,7
&pipe_config->dp_m2_n2);
}
 
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
skl_edp_set_pll_config(pipe_config);
else if (IS_BROXTON(dev))
/* handled in ddi */;
1537,37 → 1672,6
return true;
}
 
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
crtc->config->port_clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
 
if (crtc->config->port_clock == 162000) {
/* For a long time we've carried around an ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
dpa_ctl |= DP_PLL_FREQ_160MHZ;
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
} else {
dpa_ctl |= DP_PLL_FREQ_270MHZ;
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
 
I915_WRITE(DP_A, dpa_ctl);
 
POSTING_READ(DP_A);
udelay(500);
}
 
void intel_dp_set_link_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
1612,9 → 1716,6
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
 
if (crtc->config->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
/* Split out the IBX/CPU vs CPT settings */
 
if (IS_GEN7(dev) && port == PORT_A) {
1641,7 → 1742,7
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
crtc->config->limited_color_range)
!IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1675,7 → 1776,7
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg, pp_ctrl_reg;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
1765,7 → 1866,7
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
 
lockdep_assert_held(&dev_priv->pps_mutex);
1773,7 → 1874,7
if (!is_edp(intel_dp))
return false;
 
cancel_delayed_work(&intel_dp->panel_vdd_work);
// cancel_delayed_work(&intel_dp->panel_vdd_work);
intel_dp->want_panel_vdd = true;
 
if (edp_have_panel_vdd(intel_dp))
1841,7 → 1942,7
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
1928,7 → 2029,7
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_ctrl_reg;
i915_reg_t pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
1990,7 → 2091,7
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_ctrl_reg;
i915_reg_t pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
2041,7 → 2142,7
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_ctrl_reg;
i915_reg_t pp_ctrl_reg;
 
/*
* If we enable the backlight right away following a panel power
2082,7 → 2183,7
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_ctrl_reg;
i915_reg_t pp_ctrl_reg;
 
if (!is_edp(intel_dp))
return;
2141,27 → 2242,61
_intel_edp_backlight_off(intel_dp);
}
 
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
}
 
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
 
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
port_name(dig_port->port),
state_string(state), state_string(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
 
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
 
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
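/*
* Thin state-assertion helpers so the eDP PLL enable/disable paths
* below can verify the port and PLL are in the expected state before
* touching DP_A.
*/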
 
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
assert_pipe_disabled(dev_priv,
to_intel_crtc(crtc)->pipe);
assert_pipe_disabled(dev_priv, crtc->pipe);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
 
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
crtc->config->port_clock);
 
/* We don't adjust intel_dp->DP while tearing down the link, to
* facilitate link retraining (e.g. after hotplug). Hence clear all
* enable bits here to ensure that we don't enable too much. */
intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
intel_dp->DP &= ~DP_PLL_FREQ_MASK;
 
if (crtc->config->port_clock == 162000)
intel_dp->DP |= DP_PLL_FREQ_162MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
 
I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(500);
 
intel_dp->DP |= DP_PLL_ENABLE;
 
I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
2170,24 → 2305,18
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
assert_pipe_disabled(dev_priv,
to_intel_crtc(crtc)->pipe);
assert_pipe_disabled(dev_priv, crtc->pipe);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
 
dpa_ctl = I915_READ(DP_A);
WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
"dp pll off, should be on\n");
WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
DRM_DEBUG_KMS("disabling eDP PLL\n");
 
/* We can't rely on the value tracked for the DP register in
* intel_dp->DP because link_down must not change that (otherwise link
* re-training will fail). */
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
intel_dp->DP &= ~DP_PLL_ENABLE;
 
I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
}
2232,15 → 2361,18
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
ret = false;
 
tmp = I915_READ(intel_dp->output_reg);
 
if (!(tmp & DP_PORT_EN))
return false;
goto out;
 
if (IS_GEN7(dev) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
2251,12 → 2383,14
u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
*pipe = p;
return true;
ret = true;
 
goto out;
}
}
 
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
intel_dp->output_reg);
i915_mmio_reg_offset(intel_dp->output_reg));
} else if (IS_CHERRYVIEW(dev)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else {
2263,7 → 2397,12
*pipe = PORT_TO_PIPE(tmp);
}
 
return true;
ret = true;
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static void intel_dp_get_config(struct intel_encoder *encoder,
2308,7 → 2447,7
pipe_config->base.adjusted_mode.flags |= flags;
 
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
tmp & DP_COLOR_RANGE_16_235)
!IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
 
pipe_config->has_dp_encoder = true;
2319,7 → 2458,7
intel_dp_get_m_n(crtc, pipe_config);
 
if (port == PORT_A) {
if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
2384,6 → 2523,8
enum port port = dp_to_dig_port(intel_dp)->port;
 
intel_dp_link_down(intel_dp);
 
/* Only ilk+ has port A */
if (port == PORT_A)
ironlake_edp_pll_off(intel_dp);
}
2543,6 → 2684,8
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc =
to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
/* enable with pattern 1 (as per spec) */
_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2558,6 → 2701,8
* fail when the power sequencer is freshly used for this port.
*/
intel_dp->DP |= DP_PORT_EN;
if (crtc->config->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
2570,6 → 2715,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = crtc->pipe;
 
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
2576,18 → 2723,41
 
pps_lock(intel_dp);
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_init_panel_power_sequencer(intel_dp);
 
/*
* We get an occasional spurious underrun between the port
* enable and vdd enable, when enabling port A eDP.
*
* FIXME: Not sure if this applies to (PCH) port D eDP as well
*/
if (port == PORT_A)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_dp_enable_port(intel_dp);
 
if (port == PORT_A && IS_GEN5(dev_priv)) {
/*
* Underrun reporting for the other pipe was disabled in
* g4x_pre_enable_dp(). The eDP PLL and port have now been
* enabled, so it's now safe to re-enable underrun reporting.
*/
intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
}
 
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
 
if (port == PORT_A)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
pps_unlock(intel_dp);
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
unsigned int lane_mask = 0x0;
 
if (IS_CHERRYVIEW(dev))
2603,7 → 2773,7
 
if (crtc->config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
pipe_name(pipe));
intel_audio_codec_enable(encoder);
}
}
2626,17 → 2796,30
 
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
 
intel_dp_prepare(encoder);
 
if (port == PORT_A && IS_GEN5(dev_priv)) {
/*
* We get FIFO underruns on the other pipe when
* enabling the CPU eDP PLL, and when enabling CPU
* eDP port. We could potentially avoid the PLL
* underrun with a vblank wait just prior to enabling
* the PLL, but that doesn't appear to help the port
* enable case. Just sweep it all under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
}
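/* The matching re-enable happens in intel_enable_dp() once the eDP
* PLL and port are up and a vblank has passed. */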
 
/* Only ilk+ has port A */
if (dport->port == PORT_A) {
ironlake_set_pll_cpu_edp(intel_dp);
if (port == PORT_A)
ironlake_edp_pll_on(intel_dp);
}
}
 
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
2643,7 → 2826,7
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
enum pipe pipe = intel_dp->pps_pipe;
int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
 
edp_panel_vdd_off_sync(intel_dp);
 
2675,8 → 2858,7
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
struct intel_dp *intel_dp;
enum port port;
 
3041,7 → 3223,7
* Fetch AUX CH registers 0x202 - 0x207 which contain
* link status information
*/
static bool
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
return intel_dp_dpcd_read_wake(&intel_dp->aux,
3051,7 → 3233,7
}
 
/* These are source-specific values. */
static uint8_t
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
3064,7 → 3246,7
if (dev_priv->edp_low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
} else if (IS_VALLEYVIEW(dev))
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3074,7 → 3256,7
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
 
static uint8_t
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
3105,7 → 3287,7
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
3416,38 → 3598,6
return 0;
}
 
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t voltage_max;
uint8_t preemph_max;
 
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
}
 
voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
 
static uint32_t
gen4_signal_levels(uint8_t train_set)
{
3545,13 → 3695,13
}
}
 
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
 
3586,73 → 3736,27
(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT);
 
*DP = (*DP & ~mask) | signal_levels;
}
intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
 
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t *DP,
uint8_t dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
uint8_t buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
 
_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
 
I915_WRITE(intel_dp->output_reg, *DP);
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
 
buf[0] = dp_train_pat;
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
DP_TRAINING_PATTERN_DISABLE) {
/* don't write DP_TRAINING_LANEx_SET on disable */
len = 1;
} else {
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
len = intel_dp->lane_count + 1;
}
 
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
buf, len);
 
return ret == len;
}
 
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, DP);
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
 
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
int ret;
 
intel_get_adjust_train(intel_dp, link_status);
intel_dp_set_signal_levels(intel_dp, DP);
_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
 
I915_WRITE(intel_dp->output_reg, *DP);
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
 
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, intel_dp->lane_count);
 
return ret == intel_dp->lane_count;
}
 
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
3683,213 → 3787,7
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
 
/* Enable corresponding port and start training pattern 1 */
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
uint8_t link_config[2];
uint8_t link_bw, rate_select;
 
if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
 
intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
&link_bw, &rate_select);
 
/* Write the link configuration data */
link_config[0] = link_bw;
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
if (intel_dp->num_sink_rates)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
 
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
 
DP |= DP_PORT_EN;
 
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
 
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
 
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
 
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
break;
}
 
 
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_ERROR("too many full retries, give up\n");
break;
}
intel_dp_reset_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
voltage_tries = 0;
continue;
}
 
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
if (voltage_tries == 5) {
DRM_ERROR("too many voltage retries, give up\n");
break;
}
} else
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
/* Update training set as requested by target */
if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
DRM_ERROR("failed to update link training\n");
break;
}
}
 
intel_dp->DP = DP;
}
 
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
uint32_t training_pattern = DP_TRAINING_PATTERN_2;
 
/*
* Training Pattern 3 for HBR2 or 1.2 devices that support it.
*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2.
*
* Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
* supported but still not enabled.
*/
if (intel_dp_source_supports_hbr2(dev) &&
drm_dp_tps3_supported(intel_dp->dpcd))
training_pattern = DP_TRAINING_PATTERN_3;
else if (intel_dp->link_rate == 540000)
DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
 
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
}
 
tries = 0;
cr_tries = 0;
channel_eq = false;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
 
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
break;
}
 
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
 
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
}
 
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
channel_eq = true;
break;
}
 
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
continue;
}
 
/* Update training set as requested by target */
if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
DRM_ERROR("failed to update link training\n");
break;
}
++tries;
}
 
intel_dp_set_idle_link_train(intel_dp);
 
intel_dp->DP = DP;
 
if (channel_eq)
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}
 
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
intel_dp_set_link_train(intel_dp, &intel_dp->DP,
DP_TRAINING_PATTERN_DISABLE);
}
 
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_link_training_channel_equalization(intel_dp);
}
 
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3931,6 → 3829,13
* matching HDMI port to be enabled on transcoder A.
*/
if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
/* always enable with pattern 1 (as per spec) */
DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3940,9 → 3845,15
DP &= ~DP_PORT_EN;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
 
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
 
msleep(intel_dp->panel_power_down_delay);
 
intel_dp->DP = DP;
}
 
static bool
3990,7 → 3901,7
}
 
DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
yesno(intel_dp_source_supports_hbr2(dev)),
yesno(intel_dp_source_supports_hbr2(intel_dp)),
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
 
/* Intermediate frequency support */
4080,9 → 3991,12
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret = 0;
int count = 0;
int attempts = 10;
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4097,7 → 4011,22
goto out;
}
 
intel_dp->sink_crc.started = false;
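/* Poll TEST_SINK_MISC until the sink's CRC test counter drains to
* zero, waiting one vblank per attempt (up to 10). */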
do {
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
ret = -EIO;
goto out;
}
count = buf & DP_TEST_COUNT_MASK;
} while (--attempts && count);
 
if (attempts == 0) {
DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
ret = -ETIMEDOUT;
}
 
out:
hsw_enable_ips(intel_crtc);
return ret;
4106,16 → 4035,11
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret;
 
if (intel_dp->sink_crc.started) {
ret = intel_dp_sink_crc_stop(intel_dp);
if (ret)
return ret;
}
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
 
4122,11 → 4046,15
if (!(buf & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
 
intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
return -EIO;
 
if (buf & DP_TEST_SINK_START) {
ret = intel_dp_sink_crc_stop(intel_dp);
if (ret)
return ret;
}
 
hsw_disable_ips(intel_crtc);
 
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4135,7 → 4063,7
return -EIO;
}
 
intel_dp->sink_crc.started = true;
intel_wait_for_vblank(dev, intel_crtc->pipe);
return 0;
}
 
4147,7 → 4075,6
u8 buf;
int count, ret;
int attempts = 6;
bool old_equal_new;
 
ret = intel_dp_sink_crc_start(intel_dp);
if (ret)
4163,35 → 4090,17
}
count = buf & DP_TEST_COUNT_MASK;
 
/*
* Count might be reset during the loop. In this case
* last known count needs to be reset as well.
*/
if (count == 0)
intel_dp->sink_crc.last_count = 0;
} while (--attempts && count == 0);
 
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
ret = -EIO;
goto stop;
}
 
old_equal_new = (count == intel_dp->sink_crc.last_count &&
!memcmp(intel_dp->sink_crc.last_crc, crc,
6 * sizeof(u8)));
 
} while (--attempts && (count == 0 || old_equal_new));
 
intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
 
if (attempts == 0) {
if (old_equal_new) {
DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
} else {
DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
ret = -ETIMEDOUT;
goto stop;
}
 
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
ret = -EIO;
goto stop;
}
 
stop:
4291,13 → 4200,6
uint8_t rxdata = 0;
int status = 0;
 
intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
 
intel_dp->aux.i2c_nack_count = 0;
intel_dp->aux.i2c_defer_count = 0;
 
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
if (status <= 0) {
DRM_DEBUG_KMS("Could not read test request from sink\n");
4413,6 → 4315,14
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
/*
* Clear the compliance test variables so that values can be
* captured for the next automated test request.
*/
intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
 
if (!intel_encoder->base.crtc)
return;
 
4443,7 → 4353,9
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
 
if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
/* If link training is requested, we should always perform it. */
if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
4646,7 → 4558,7
*
* Return %true if @port is connected, %false otherwise.
*/
static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
if (HAS_PCH_IBX(dev_priv))
4661,41 → 4573,6
return g4x_digital_port_connected(dev_priv, port);
}
 
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 
if (!intel_digital_port_connected(dev_priv, intel_dig_port))
return connector_status_disconnected;
 
return intel_dp_detect_dpcd(intel_dp);
}
 
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
enum drm_connector_status status;
 
status = intel_panel_detect(dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
}
 
if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
return connector_status_disconnected;
 
return intel_dp_detect_dpcd(intel_dp);
}
 
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
4768,12 → 4645,19
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (HAS_PCH_SPLIT(dev))
status = ironlake_dp_detect(intel_dp);
else if (intel_digital_port_connected(to_i915(dev),
dp_to_dig_port(intel_dp)))
status = intel_dp_detect_dpcd(intel_dp);
else
status = g4x_dp_detect(intel_dp);
if (status != connector_status_connected)
status = connector_status_disconnected;
 
if (status != connector_status_connected) {
intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
 
goto out;
}
 
intel_dp_probe_oui(intel_dp);
 
4787,6 → 4671,14
goto out;
}
 
/*
* Clear the NACK and defer counts so that the exact values
* accumulated while reading the EDID are available, as required
* by Compliance tests 4.2.2.4 and 4.2.2.5.
*/
intel_dp->aux.i2c_nack_count = 0;
intel_dp->aux.i2c_defer_count = 0;
 
intel_dp_set_edid(intel_dp);
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
4991,10 → 4883,10
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
 
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_aux_fini(intel_dp);
intel_dp_mst_encoder_cleanup(intel_dig_port);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
5003,7 → 4895,10
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
 
if (intel_dp->edp_notifier.notifier_call) {
intel_dp->edp_notifier.notifier_call = NULL;
}
}
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
5019,7 → 4914,7
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
5052,15 → 4947,13
 
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_dp *intel_dp;
 
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
 
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
 
intel_dp = enc_to_intel_dp(encoder);
 
pps_lock(intel_dp);
 
/*
5067,7 → 4960,7
* Read out the current power sequencer assignment,
* in case the BIOS did something with it.
*/
if (IS_VALLEYVIEW(encoder->dev))
if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
vlv_initial_power_sequencer_setup(intel_dp);
 
intel_edp_panel_vdd_sanitize(intel_dp);
5132,6 → 5025,9
intel_display_power_get(dev_priv, power_domain);
 
if (long_hpd) {
/* indicate that we need to restart link training */
intel_dp->train_set_valid = false;
 
if (!intel_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;
 
5176,25 → 5072,6
return ret;
}
 
/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
struct intel_dp *intel_dp;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
 
return -1;
}
 
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
5266,7 → 5143,7
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
5388,7 → 5265,7
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
5443,7 → 5320,7
 
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (port == PORT_A)
5550,17 → 5427,17
DRM_ERROR("Unsupported refreshrate type\n");
}
} else if (INTEL_INFO(dev)->gen > 6) {
u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
u32 val;
 
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
else
val |= PIPECONF_EDP_RR_MODE_SWITCH;
} else {
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
else
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5636,7 → 5513,7
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);
 
cancel_delayed_work_sync(&dev_priv->drrs.work);
// cancel_delayed_work_sync(&dev_priv->drrs.work);
}
 
static void intel_edp_drrs_downclock_work(struct work_struct *work)
5689,7 → 5566,7
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
 
cancel_delayed_work(&dev_priv->drrs.work);
// cancel_delayed_work(&dev_priv->drrs.work);
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
5734,7 → 5611,7
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
 
cancel_delayed_work(&dev_priv->drrs.work);
// cancel_delayed_work(&dev_priv->drrs.work);
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
5927,7 → 5804,9
}
mutex_unlock(&dev->mode_config.mutex);
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
// intel_dp->edp_notifier.notifier_call = edp_notify_handler;
// register_reboot_notifier(&intel_dp->edp_notifier);
 
/*
* Figure out the current pipe for the initial backlight setup.
5966,7 → 5845,7
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
int type;
int type, ret;
 
intel_dp->pps_pipe = INVALID_PIPE;
 
5973,7 → 5852,7
/* intel_dp vfuncs */
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_VALLEYVIEW(dev))
else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5987,6 → 5866,9
else
intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
 
if (HAS_DDI(dev))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
 
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
6005,8 → 5887,8
intel_encoder->type = INTEL_OUTPUT_EDP;
 
/* eDP only on port B and/or C on vlv/chv */
if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
port != PORT_B && port != PORT_C))
if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
is_edp(intel_dp) && port != PORT_B && port != PORT_C))
return false;
 
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6038,7 → 5920,7
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
6057,7 → 5939,7
if (is_edp(intel_dp)) {
pps_lock(intel_dp);
intel_dp_init_panel_power_timestamps(intel_dp);
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_initial_power_sequencer_setup(intel_dp);
else
intel_dp_init_panel_power_sequencer(dev, intel_dp);
6064,7 → 5946,9
pps_unlock(intel_dp);
}
 
intel_dp_aux_init(intel_dp, intel_connector);
ret = intel_dp_aux_init(intel_dp, intel_connector);
if (ret)
goto fail;
 
/* init MST on ports that can support it */
if (HAS_DP_MST(dev) &&
6073,21 → 5957,10
intel_connector->base.base.id);
 
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
drm_dp_aux_unregister(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
intel_dp_aux_fini(intel_dp);
intel_dp_mst_encoder_cleanup(intel_dig_port);
goto fail;
}
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
return false;
}
 
intel_dp_add_properties(intel_dp, connector);
 
6103,11 → 5976,27
i915_debugfs_connector_add(connector);
 
return true;
 
fail:
if (is_edp(intel_dp)) {
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
}
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
 
bool intel_dp_init(struct drm_device *dev,
int output_reg,
enum port port)
return false;
}
 
void
intel_dp_init(struct drm_device *dev,
i915_reg_t output_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
6117,7 → 6006,7
 
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return false;
return;
 
intel_connector = intel_connector_alloc();
if (!intel_connector)
6126,8 → 6015,9
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
 
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS, NULL))
goto err_encoder_init;
 
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->disable = intel_disable_dp;
6172,14 → 6062,16
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
 
return true;
return;
 
err_init_connector:
drm_encoder_cleanup(encoder);
err_encoder_init:
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
return false;
 
return;
}
 
void intel_dp_mst_suspend(struct drm_device *dev)
/drivers/video/drm/i915/intel_dp_link_training.c
0,0 → 1,342
/*
* Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
 
#include "intel_drv.h"
 
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t voltage_max;
uint8_t preemph_max;
 
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
}
 
voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
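/*
* Worked example with a hypothetical link_status: if the four lanes
* request voltage swings {0, 2, 1, 0} and pre-emphasis {1, 0, 0, 0},
* the loop above picks v = 2 and p = 1; both are then clamped to the
* source's limits, and the MAX_*_REACHED flags tell the sink to stop
* asking for more once a cap is hit.
*/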
 
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
uint8_t buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
 
intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);
 
buf[0] = dp_train_pat;
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
DP_TRAINING_PATTERN_DISABLE) {
/* don't write DP_TRAINING_LANEx_SET on disable */
len = 1;
} else {
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
len = intel_dp->lane_count + 1;
}
 
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
buf, len);
 
return ret == len;
}
 
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
if (!intel_dp->train_set_valid)
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
return intel_dp_set_link_train(intel_dp, dp_train_pat);
}
 
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp)
{
int ret;
 
intel_dp_set_signal_levels(intel_dp);
 
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, intel_dp->lane_count);
 
return ret == intel_dp->lane_count;
}
 
/* Enable corresponding port and start training pattern 1 */
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
int i;
uint8_t voltage;
int voltage_tries, loop_tries;
uint8_t link_config[2];
uint8_t link_bw, rate_select;
 
if (intel_dp->prepare_link_retrain)
intel_dp->prepare_link_retrain(intel_dp);
 
intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
&link_bw, &rate_select);
 
/* Write the link configuration data */
link_config[0] = link_bw;
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
if (intel_dp->num_sink_rates)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
 
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
 
intel_dp->DP |= DP_PORT_EN;
 
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
 
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
 
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
 
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
break;
}
 
/*
* if we used previously trained voltage and pre-emphasis values
* and we don't get clock recovery, reset link training values
*/
if (intel_dp->train_set_valid) {
DRM_DEBUG_KMS("clock recovery not ok, reset");
/* clear the flag as we are not reusing train set */
intel_dp->train_set_valid = false;
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
continue;
}
 
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_ERROR("too many full retries, give up\n");
break;
}
intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
voltage_tries = 0;
continue;
}
 
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
if (voltage_tries == 5) {
DRM_ERROR("too many voltage retries, give up\n");
break;
}
} else
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
DRM_ERROR("failed to update link training\n");
break;
}
}
}
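/*
* The retry budget above is deliberately small: five tries at the same
* voltage and five full resets at maximum swing before giving up, so a
* sink that never achieves clock recovery cannot stall the modeset
* forever.
*/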
 
/*
* Pick training pattern for channel equalization. Training Pattern 3 for HBR2
* or 1.2 devices that support it, Training Pattern 2 otherwise.
*/
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
{
u32 training_pattern = DP_TRAINING_PATTERN_2;
bool source_tps3, sink_tps3;
 
/*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
*
* Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
* supported by the source but still not enabled.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
 
if (source_tps3 && sink_tps3) {
training_pattern = DP_TRAINING_PATTERN_3;
} else if (intel_dp->link_rate == 540000) {
if (!source_tps3)
DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
if (!sink_tps3)
DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
}
 
return training_pattern;
}
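/*
* For example, an HBR2-capable source driving a TPS3 sink trains
* channel equalization with pattern 3, while the same sink behind an
* early SKL stepping (WaDisableHBR2) falls back to pattern 2 and logs
* the missing support if a 5.4 Gbps rate was requested.
*/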
 
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
bool channel_eq = false;
int tries, cr_tries;
u32 training_pattern;
 
training_pattern = intel_dp_training_pattern(intel_dp);
 
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
}
 
tries = 0;
cr_tries = 0;
channel_eq = false;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
 
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
break;
}
 
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
 
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
}
 
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
channel_eq = true;
break;
}
 
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
continue;
}
 
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
DRM_ERROR("failed to update link training\n");
break;
}
++tries;
}
 
intel_dp_set_idle_link_train(intel_dp);
 
if (channel_eq) {
intel_dp->train_set_valid = true;
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}
}
 
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
intel_dp_set_link_train(intel_dp,
DP_TRAINING_PATTERN_DISABLE);
}
 
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_link_training_channel_equalization(intel_dp);
}
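/*
 * Illustrative sketch (hypothetical caller): the call order implied by the
 * two helpers above. Clock recovery and channel equalization run back to
 * back from intel_dp_start_link_train(), and the training pattern is shut
 * off with intel_dp_stop_link_train() once the port output is enabled.
 */
static void sketch_bring_up_dp_link(struct intel_dp *intel_dp)
{
	intel_dp_start_link_train(intel_dp);	/* CR, then EQ */
	/* ... enable the pipe/port output here ... */
	intel_dp_stop_link_train(intel_dp);	/* DP_TRAINING_PATTERN_DISABLE */
}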
/drivers/video/drm/i915/intel_dp_mst.c
173,20 → 173,14
intel_mst->port = found->port;
 
if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);
intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
 
intel_dp_set_link_params(intel_dp, intel_crtc->config);
 
/* FIXME: add support for SKL */
if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port),
intel_crtc->config->ddi_pll_sel);
 
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 
 
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
414,7 → 408,10
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
 
if (dev_priv->fbdev)
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
&connector->base);
#endif
}
 
422,7 → 419,10
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
 
if (dev_priv->fbdev)
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
&connector->base);
#endif
}
 
510,7 → 510,7
drm_kms_helper_hotplug_event(dev);
}
 
static struct drm_dp_mst_topology_cbs mst_cbs = {
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
534,7 → 534,7
intel_mst->primary = intel_dig_port;
 
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
DRM_MODE_ENCODER_DPMST);
DRM_MODE_ENCODER_DPMST, NULL);
 
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->crtc_mask = 0x7;
/drivers/video/drm/i915/intel_drv.h
124,8 → 124,6
struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer *fb;
struct list_head fbdev_list;
struct drm_display_mode *our_mode;
int preferred_bpp;
};
 
251,6 → 249,7
unsigned int cdclk;
bool dpll_set;
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
};
 
struct intel_plane_state {
281,6 → 280,9
int scaler_id;
 
struct drm_intel_sprite_colorkey ckey;
 
/* async flip related structures */
struct drm_i915_gem_request *wait_req;
};
 
struct intel_initial_plane_config {
335,6 → 337,21
/* drm_mode->private_flags */
#define I915_MODE_FLAG_INHERITED 1
 
struct intel_pipe_wm {
struct intel_wm_level wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
bool sprites_scaled;
};
 
struct skl_pipe_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
uint32_t linetime;
};
 
struct intel_crtc_state {
struct drm_crtc_state base;
 
349,7 → 366,9
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
 
bool update_pipe;
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
 
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
377,6 → 396,9
* accordingly. */
bool has_dp_encoder;
 
/* DSI has special cases */
bool has_dsi_encoder;
 
/* Whether we should send NULL infoframes. Required for audio. */
bool has_hdmi_sink;
 
469,6 → 491,20
 
/* w/a for waiting 2 vblanks during crtc enable */
enum pipe hsw_workaround_pipe;
 
/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
bool disable_lp_wm;
 
struct {
/*
* optimal watermarks, programmed post-vblank when this state
* is committed
*/
union {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} optimal;
} wm;
};
 
struct vlv_wm_state {
480,28 → 516,14
bool cxsr;
};
 
struct intel_pipe_wm {
struct intel_wm_level wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
bool sprites_scaled;
};
 
struct intel_mmio_flip {
struct work_struct work;
struct drm_i915_private *i915;
struct drm_i915_gem_request *req;
struct intel_crtc *crtc;
unsigned int rotation;
};
 
struct skl_pipe_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
uint32_t linetime;
};
 
/*
* Tracking of operations that need to be performed at the beginning/end of an
* atomic commit, outside the atomic section where interrupts are disabled.
510,13 → 532,9
*/
struct intel_crtc_atomic_commit {
/* Sleepable operations to perform before commit */
bool wait_for_flips;
bool disable_fbc;
bool disable_ips;
bool disable_cxsr;
bool pre_disable_primary;
bool update_wm_pre, update_wm_post;
unsigned disabled_planes;
 
/* Sleepable operations to perform after commit */
unsigned fb_bits;
568,9 → 586,10
/* per-pipe watermark state */
struct {
/* watermarks currently being used */
struct intel_pipe_wm active;
/* SKL wm values currently in use */
struct skl_pipe_wm skl_active;
union {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} active;
/* allow CxSR on this pipe */
bool cxsr_allowed;
} wm;
678,7 → 697,7
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
 
struct intel_hdmi {
u32 hdmi_reg;
i915_reg_t hdmi_reg;
int ddc_bus;
bool limited_color_range;
bool color_range_auto;
694,7 → 713,8
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode);
bool (*infoframe_enabled)(struct drm_encoder *encoder);
bool (*infoframe_enabled)(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
 
struct intel_dp_mst_encoder;
720,15 → 740,10
M2_N2
};
 
struct sink_crc {
bool started;
u8 last_crc[6];
int last_count;
};
 
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
i915_reg_t output_reg;
i915_reg_t aux_ch_ctl_reg;
i915_reg_t aux_ch_data_reg[5];
uint32_t DP;
int link_rate;
uint8_t lane_count;
742,7 → 757,6
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
struct sink_crc sink_crc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
756,6 → 770,8
unsigned long last_power_on;
unsigned long last_backlight_off;
 
struct notifier_block edp_notifier;
 
/*
* Pipe whose power sequencer is currently locked into
* this port. Only relevant on VLV/CHV.
783,6 → 799,11
int send_bytes,
uint32_t aux_clock_divider);
 
/* This is called before link training is started */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
 
bool train_set_valid;
 
/* Displayport compliance testing */
unsigned long compliance_test_type;
unsigned long compliance_test_data;
797,6 → 818,8
struct intel_hdmi hdmi;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
 
struct intel_dp_mst_encoder {
940,7 → 963,8
enum pipe pipe);
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder);
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
 
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
971,6 → 995,8
 
 
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
void intel_prepare_ddi(struct drm_device *dev);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
985,7 → 1011,7
bool intel_ddi_pll_select(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
1053,6 → 1079,15
{
drm_wait_one_vblank(dev, pipe);
}
static inline void
intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
{
const struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
 
if (crtc->active)
intel_wait_for_vblank(dev, pipe);
}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
1066,9 → 1101,7
struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request);
const struct drm_plane_state *plane_state);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
1149,7 → 1182,10
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void skl_disable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
1170,7 → 1206,6
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
1185,16 → 1220,12
u32 skl_plane_ctl_rotation(unsigned int rotation);
 
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_device *dev);
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv);
void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
enum csr_state state);
void intel_csr_load_program(struct drm_device *dev);
void intel_csr_ucode_fini(struct drm_device *dev);
void assert_csr_loaded(struct drm_i915_private *dev_priv);
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);
 
/* intel_dp.c */
bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
1230,8 → 1261,26
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
 
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
uint8_t dp_train_pat);
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp);
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
 
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
1246,7 → 1295,7
/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_FBDEV_EMULATION
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
extern void intel_fbdev_initial_config_async(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
1257,7 → 1306,7
return 0;
}
 
static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
{
}
 
1275,9 → 1324,11
#endif
 
/* intel_fbc.c */
bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
void intel_fbc_update(struct drm_i915_private *dev_priv);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
void intel_fbc_deactivate(struct intel_crtc *crtc);
void intel_fbc_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc);
void intel_fbc_disable(struct drm_i915_private *dev_priv);
void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
1285,11 → 1336,10
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
 
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
1365,8 → 1415,13
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
 
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
1374,9 → 1429,95
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
 
static inline void
assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
{
WARN_ONCE(dev_priv->pm.suspended,
"Device suspended during HW access\n");
}
 
static inline void
assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
{
assert_rpm_device_not_suspended(dev_priv);
/* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
* too much noise. */
if (!atomic_read(&dev_priv->pm.wakeref_count))
DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
}
 
static inline int
assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
{
int seq = atomic_read(&dev_priv->pm.atomic_seq);
 
assert_rpm_wakelock_held(dev_priv);
 
return seq;
}
 
static inline void
assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
{
WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
"HW access outside of RPM atomic section\n");
}
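/*
 * Illustrative sketch (hypothetical usage): the begin/end helpers above are
 * meant to bracket raw register access, with the captured sequence number
 * proving no runtime suspend/resume cycle happened in between.
 */
static inline void sketch_rpm_atomic_hw_access(struct drm_i915_private *dev_priv)
{
	int seq = assert_rpm_atomic_begin(dev_priv);

	/* ... register reads/writes that must stay within one RPM epoch ... */

	assert_rpm_atomic_end(dev_priv, seq);
}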
 
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @dev_priv: i915 device instance
*
* This function disables the asserts that check if we hold an RPM wakelock
* reference, while keeping the device-not-suspended checks still enabled.
* It's meant to be used only in special circumstances where our rule about
* the wakelock refcount wrt. the device power state doesn't hold. According
* to this rule, at any point where we access the HW or want to keep the HW in
* an active state we must hold an RPM wakelock reference acquired via one of
* the intel_runtime_pm_get() helpers. Currently there are a few special spots
* where this rule doesn't hold: the IRQ and suspend/resume handlers, the
* forcewake release timer, and the GPU RPS and hangcheck works. All other
* users should avoid using this function.
*
* Any calls to this function must have a symmetric call to
* enable_rpm_wakeref_asserts().
*/
static inline void
disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
{
atomic_inc(&dev_priv->pm.wakeref_count);
}
 
/**
* enable_rpm_wakeref_asserts - re-enable the RPM assert checks
* @dev_priv: i915 device instance
*
* This function re-enables the RPM assert checks after disabling them with
* disable_rpm_wakeref_asserts(). It's meant to be used only in special
* circumstances; otherwise its use should be avoided.
*
* Any calls to this function must have a symmetric call to
* disable_rpm_wakeref_asserts().
*/
static inline void
enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
{
atomic_dec(&dev_priv->pm.wakeref_count);
}
 
/* TODO: convert users of these to rely instead on proper RPM refcounting */
#define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv) \
disable_rpm_wakeref_asserts(dev_priv)
 
#define ENABLE_RPM_WAKEREF_ASSERTS(dev_priv) \
enable_rpm_wakeref_asserts(dev_priv)
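/*
 * Illustrative sketch (hypothetical handler): the symmetric usage the
 * kerneldoc above requires, as it would look in one of the special-case
 * paths such as an IRQ or suspend/resume handler.
 */
static void sketch_special_case_handler(struct drm_i915_private *dev_priv)
{
	disable_rpm_wakeref_asserts(dev_priv);

	/* ... HW access that legitimately runs without a wakelock ref ... */

	enable_rpm_wakeref_asserts(dev_priv);
}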
 
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
 
1393,12 → 1534,6
void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
1426,7 → 1561,8
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
bool intel_sdvo_init(struct drm_device *dev,
i915_reg_t reg, enum port port);
 
 
/* intel_sprite.c */
1474,4 → 1610,12
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
 
int drm_core_init(void);
void set_fake_framebuffer(void);
int kolibri_framebuffer_init(void *param);
void shmem_file_delete(struct file *filep);
void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
 
#endif /* __INTEL_DRV_H__ */
/drivers/video/drm/i915/intel_dsi.c
60,7 → 60,8
DRM_ERROR("DPI FIFOs are not empty\n");
}
 
static void write_data(struct drm_i915_private *dev_priv, u32 reg,
static void write_data(struct drm_i915_private *dev_priv,
i915_reg_t reg,
const u8 *data, u32 len)
{
u32 i, j;
75,7 → 76,8
}
}
 
static void read_data(struct drm_i915_private *dev_priv, u32 reg,
static void read_data(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u8 *data, u32 len)
{
u32 i, j;
98,7 → 100,8
struct mipi_dsi_packet packet;
ssize_t ret;
const u8 *header, *data;
u32 data_reg, data_mask, ctrl_reg, ctrl_mask;
i915_reg_t data_reg, ctrl_reg;
u32 data_mask, ctrl_mask;
 
ret = mipi_dsi_create_packet(&packet, msg);
if (ret < 0)
263,16 → 266,18
}
 
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *config)
struct intel_crtc_state *pipe_config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
DRM_DEBUG_KMS("\n");
 
pipe_config->has_dsi_encoder = true;
 
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
364,7 → 369,7
{
struct drm_device *dev = encoder->base.dev;
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_dsi_device_ready(encoder);
else if (IS_BROXTON(dev))
bxt_dsi_device_ready(encoder);
377,10 → 382,10
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
 
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp;
u32 port_ctrl;
 
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
389,8 → 394,9
}
 
for_each_dsi_port(port, intel_dsi->ports) {
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
 
temp = I915_READ(port_ctrl);
 
416,13 → 422,13
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
 
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
u32 port_ctrl;
 
for_each_dsi_port(port, intel_dsi->ports) {
/* de-assert ip_tg_enable signal */
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
temp = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
POSTING_READ(port_ctrl);
458,6 → 464,8
intel_panel_enable_backlight(intel_dsi->attached_connector);
}
 
static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
 
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
470,6 → 478,9
 
DRM_DEBUG_KMS("\n");
 
intel_dsi_prepare(encoder);
intel_enable_dsi_pll(encoder);
 
/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
476,7 → 487,7
 
msleep(intel_dsi->panel_on_delay);
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled
580,11 → 591,13
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
u32 port_ctrl = 0;
 
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
 
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
598,12 → 611,6
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
 
if (IS_BROXTON(dev))
port_ctrl = BXT_MIPI_PORT_CTRL(port);
else if (IS_VALLEYVIEW(dev))
/* Common bit for both MIPI Port A & MIPI Port C */
port_ctrl = MIPI_PORT_CTRL(PORT_A);
 
/* Wait till Clock lanes are in LP-00 state for MIPI Port A
* only. MIPI Port C has no similar bit for checking
*/
656,20 → 663,24
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
u32 dpi_enabled, func, ctrl_reg;
enum port port;
bool ret;
 
DRM_DEBUG_KMS("\n");
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
ret = false;
 
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 dpi_enabled, func;
 
func = I915_READ(MIPI_DSI_FUNC_PRG(port));
ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
 
/* Due to some hardware limitations on BYT, MIPI Port C DPI
676,8 → 687,7
* Enable bit does not get set. To check whether DSI Port C
* was enabled in BIOS, check the Pipe B enable bit
*/
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
(port == PORT_C))
if (IS_VALLEYVIEW(dev) && port == PORT_C)
dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
PIPECONF_ENABLE;
 
684,12 → 694,16
if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
return true;
ret = true;
 
goto out;
}
}
}
out:
intel_display_power_put(dev_priv, power_domain);
 
return false;
return ret;
}
 
static void intel_dsi_get_config(struct intel_encoder *encoder,
698,6 → 712,8
u32 pclk = 0;
DRM_DEBUG_KMS("\n");
 
pipe_config->has_dsi_encoder = true;
 
/*
* DPLL_MD is not used in case of DSI, reading will get some default value
* set dpll_md = 0
706,7 → 722,8
 
if (IS_BROXTON(encoder->base.dev))
pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
else if (IS_VALLEYVIEW(encoder->base.dev))
else if (IS_VALLEYVIEW(encoder->base.dev) ||
IS_CHERRYVIEW(encoder->base.dev))
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
 
if (!pclk)
859,7 → 876,7
}
 
for_each_dsi_port(port, intel_dsi->ports) {
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
875,21 → 892,12
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
} else if (IS_BROXTON(dev)) {
/*
* FIXME:
* BXT can connect any PIPE to any MIPI port.
* Select the pipe based on the MIPI port read from
* VBT for now. Pick PIPE A for MIPI port A and C
* for port C.
*/
enum pipe pipe = intel_crtc->pipe;
 
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
 
if (port == PORT_A)
tmp |= BXT_PIPE_SELECT_A;
else if (port == PORT_C)
tmp |= BXT_PIPE_SELECT_C;
 
tmp |= BXT_PIPE_SELECT(pipe);
I915_WRITE(MIPI_CTRL(port), tmp);
}
 
1025,15 → 1033,6
}
}
 
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
 
intel_dsi_prepare(encoder);
intel_enable_dsi_pll(encoder);
 
}
 
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
1128,7 → 1127,7
if (!dev_priv->vbt.has_mipi)
return;
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
1151,11 → 1150,10
 
connector = &intel_connector->base;
 
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
NULL);
 
/* XXX: very likely not all of these are needed */
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
intel_encoder->enable = intel_dsi_enable_nop;
intel_encoder->disable = intel_dsi_pre_disable;
/drivers/video/drm/i915/intel_dsi.h
117,7 → 117,7
 
#define for_each_dsi_port(__port, __ports_mask) \
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
if ((__ports_mask) & (1 << (__port)))
for_each_if ((__ports_mask) & (1 << (__port)))
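/*
 * Illustrative sketch (hypothetical helper): the same port-mask walk the
 * macro above performs, spelled out. Each port index whose bit is set in
 * the mask gets exactly one iteration; PORT_A is assumed to encode as 0.
 */
static inline int sketch_count_dsi_ports(unsigned int ports_mask, int max_ports)
{
	int port, n = 0;

	for (port = 0; port < max_ports; port++)
		if (ports_mask & (1u << port))
			n++;
	return n;
}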
 
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
/drivers/video/drm/i915/intel_dsi_panel_vbt.c
204,6 → 204,9
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
 
gpio = *data++;
 
/* pull up/down */
214,6 → 217,16
goto out;
}
 
if (!IS_VALLEYVIEW(dev_priv)) {
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
goto out;
}
 
if (dev_priv->vbt.dsi.seq_version >= 3) {
DRM_DEBUG_KMS("GPIO element v3 not supported\n");
goto out;
}
 
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
 
/drivers/video/drm/i915/intel_dsi_pll.c
561,7 → 561,7
{
struct drm_device *dev = encoder->base.dev;
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_enable_dsi_pll(encoder);
else if (IS_BROXTON(dev))
bxt_enable_dsi_pll(encoder);
571,7 → 571,7
{
struct drm_device *dev = encoder->base.dev;
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_disable_dsi_pll(encoder);
else if (IS_BROXTON(dev))
bxt_disable_dsi_pll(encoder);
599,6 → 599,6
 
if (IS_BROXTON(dev))
bxt_dsi_reset_clocks(encoder, port);
else if (IS_VALLEYVIEW(dev))
else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_dsi_reset_clocks(encoder, port);
}
/drivers/video/drm/i915/intel_dvo.c
44,6 → 44,7
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = SIL164_ADDR,
.dev_ops = &sil164_ops,
},
51,6 → 52,7
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
58,6 → 60,7
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
65,6 → 68,7
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
.dvo_reg = DVOA,
.dvo_srcdim_reg = DVOA_SRCDIM,
.slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
.dev_ops = &ivch_ops,
},
72,6 → 76,7
.type = INTEL_DVO_CHIP_TMDS,
.name = "tfp410",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = TFP410_ADDR,
.dev_ops = &tfp410_ops,
},
79,6 → 84,7
.type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017",
.dvo_reg = DVOC,
.dvo_srcdim_reg = DVOC_SRCDIM,
.slave_addr = 0x75,
.gpio = GMBUS_PIN_DPB,
.dev_ops = &ch7017_ops,
87,6 → 93,7
.type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501",
.dvo_reg = DVOB,
.dvo_srcdim_reg = DVOB_SRCDIM,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
}
171,7 → 178,7
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
 
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
184,7 → 191,7
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
 
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
255,21 → 262,9
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
 
switch (dvo_reg) {
case DVOA:
default:
dvo_srcdim_reg = DVOA_SRCDIM;
break;
case DVOB:
dvo_srcdim_reg = DVOB_SRCDIM;
break;
case DVOC:
dvo_srcdim_reg = DVOC_SRCDIM;
break;
}
 
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
(DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
434,7 → 429,7
 
intel_encoder = &intel_dvo->base;
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type);
&intel_dvo_enc_funcs, encoder_type, NULL);
 
intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo;
/drivers/video/drm/i915/intel_fbc.c
43,9 → 43,19
 
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
return dev_priv->fbc.enable_fbc != NULL;
return dev_priv->fbc.activate != NULL;
}
 
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
}
 
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen < 4;
}
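/*
 * Illustrative sketch (hypothetical predicate): the two platform limits
 * above folded into one check. HSW and gen8+ tie FBC to pipe A; pre-gen4
 * parts tie it to plane A. Pipe/plane A are assumed to encode as 0.
 */
static inline bool sketch_crtc_allowed_for_fbc(int gen, bool is_hsw,
					       int pipe, int plane)
{
	if ((is_hsw || gen >= 8) && pipe != 0)
		return false;
	if (gen < 4 && plane != 0)
		return false;
	return true;
}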
 
/*
* On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
* frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
59,11 → 69,51
return crtc->base.y - crtc->adjusted_y;
}
 
static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
/*
* For SKL+, the plane source size used by the hardware is based on the value we
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
*/
static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
int *width, int *height)
{
struct intel_plane_state *plane_state =
to_intel_plane_state(crtc->base.primary->state);
int w, h;
 
if (intel_rotation_90_or_270(plane_state->base.rotation)) {
w = drm_rect_height(&plane_state->src) >> 16;
h = drm_rect_width(&plane_state->src) >> 16;
} else {
w = drm_rect_width(&plane_state->src) >> 16;
h = drm_rect_height(&plane_state->src) >> 16;
}
 
if (width)
*width = w;
if (height)
*height = h;
}
 
static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
struct drm_framebuffer *fb)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
int lines;
 
intel_fbc_get_plane_source_size(crtc, NULL, &lines);
if (INTEL_INFO(dev_priv)->gen >= 7)
lines = min(lines, 2048);
 
/* Hardware needs the full buffer stride, not just the active area. */
return lines * fb->pitches[0];
}
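/*
 * Worked example (illustrative numbers): a 1920x1080 plane on a 4-byte
 * XRGB framebuffer typically has pitches[0] == 7680, so the CFB must hold
 * 1080 * 7680 bytes, roughly 7.9 MB; a taller plane on gen7+ would first
 * be clamped to 2048 lines.
 */
static inline int sketch_cfb_size(int visible_lines, int pitch_bytes, int gen)
{
	int lines = visible_lines;

	if (gen >= 7 && lines > 2048)
		lines = 2048;
	return lines * pitch_bytes;	/* full stride, not just active width */
}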
 
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
u32 fbc_ctl;
 
dev_priv->fbc.enabled = false;
dev_priv->fbc.active = false;
 
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
78,11 → 128,9
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
 
DRM_DEBUG_KMS("disabled FBC\n");
}
 
static void i8xx_fbc_enable(struct intel_crtc *crtc)
static void i8xx_fbc_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
91,10 → 139,10
int i;
u32 fbc_ctl;
 
dev_priv->fbc.enabled = true;
dev_priv->fbc.active = true;
 
/* Note: fbc.threshold == 1 for i8xx */
cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
 
127,17 → 175,14
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}
 
static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
 
static void g4x_fbc_enable(struct intel_crtc *crtc)
static void g4x_fbc_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
144,7 → 189,7
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = true;
dev_priv->fbc.active = true;
 
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
157,15 → 202,13
 
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
 
static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = false;
dev_priv->fbc.active = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
172,23 → 215,22
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
POSTING_READ(MSG_FBC_REND_STATE);
}
 
static void ilk_fbc_enable(struct intel_crtc *crtc)
static void ilk_fbc_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
197,7 → 239,7
int threshold = dev_priv->fbc.threshold;
unsigned int y_offset;
 
dev_priv->fbc.enabled = true;
dev_priv->fbc.active = true;
 
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
231,16 → 273,14
I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
}
 
intel_fbc_nuke(dev_priv);
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
intel_fbc_recompress(dev_priv);
}
 
static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = false;
dev_priv->fbc.active = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
247,17 → 287,15
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void gen7_fbc_enable(struct intel_crtc *crtc)
static void gen7_fbc_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
265,7 → 303,7
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
 
dev_priv->fbc.enabled = true;
dev_priv->fbc.active = true;
 
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
310,13 → 348,11
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
 
intel_fbc_nuke(dev_priv);
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
intel_fbc_recompress(dev_priv);
}
 
/**
* intel_fbc_enabled - Is FBC enabled?
* intel_fbc_is_active - Is FBC active?
* @dev_priv: i915 device instance
*
* This function is used to verify the current state of FBC.
323,19 → 359,18
* FIXME: This should be tracked in the plane config eventually
* instead of queried at runtime for most callers.
*/
bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
return dev_priv->fbc.enabled;
return dev_priv->fbc.active;
}
 
static void intel_fbc_enable(struct intel_crtc *crtc,
const struct drm_framebuffer *fb)
static void intel_fbc_activate(const struct drm_framebuffer *fb)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_i915_private *dev_priv = fb->dev->dev_private;
struct intel_crtc *crtc = dev_priv->fbc.crtc;
 
dev_priv->fbc.enable_fbc(crtc);
dev_priv->fbc.activate(crtc);
 
dev_priv->fbc.crtc = crtc;
dev_priv->fbc.fb_id = fb->base.id;
dev_priv->fbc.y = crtc->base.y;
}
342,123 → 377,91
 
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
container_of(to_delayed_work(__work),
struct intel_fbc_work, work);
struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
struct drm_i915_private *dev_priv =
container_of(__work, struct drm_i915_private, fbc.work.work);
struct intel_fbc_work *work = &dev_priv->fbc.work;
struct intel_crtc *crtc = dev_priv->fbc.crtc;
int delay_ms = 50;
 
mutex_lock(&dev_priv->fbc.lock);
if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
retry:
/* Delay the actual enabling to let pageflipping cease and the
* display settle before starting the compression. Note that
* this delay also serves a second purpose: it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
if (crtc_fb == work->fb)
intel_fbc_enable(work->crtc, work->fb);
wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
 
dev_priv->fbc.fbc_work = NULL;
mutex_lock(&dev_priv->fbc.lock);
 
/* Were we cancelled? */
if (!work->scheduled)
goto out;
 
/* Were we delayed again while this function was sleeping? */
if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
jiffies)) {
mutex_unlock(&dev_priv->fbc.lock);
goto retry;
}
 
if (crtc->base.primary->fb == work->fb)
intel_fbc_activate(work->fb);
 
work->scheduled = false;
 
out:
mutex_unlock(&dev_priv->fbc.lock);
 
kfree(work);
}
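/*
 * Illustrative sketch (hypothetical types, locking omitted): the
 * delay-and-recheck pattern of the work function above. Sleep out the
 * 50 ms window, bail if cancelled, and go around again if another
 * scheduling request moved the deadline while we slept.
 */
struct sketch_fbc_work {
	unsigned long enable_jiffies;
	bool scheduled;
	void (*activate)(struct sketch_fbc_work *w);
};

static void sketch_delayed_activation(struct sketch_fbc_work *w)
{
retry:
	wait_remaining_ms_from_jiffies(w->enable_jiffies, 50);

	/* fbc.lock would be held from here on in the real driver */
	if (!w->scheduled)
		return;		/* cancelled while sleeping */
	if (time_after(w->enable_jiffies + msecs_to_jiffies(50), jiffies))
		goto retry;	/* rescheduled meanwhile: sleep again */

	w->activate(w);
	w->scheduled = false;
}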
 
static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
if (dev_priv->fbc.fbc_work == NULL)
return;
 
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 
/* Synchronisation is provided by struct_mutex and checking of
* dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
/* tasklet was killed before being run, clean up */
kfree(dev_priv->fbc.fbc_work);
 
/* Mark the work as no longer wanted so that if it does
* wake-up (because the work was already running and waiting
* for our mutex), it will discover that is no longer
* necessary to run.
*/
dev_priv->fbc.fbc_work = NULL;
dev_priv->fbc.work.scheduled = false;
}
 
static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
struct intel_fbc_work *work;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc_work *work = &dev_priv->fbc.work;
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
intel_fbc_cancel_work(dev_priv);
 
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
intel_fbc_enable(crtc, crtc->base.primary->fb);
return;
}
 
work->crtc = crtc;
/* It is useless to call intel_fbc_cancel_work() in this function since
* we're not releasing fbc.lock, so it won't have an opportunity to grab
* it to discover that it was cancelled. So we just update the expected
* jiffy count. */
work->fb = crtc->base.primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
work->scheduled = true;
work->enable_jiffies = jiffies;
 
dev_priv->fbc.fbc_work = work;
 
/* Delay the actual enabling to let pageflipping cease and the
* display settle before starting the compression. Note that
* this delay also serves a second purpose: it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
schedule_work(&work->work);
}
 
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
intel_fbc_cancel_work(dev_priv);
 
dev_priv->fbc.disable_fbc(dev_priv);
dev_priv->fbc.crtc = NULL;
if (dev_priv->fbc.active)
dev_priv->fbc.deactivate(dev_priv);
}
 
/**
* intel_fbc_disable - disable FBC
* @dev_priv: i915 device instance
*
* This function disables FBC.
*/
void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_disable(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
 
/*
* intel_fbc_disable_crtc - disable FBC if it's associated with crtc
* intel_fbc_deactivate - deactivate FBC if it's associated with crtc
* @crtc: the CRTC
*
* This function disables FBC if it's associated with the provided CRTC.
* This function deactivates FBC if it's associated with the provided CRTC.
*/
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
void intel_fbc_deactivate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
467,85 → 470,42
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.crtc == crtc)
__intel_fbc_disable(dev_priv);
__intel_fbc_deactivate(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
 
const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
{
switch (reason) {
case FBC_OK:
return "FBC enabled but currently disabled in hardware";
case FBC_UNSUPPORTED:
return "unsupported by this chipset";
case FBC_NO_OUTPUT:
return "no output";
case FBC_STOLEN_TOO_SMALL:
return "not enough stolen memory";
case FBC_UNSUPPORTED_MODE:
return "mode incompatible with compression";
case FBC_MODE_TOO_LARGE:
return "mode too large for compression";
case FBC_BAD_PLANE:
return "FBC unsupported on plane";
case FBC_NOT_TILED:
return "framebuffer not tiled or fenced";
case FBC_MULTIPLE_PIPES:
return "more than one pipe active";
case FBC_MODULE_PARAM:
return "disabled per module param";
case FBC_CHIP_DEFAULT:
return "disabled per chip default";
case FBC_ROTATION:
return "rotation unsupported";
case FBC_IN_DBG_MASTER:
return "Kernel debugger is active";
case FBC_BAD_STRIDE:
return "framebuffer stride not supported";
case FBC_PIXEL_RATE:
return "pixel rate is too big";
case FBC_PIXEL_FORMAT:
return "pixel format is invalid";
default:
MISSING_CASE(reason);
return "unknown reason";
}
}
 
static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
const char *reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return;
 
dev_priv->fbc.no_fbc_reason = reason;
DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
}
 
static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
static bool crtc_can_fbc(struct intel_crtc *crtc)
{
struct drm_crtc *crtc = NULL, *tmp_crtc;
enum pipe pipe;
bool pipe_a_only = false;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
pipe_a_only = true;
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
return false;
 
for_each_pipe(dev_priv, pipe) {
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
return false;
 
if (intel_crtc_active(tmp_crtc) &&
to_intel_plane_state(tmp_crtc->primary->state)->visible)
crtc = tmp_crtc;
 
if (pipe_a_only)
break;
return true;
}
 
if (!crtc || crtc->primary->fb == NULL)
return NULL;
static bool crtc_is_valid(struct intel_crtc *crtc)
{
if (!intel_crtc_active(&crtc->base))
return false;
 
return crtc;
if (!to_intel_plane_state(crtc->base.primary->state)->visible)
return false;
 
return true;
}
 
static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
581,7 → 541,8
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
else
end = dev_priv->gtt.stolen_usable_size;
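/*
 * Worked example (illustrative sizes): with 32 MB of stolen memory on a
 * BDW/SKL/KBL part, the CFB search range above ends at 32 MB - 8 MB =
 * 24 MB, steering clear of the range the BIOS may silently reserve.
 */
static inline unsigned long sketch_cfb_search_end(unsigned long stolen_size,
						  bool avoid_top_8mb)
{
	return avoid_top_8mb ? stolen_size - 8ul * 1024 * 1024 : stolen_size;
}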
617,12 → 578,18
}
}
 
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
int fb_cpp)
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->state->fb;
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
int size, fb_cpp, ret;
 
WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
 
size = intel_fbc_calculate_cfb_size(crtc, fb);
fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
if (!ret)
656,8 → 623,6
dev_priv->mm.stolen_base + compressed_llb->start);
}
 
dev_priv->fbc.uncompressed_size = size;
 
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
dev_priv->fbc.compressed_fb.size,
dev_priv->fbc.threshold);
674,18 → 639,15
 
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc.uncompressed_size == 0)
return;
if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
i915_gem_stolen_remove_node(dev_priv,
&dev_priv->fbc.compressed_fb);
 
i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
 
if (dev_priv->fbc.compressed_llb) {
i915_gem_stolen_remove_node(dev_priv,
dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
}
 
dev_priv->fbc.uncompressed_size = 0;
}
 
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
698,63 → 660,6
mutex_unlock(&dev_priv->fbc.lock);
}
 
/*
* For SKL+, the plane source size used by the hardware is based on the value we
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
*/
static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
int *width, int *height)
{
struct intel_plane_state *plane_state =
to_intel_plane_state(crtc->base.primary->state);
int w, h;
 
if (intel_rotation_90_or_270(plane_state->base.rotation)) {
w = drm_rect_height(&plane_state->src) >> 16;
h = drm_rect_width(&plane_state->src) >> 16;
} else {
w = drm_rect_width(&plane_state->src) >> 16;
h = drm_rect_height(&plane_state->src) >> 16;
}
 
if (width)
*width = w;
if (height)
*height = h;
}
 
static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
int lines;
 
intel_fbc_get_plane_source_size(crtc, NULL, &lines);
if (INTEL_INFO(dev_priv)->gen >= 7)
lines = min(lines, 2048);
 
return lines * fb->pitches[0];
}
 
static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
int size, cpp;
 
size = intel_fbc_calculate_cfb_size(crtc);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
if (size <= dev_priv->fbc.uncompressed_size)
return 0;
 
/* Release any current block */
__intel_fbc_cleanup_cfb(dev_priv);
 
return intel_fbc_alloc_cfb(dev_priv, size, cpp);
}
 
static bool stride_is_valid(struct drm_i915_private *dev_priv,
unsigned int stride)
{
829,28 → 734,15
}
 
/**
* __intel_fbc_update - enable/disable FBC as needed, unlocked
* @dev_priv: i915 device instance
* __intel_fbc_update - activate/deactivate FBC as needed, unlocked
* @crtc: the CRTC that triggered the update
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
* - plane A only (on pre-965)
* - no pixel multiply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= max_hdisplay in width, max_vdisplay in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
* one. It also must reside (along with the line length buffer) in
* stolen memory.
*
* We need to enable/disable FBC on a global basis.
* This function completely reevaluates the status of FBC, then activates,
* deactivates, or keeps it in its current state.
*/
static void __intel_fbc_update(struct drm_i915_private *dev_priv)
static void __intel_fbc_update(struct intel_crtc *crtc)
{
struct drm_crtc *crtc = NULL;
struct intel_crtc *intel_crtc;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
857,102 → 749,79
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
/* disable framebuffer compression in vGPU */
if (intel_vgpu_active(dev_priv->dev))
i915.enable_fbc = 0;
 
if (i915.enable_fbc < 0) {
set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
if (!multiple_pipes_ok(dev_priv)) {
set_no_fbc_reason(dev_priv, "more than one pipe active");
goto out_disable;
}
 
if (!i915.enable_fbc) {
set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
goto out_disable;
}
if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
return;
 
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
crtc = intel_fbc_find_crtc(dev_priv);
if (!crtc) {
set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
if (!crtc_is_valid(crtc)) {
set_no_fbc_reason(dev_priv, "no output");
goto out_disable;
}
 
if (!multiple_pipes_ok(dev_priv)) {
set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
goto out_disable;
}
 
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
fb = crtc->base.primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config->base.adjusted_mode;
adjusted_mode = &crtc->config->base.adjusted_mode;
 
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
set_no_fbc_reason(dev_priv, "incompatible mode");
goto out_disable;
}
 
if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) {
set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
set_no_fbc_reason(dev_priv, "mode too large for compression");
goto out_disable;
}
 
if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
intel_crtc->plane != PLANE_A) {
set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
goto out_disable;
}
 
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
goto out_disable;
}
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
set_no_fbc_reason(dev_priv, FBC_ROTATION);
crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
set_no_fbc_reason(dev_priv, "rotation unsupported");
goto out_disable;
}
 
if (!stride_is_valid(dev_priv, fb->pitches[0])) {
set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
goto out_disable;
}
 
if (!pixel_format_is_valid(fb)) {
set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
set_no_fbc_reason(dev_priv, "pixel format is invalid");
goto out_disable;
}
 
/* If the kernel debugger is active, always disable compression */
if (in_dbg_master()) {
set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
goto out_disable;
}
 
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
ilk_pipe_pixel_rate(intel_crtc->config) >=
ilk_pipe_pixel_rate(crtc->config) >=
dev_priv->cdclk_freq * 95 / 100) {
set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
set_no_fbc_reason(dev_priv, "pixel rate is too big");
goto out_disable;
}
 
if (intel_fbc_setup_cfb(intel_crtc)) {
set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
/* It is possible for the required CFB size to change without a
* crtc->disable + crtc->enable since it is possible to change the
* stride without triggering a full modeset. Since we try to
* over-allocate the CFB, there's a chance we may keep FBC enabled even
* if this happens, but if we exceed the current CFB size we'll have to
* disable FBC. Notice that it would be possible to disable FBC, wait
* for a frame, free the stolen node, then try to reenable FBC in case
* we didn't get any invalidate/deactivate calls, but this would require
* a lot of tracking just for a specific case. If we conclude it's an
* important case, we can implement it later. */
if (intel_fbc_calculate_cfb_size(crtc, fb) >
dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
set_no_fbc_reason(dev_priv, "CFB requirements changed");
goto out_disable;
}
 
961,12 → 830,13
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->fbc.crtc == intel_crtc &&
if (dev_priv->fbc.crtc == crtc &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
dev_priv->fbc.y == crtc->base.y &&
dev_priv->fbc.active)
return;
 
if (intel_fbc_enabled(dev_priv)) {
if (intel_fbc_is_active(dev_priv)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
990,36 → 860,37
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
__intel_fbc_disable(dev_priv);
DRM_DEBUG_KMS("deactivating FBC for update\n");
__intel_fbc_deactivate(dev_priv);
}
 
intel_fbc_schedule_enable(intel_crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
intel_fbc_schedule_activation(crtc);
dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
return;
 
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_enabled(dev_priv)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
__intel_fbc_disable(dev_priv);
if (intel_fbc_is_active(dev_priv)) {
DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
__intel_fbc_deactivate(dev_priv);
}
__intel_fbc_cleanup_cfb(dev_priv);
}
 
/*
* intel_fbc_update - enable/disable FBC as needed
* @dev_priv: i915 device instance
* intel_fbc_update - activate/deactivate FBC as needed
* @crtc: the CRTC that triggered the update
*
* This function reevaluates the overall state and enables or disables FBC.
* This function reevaluates the overall state and activates or deactivates FBC.
*/
void intel_fbc_update(struct drm_i915_private *dev_priv)
void intel_fbc_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_update(dev_priv);
__intel_fbc_update(crtc);
mutex_unlock(&dev_priv->fbc.lock);
}
 
1039,9 → 910,6
 
if (dev_priv->fbc.enabled)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
else if (dev_priv->fbc.fbc_work)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
dev_priv->fbc.fbc_work->crtc->pipe);
else
fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
1048,7 → 916,7
dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
if (dev_priv->fbc.busy_bits)
__intel_fbc_disable(dev_priv);
__intel_fbc_deactivate(dev_priv);
 
mutex_unlock(&dev_priv->fbc.lock);
}
1066,11 → 934,136
 
dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
 
if (!dev_priv->fbc.busy_bits) {
if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
intel_fbc_recompress(dev_priv);
} else {
__intel_fbc_deactivate(dev_priv);
__intel_fbc_update(dev_priv->fbc.crtc);
}
}
 
mutex_unlock(&dev_priv->fbc.lock);
}
 
/**
* intel_fbc_enable: tries to enable FBC on the CRTC
* @crtc: the CRTC
*
* This function checks if it's possible to enable FBC on the given CRTC,
* then enables it. Notice that it doesn't activate FBC.
*/
void intel_fbc_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
 
if (dev_priv->fbc.enabled) {
WARN_ON(dev_priv->fbc.crtc == crtc);
goto out;
}
 
WARN_ON(dev_priv->fbc.active);
WARN_ON(dev_priv->fbc.crtc != NULL);
 
if (intel_vgpu_active(dev_priv->dev)) {
set_no_fbc_reason(dev_priv, "VGPU is active");
goto out;
}
 
if (i915.enable_fbc < 0) {
set_no_fbc_reason(dev_priv, "disabled per chip default");
goto out;
}
 
if (!i915.enable_fbc) {
set_no_fbc_reason(dev_priv, "disabled per module param");
goto out;
}
 
if (!crtc_can_fbc(crtc)) {
set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
goto out;
}
 
if (intel_fbc_alloc_cfb(crtc)) {
set_no_fbc_reason(dev_priv, "not enough stolen memory");
goto out;
}
 
DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet";
 
dev_priv->fbc.enabled = true;
dev_priv->fbc.crtc = crtc;
out:
mutex_unlock(&dev_priv->fbc.lock);
}
 
/**
* __intel_fbc_disable - disable FBC
* @dev_priv: i915 device instance
*
* This is the low level function that actually disables FBC. Callers should
* grab the FBC lock.
*/
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc = dev_priv->fbc.crtc;
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
WARN_ON(!dev_priv->fbc.enabled);
WARN_ON(dev_priv->fbc.active);
assert_pipe_disabled(dev_priv, crtc->pipe);
 
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
 
__intel_fbc_cleanup_cfb(dev_priv);
 
dev_priv->fbc.enabled = false;
dev_priv->fbc.crtc = NULL;
}
 
/**
* intel_fbc_disable_crtc - disable FBC if it's associated with crtc
* @crtc: the CRTC
*
* This function disables FBC if it's associated with the provided CRTC.
*/
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.crtc == crtc) {
WARN_ON(!dev_priv->fbc.enabled);
WARN_ON(dev_priv->fbc.active);
__intel_fbc_disable(dev_priv);
__intel_fbc_update(dev_priv);
}
mutex_unlock(&dev_priv->fbc.lock);
}
 
/**
* intel_fbc_disable - globally disable FBC
* @dev_priv: i915 device instance
*
* This function disables FBC regardless of which CRTC is associated with it.
*/
void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.enabled)
__intel_fbc_disable(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
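
The doc comments above split FBC into two states: "enabled" (a compressed
framebuffer is allocated and a CRTC has been chosen) and "active" (the
hardware is actually compressing). A minimal sketch of how a modeset path
might drive that lifecycle through the entry points in this hunk; the
caller and its name are illustrative, not part of this revision:

/* Hypothetical caller, for illustration only. */
static void example_modeset_commit(struct intel_crtc *crtc)
{
	/* After the pipe is set up: allocate the CFB and claim the CRTC.
	 * This only marks FBC as enabled; nothing is compressed yet. */
	intel_fbc_enable(crtc);

	/* Re-check all constraints and, if they pass, schedule the actual
	 * hardware activation (intel_fbc_schedule_activation) later. */
	intel_fbc_update(crtc);

	/* ...and on the teardown path, before the pipe goes away: */
	intel_fbc_disable_crtc(crtc);
}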
 
1084,11 → 1077,14
{
enum pipe pipe;
 
INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
mutex_init(&dev_priv->fbc.lock);
dev_priv->fbc.enabled = false;
dev_priv->fbc.active = false;
dev_priv->fbc.work.scheduled = false;
 
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
return;
}
 
1096,30 → 1092,34
dev_priv->fbc.possible_framebuffer_bits |=
INTEL_FRONTBUFFER_PRIMARY(pipe);
 
if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
if (fbc_on_pipe_a_only(dev_priv))
break;
}
 
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
dev_priv->fbc.enable_fbc = gen7_fbc_enable;
dev_priv->fbc.disable_fbc = ilk_fbc_disable;
dev_priv->fbc.is_active = ilk_fbc_is_active;
dev_priv->fbc.activate = gen7_fbc_activate;
dev_priv->fbc.deactivate = ilk_fbc_deactivate;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
dev_priv->fbc.enable_fbc = ilk_fbc_enable;
dev_priv->fbc.disable_fbc = ilk_fbc_disable;
dev_priv->fbc.is_active = ilk_fbc_is_active;
dev_priv->fbc.activate = ilk_fbc_activate;
dev_priv->fbc.deactivate = ilk_fbc_deactivate;
} else if (IS_GM45(dev_priv)) {
dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
dev_priv->fbc.enable_fbc = g4x_fbc_enable;
dev_priv->fbc.disable_fbc = g4x_fbc_disable;
dev_priv->fbc.is_active = g4x_fbc_is_active;
dev_priv->fbc.activate = g4x_fbc_activate;
dev_priv->fbc.deactivate = g4x_fbc_deactivate;
} else {
dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
dev_priv->fbc.is_active = i8xx_fbc_is_active;
dev_priv->fbc.activate = i8xx_fbc_activate;
dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
 
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
 
dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
/* We still don't have any sort of hardware state readout for FBC, so
* deactivate it in case the BIOS activated it to make sure software
* matches the hardware state. */
if (dev_priv->fbc.is_active(dev_priv))
dev_priv->fbc.deactivate(dev_priv);
}
/drivers/video/drm/i915/intel_fbdev.c
118,7 → 118,7
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_framebuffer *fb;
struct drm_framebuffer *fb = NULL;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
137,6 → 137,8
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
 
mutex_lock(&dev->struct_mutex);
 
size = mode_cmd.pitches[0] * mode_cmd.height;
size = PAGE_ALIGN(size);
 
155,26 → 157,21
 
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
drm_gem_object_unreference(&obj->base);
ret = PTR_ERR(fb);
goto out_unref;
goto out;
}
 
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
}
mutex_unlock(&dev->struct_mutex);
 
ifbdev->fb = to_intel_framebuffer(fb);
 
return 0;
 
out_fb:
drm_framebuffer_remove(fb);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
mutex_unlock(&dev->struct_mutex);
if (!IS_ERR_OR_NULL(fb))
drm_framebuffer_unreference(fb);
return ret;
}
 
192,8 → 189,6
int size, ret;
bool prealloc = false;
 
mutex_lock(&dev->struct_mutex);
 
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {
208,7 → 203,7
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
goto out_unlock;
return ret;
intel_fb = ifbdev->fb;
} else {
DRM_DEBUG_KMS("re-using BIOS fb\n");
220,8 → 215,19
obj = intel_fb->obj;
size = obj->base.size;
 
mutex_lock(&dev->struct_mutex);
 
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
* BIOS is suitable for our own access.
*/
ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL);
if (ret)
goto out_unlock;
 
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
DRM_ERROR("Failed to allocate fb_info\n");
ret = PTR_ERR(info);
goto out_unpin;
}
266,7 → 272,6
drm_fb_helper_release_fbi(helper);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
505,6 → 510,10
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
/* We rely on the object-free to release the VMA pinning for
* the info->screen_base mmapping. Leaking the VMA is simpler than
* trying to rectify all the possible error paths leading here.
*/
 
drm_fb_helper_unregister_fbi(&ifbdev->helper);
drm_fb_helper_release_fbi(&ifbdev->helper);
511,9 → 520,11
 
drm_fb_helper_fini(&ifbdev->helper);
 
if (ifbdev->fb) {
drm_framebuffer_unregister_private(&ifbdev->fb->base);
drm_framebuffer_remove(&ifbdev->fb->base);
}
}
 
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
/drivers/video/drm/i915/intel_fifo_underrun.c
84,30 → 84,16
return true;
}
 
/**
* i9xx_check_fifo_underruns - check for fifo underruns
* @dev_priv: i915 device instance
*
* This function checks for fifo underruns on GMCH platforms. This needs to be
* done manually on modeset to make sure that we catch all underruns since they
* do not generate an interrupt by themselves on these platforms.
*/
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_crtc *crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(crtc->pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
 
spin_lock_irq(&dev_priv->irq_lock);
assert_spin_locked(&dev_priv->irq_lock);
 
for_each_intel_crtc(dev_priv->dev, crtc) {
u32 reg = PIPESTAT(crtc->pipe);
u32 pipestat;
 
if (crtc->cpu_fifo_underrun_disabled)
continue;
 
pipestat = I915_READ(reg) & 0xffff0000;
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
continue;
return;
 
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
115,15 → 101,12
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}
 
spin_unlock_irq(&dev_priv->irq_lock);
}
 
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPESTAT(pipe);
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
 
assert_spin_locked(&dev_priv->irq_lock);
145,11 → 128,28
DE_PIPEB_FIFO_UNDERRUN;
 
if (enable)
ironlake_enable_display_irq(dev_priv, bit);
ilk_enable_display_irq(dev_priv, bit);
else
ironlake_disable_display_irq(dev_priv, bit);
ilk_disable_display_irq(dev_priv, bit);
}
 
static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
uint32_t err_int = I915_READ(GEN7_ERR_INT);
 
assert_spin_locked(&dev_priv->irq_lock);
 
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
 
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
POSTING_READ(GEN7_ERR_INT);
 
DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
}
 
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
161,9 → 161,9
if (!ivb_can_enable_err_int(dev))
return;
 
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
 
if (old &&
I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
178,14 → 178,10
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (enable)
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
else
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
}
 
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
202,6 → 198,24
ibx_disable_display_interrupt(dev_priv, bit);
}
 
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
uint32_t serr_int = I915_READ(SERR_INT);
 
assert_spin_locked(&dev_priv->irq_lock);
 
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
 
I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
POSTING_READ(SERR_INT);
 
DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
transcoder_name(pch_transcoder));
}
 
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable, bool old)
375,3 → 389,56
DRM_ERROR("PCH transcoder %c FIFO underrun\n",
transcoder_name(pch_transcoder));
}
 
/**
* intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
* @dev_priv: i915 device instance
*
* Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
* error interrupt may have been disabled, and so CPU fifo underruns won't
* necessarily raise an interrupt, and on GMCH platforms where underruns never
* raise an interrupt.
*/
void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
 
spin_lock_irq(&dev_priv->irq_lock);
 
for_each_intel_crtc(dev_priv->dev, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
continue;
 
if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_check_fifo_underruns(crtc);
else if (IS_GEN7(dev_priv))
ivybridge_check_fifo_underruns(crtc);
}
 
spin_unlock_irq(&dev_priv->irq_lock);
}
 
/**
* intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
* @dev_priv: i915 device instance
*
* Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
* error interrupt may have been disabled, and so PCH fifo underruns won't
* necessarily raise an interrupt.
*/
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
 
spin_lock_irq(&dev_priv->irq_lock);
 
for_each_intel_crtc(dev_priv->dev, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;
 
if (HAS_PCH_CPT(dev_priv))
cpt_check_pch_fifo_underruns(crtc);
}
 
spin_unlock_irq(&dev_priv->irq_lock);
}
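
Both helpers above exist because these underruns do not reliably raise an
interrupt (never on GMCH, and only via a shared, possibly-masked error
interrupt on IVB/HSW and CPT/PPT), so callers are expected to poll at
points where an underrun could otherwise go unnoticed. A hedged sketch of
such a call site; the surrounding function is made up for illustration:

/* Illustrative modeset-completion hook, not taken from this diff. */
static void example_after_crtc_enable(struct drm_i915_private *dev_priv)
{
	/* Underruns triggered by the enable sequence itself would be
	 * silent here without an explicit check. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}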
/drivers/video/drm/i915/intel_guc.h
42,8 → 42,6
 
uint32_t wq_offset;
uint32_t wq_size;
 
spinlock_t wq_lock; /* Protects all data below */
uint32_t wq_tail;
 
/* GuC submission statistics & status */
76,11 → 74,17
uint16_t guc_fw_minor_wanted;
uint16_t guc_fw_major_found;
uint16_t guc_fw_minor_found;
 
uint32_t header_size;
uint32_t header_offset;
uint32_t rsa_size;
uint32_t rsa_offset;
uint32_t ucode_size;
uint32_t ucode_offset;
};
 
struct intel_guc {
struct intel_guc_fw guc_fw;
 
uint32_t log_flags;
struct drm_i915_gem_object *log_obj;
 
89,8 → 93,6
 
struct i915_guc_client *execbuf_client;
 
spinlock_t host2guc_lock; /* Protects all data below */
 
DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
uint32_t db_cacheline; /* Cyclic counter mod pagesize */
 
/drivers/video/drm/i915/intel_guc_fwif.h
122,6 → 122,78
 
#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
 
/**
* DOC: GuC Firmware Layout
*
* The GuC firmware layout looks like this:
*
* +-------------------------------+
* | guc_css_header |
* | contains major/minor version |
* +-------------------------------+
* | uCode |
* +-------------------------------+
* | RSA signature |
* +-------------------------------+
* | modulus key |
* +-------------------------------+
* | exponent val |
* +-------------------------------+
*
* The firmware may or may not have modulus key and exponent data. The header,
* uCode and RSA signature are must-have components that will be used by the
* driver. The length of each component, in dwords, can be found in the header.
* In the case that modulus and exponent are not present in the fw, a.k.a. a
* truncated image, their length values still appear in the header.
*
* The driver does some basic fw size validation based on the following rules
* (a sketch of this bookkeeping follows the struct below):
*
* 1. Header, uCode and RSA are must-have components.
* 2. All firmware components, if present, appear in the sequence illustrated
*    in the layout table above.
* 3. The length info of each component can be found in the header, in dwords.
* 4. The modulus and exponent key are not required by the driver; they may be
*    absent from the fw, in which case the driver loads a truncated firmware.
*/
 
struct guc_css_header {
uint32_t module_type;
/* header_size includes all non-uCode bits, including css_header, rsa
* key, modulus key and exponent data. */
uint32_t header_size_dw;
uint32_t header_version;
uint32_t module_id;
uint32_t module_vendor;
union {
struct {
uint8_t day;
uint8_t month;
uint16_t year;
};
uint32_t date;
};
uint32_t size_dw; /* uCode plus header_size_dw */
uint32_t key_size_dw;
uint32_t modulus_size_dw;
uint32_t exponent_size_dw;
union {
struct {
uint8_t hour;
uint8_t min;
uint16_t sec;
};
uint32_t time;
};
 
char username[8];
char buildnumber[12];
uint32_t device_id;
uint32_t guc_sw_version;
uint32_t prod_preprod_fw;
uint32_t reserved[12];
uint32_t header_info;
} __packed;
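
A compact sketch of the size bookkeeping the DOC block above describes,
mirroring the checks done later in intel_guc_loader.c; the function name,
the fw_size parameter and the error convention are assumptions made for
illustration:

/* Illustrative only: derive component offsets from the CSS header
 * (all *_dw fields are in dwords) and apply the layout rules. */
static int example_parse_css(const struct guc_css_header *css, size_t fw_size)
{
	u32 header_size = (css->header_size_dw - css->modulus_size_dw -
			   css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
	u32 ucode_offset = header_size;		/* header starts at offset 0 */
	u32 ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
	u32 rsa_offset = ucode_offset + ucode_size;
	u32 rsa_size = css->key_size_dw * sizeof(u32);

	/* Rules 1-3: header, uCode and RSA must all fit in the blob. */
	if (fw_size < (size_t)rsa_offset + rsa_size)
		return -ENOEXEC;

	/* Rule 4: modulus/exponent data past rsa_offset + rsa_size may
	 * be absent (truncated image) and is simply never read. */
	return 0;
}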
 
struct guc_doorbell_info {
u32 db_status;
u32 cookie;
/drivers/video/drm/i915/intel_guc_loader.c
27,12 → 27,11
* Alex Dai <yu.dai@intel.com>
*/
#include <linux/firmware.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_guc.h"
 
/**
* DOC: GuC
* DOC: GuC-specific firmware loader
*
* intel_guc:
* Top level structure of guc. It handles firmware loading and manages client
209,16 → 208,6
/*
* Transfer the firmware image to RAM for execution by the microcontroller.
*
* GuC Firmware layout:
* +-------------------------------+ ----
* | CSS header | 128B
* | contains major/minor version |
* +-------------------------------+ ----
* | uCode |
* +-------------------------------+ ----
* | RSA signature | 256B
* +-------------------------------+ ----
*
* Architecturally, the DMA engine is bidirectional, and can potentially even
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
226,13 → 215,6
* Note that GuC needs the CSS header plus uKernel code to be copied by the
* DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
*/
 
#define UOS_CSS_HEADER_OFFSET 0
#define UOS_VER_MINOR_OFFSET 0x44
#define UOS_VER_MAJOR_OFFSET 0x46
#define UOS_CSS_HEADER_SIZE 0x80
#define UOS_RSA_SIG_SIZE 0x100
 
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
239,20 → 221,23
struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
unsigned long offset;
struct sg_table *sg = fw_obj->pages;
u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)];
u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
int i, ret = 0;
 
/* uCode size, also is where RSA signature starts */
offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE;
I915_WRITE(DMA_COPY_SIZE, ucode_size);
/* where RSA signature starts */
offset = guc_fw->rsa_offset;
 
/* Copy RSA signature from the fw image to HW for verification */
sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset);
for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++)
sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
 
/* The header plus uCode will be copied to WOPCM via DMA, excluding any
* other components */
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
/* Set the source address for the new blob */
offset = i915_gem_obj_ggtt_offset(fw_obj);
offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
323,8 → 308,8
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
 
/* WaDisableMinuteIaClockGating:skl,bxt */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
379,6 → 364,9
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
int err = 0;
 
if (!i915.enable_guc_submission)
return 0;
 
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
458,10 → 446,8
{
struct drm_i915_gem_object *obj;
const struct firmware *fw;
const u8 *css_header;
const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE;
const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
- 0x8000; /* 32k reserved (8K stack + 24k context) */
struct guc_css_header *css;
size_t size;
int err;
 
DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
475,13 → 461,53
 
DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
guc_fw->guc_fw_path, fw);
DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
fw->size, minsize, maxsize);
 
/* Check the size of the blob befoe examining buffer contents */
if (fw->size < minsize || fw->size > maxsize)
/* Check the size of the blob before examining buffer contents */
if (fw->size < sizeof(struct guc_css_header)) {
DRM_ERROR("Firmware header is missing\n");
goto fail;
}
 
css = (struct guc_css_header *)fw->data;
 
/* Firmware bits always start from header */
guc_fw->header_offset = 0;
guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
 
if (guc_fw->header_size != sizeof(struct guc_css_header)) {
DRM_ERROR("CSS header definition mismatch\n");
goto fail;
}
 
/* then, uCode */
guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
 
/* now RSA */
if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
DRM_ERROR("RSA key size is bad\n");
goto fail;
}
guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
 
/* At a minimum the fw must contain header, uCode and RSA; sum the size of all three. */
size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
if (fw->size < size) {
DRM_ERROR("Missing firmware components\n");
goto fail;
}
 
/* The header and uCode will be loaded to WOPCM; sum the size of the two. */
size = guc_fw->header_size + guc_fw->ucode_size;
 
/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
DRM_ERROR("Firmware is too large to fit in WOPCM\n");
goto fail;
}
 
/*
* The GuC firmware image has the version number embedded at a well-known
* offset within the firmware blob; note that major / minor version are
488,9 → 514,8
* TWO bytes each (i.e. u16), although all pointers and offsets are defined
* in terms of bytes (u8).
*/
css_header = fw->data + UOS_CSS_HEADER_OFFSET;
guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET);
guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
 
if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
567,6 → 592,9
fw_path = ""; /* unknown device */
}
 
if (!i915.enable_guc_submission)
return;
 
guc_fw->guc_dev = dev;
guc_fw->guc_fw_path = fw_path;
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
/drivers/video/drm/i915/intel_hdmi.c
78,7 → 78,7
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_SELECT_VENDOR;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
MISSING_CASE(type);
return 0;
}
}
93,7 → 93,7
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VENDOR;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
MISSING_CASE(type);
return 0;
}
}
108,12 → 108,13
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VS_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
MISSING_CASE(type);
return 0;
}
}
 
static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
static i915_reg_t
hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder,
enum hdmi_infoframe_type type,
int i)
126,8 → 127,8
case HDMI_INFOFRAME_TYPE_VENDOR:
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
MISSING_CASE(type);
return INVALID_MMIO_REG;
}
}
 
168,10 → 169,10
POSTING_READ(VIDEO_DIP_CTL);
}
 
static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
 
193,8 → 194,9
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
223,13 → 225,13
POSTING_READ(reg);
}
 
static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
u32 val = I915_READ(reg);
 
if ((val & VIDEO_DIP_ENABLE) == 0)
251,8 → 253,9
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
284,13 → 287,12
POSTING_READ(reg);
}
 
static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
 
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
308,8 → 310,9
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
338,14 → 341,13
POSTING_READ(reg);
}
 
static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
 
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
367,14 → 369,12
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
u32 data_reg;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
i915_reg_t data_reg;
int i;
u32 val = I915_READ(ctl_reg);
 
data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
if (data_reg == 0)
return;
 
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
396,13 → 396,11
POSTING_READ(ctl_reg);
}
 
static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(ctl_reg);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
 
return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
513,7 → 511,7
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
633,11 → 631,12
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
u32 reg, val = 0;
i915_reg_t reg;
u32 val = 0;
 
if (HAS_DDI(dev_priv))
reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv))
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv->dev))
reg = TVIDEO_DIP_GCP(crtc->pipe);
666,7 → 665,7
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
717,7 → 716,7
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
assert_hdmi_port_disabled(intel_hdmi);
760,7 → 759,7
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
811,7 → 810,7
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(reg);
 
assert_hdmi_port_disabled(intel_hdmi);
881,15 → 880,18
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
ret = false;
 
tmp = I915_READ(intel_hdmi->hdmi_reg);
 
if (!(tmp & SDVO_ENABLE))
return false;
goto out;
 
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
898,7 → 900,12
else
*pipe = PORT_TO_PIPE(tmp);
 
return true;
ret = true;
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static void intel_hdmi_get_config(struct intel_encoder *encoder,
925,7 → 932,7
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
 
if (intel_hdmi->infoframe_enabled(&encoder->base))
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
 
if (tmp & SDVO_AUDIO_ENABLE)
1108,6 → 1115,13
* matching DP port to be enabled on transcoder A.
*/
if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
temp &= ~SDVO_PIPE_B_SELECT;
temp |= SDVO_ENABLE;
/*
1122,6 → 1136,10
temp &= ~SDVO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
 
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
 
intel_hdmi->set_infoframes(&encoder->base, false, NULL);
1331,13 → 1349,14
}
 
static bool
intel_hdmi_set_edid(struct drm_connector *connector)
intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct edid *edid;
struct edid *edid = NULL;
bool connected = false;
 
if (force) {
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
edid = drm_get_edid(connector,
1345,6 → 1364,7
intel_hdmi->ddc_bus));
 
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
}
 
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
1370,7 → 1390,10
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool live_status = false;
unsigned int try;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
1377,9 → 1400,27
 
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
for (try = 0; !live_status && try < 9; try++) {
if (try)
msleep(10);
live_status = intel_digital_port_connected(dev_priv,
hdmi_to_dig_port(intel_hdmi));
}
 
if (!live_status) {
DRM_DEBUG_KMS("HDMI live status down\n");
/*
* The live status register is not reliable on all Intel platforms, so
* consider live_status only on certain platforms; for others, read the
* EDID to determine the presence of a sink.
*/
if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
live_status = true;
}
 
intel_hdmi_unset_edid(connector);
 
if (intel_hdmi_set_edid(connector)) {
if (intel_hdmi_set_edid(connector, live_status)) {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
1405,7 → 1446,7
if (connector->status != connector_status_connected)
return;
 
intel_hdmi_set_edid(connector);
intel_hdmi_set_edid(connector, true);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
 
1997,50 → 2038,6
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
 
static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
u8 ddc_pin;
 
if (info->alternate_ddc_pin) {
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
info->alternate_ddc_pin, port_name(port));
return info->alternate_ddc_pin;
}
 
switch (port) {
case PORT_B:
if (IS_BROXTON(dev_priv))
ddc_pin = GMBUS_PIN_1_BXT;
else
ddc_pin = GMBUS_PIN_DPB;
break;
case PORT_C:
if (IS_BROXTON(dev_priv))
ddc_pin = GMBUS_PIN_2_BXT;
else
ddc_pin = GMBUS_PIN_DPC;
break;
case PORT_D:
if (IS_CHERRYVIEW(dev_priv))
ddc_pin = GMBUS_PIN_DPD_CHV;
else
ddc_pin = GMBUS_PIN_DPD;
break;
default:
MISSING_CASE(port);
ddc_pin = GMBUS_PIN_DPB;
break;
}
 
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
ddc_pin, port_name(port));
 
return ddc_pin;
}
 
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
2050,10 → 2047,8
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
 
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
 
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
2062,34 → 2057,65
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
 
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
 
switch (port) {
case PORT_B:
if (IS_BROXTON(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
else
intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
if (IS_BROXTON(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
if (WARN_ON(IS_BROXTON(dev_priv)))
intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
else if (IS_CHERRYVIEW(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_E:
/* On SKL, PORT E doesn't have a separate GMBUS pin.
* We rely on VBT to set a proper alternate GMBUS pin. */
alternate_ddc_pin =
dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
switch (alternate_ddc_pin) {
case DDC_PIN_B:
intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
break;
case DDC_PIN_C:
intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
break;
case DDC_PIN_D:
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
break;
default:
MISSING_CASE(alternate_ddc_pin);
}
intel_encoder->hpd_pin = HPD_PORT_E;
break;
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
/* Internal port only for eDP. */
default:
MISSING_CASE(port);
return;
BUG();
}
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
2133,7 → 2159,8
}
}
 
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
2152,7 → 2179,7
intel_encoder = &intel_dig_port->base;
 
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
DRM_MODE_ENCODER_TMDS, NULL);
 
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev)) {
2204,7 → 2231,7
 
intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = 0;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
 
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
/drivers/video/drm/i915/intel_hotplug.c
407,7 → 407,7
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
WARN_ONCE(!HAS_GMCH_DISPLAY(dev),
"Received HPD interrupt on pin %d although disabled\n", i);
continue;
}
/drivers/video/drm/i915/intel_i2c.c
36,7 → 36,7
 
struct gmbus_pin {
const char *name;
int reg;
i915_reg_t reg;
};
 
/* Map gmbus pin pairs to names and registers. */
63,9 → 63,9
};
 
static const struct gmbus_pin gmbus_pins_bxt[] = {
[GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB },
[GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC },
[GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD },
[GMBUS_PIN_1_BXT] = { "dpb", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpc", GPIOC },
[GMBUS_PIN_3_BXT] = { "misc", GPIOD },
};
 
/* pin is expected to be valid */
74,7 → 74,7
{
if (IS_BROXTON(dev_priv))
return &gmbus_pins_bxt[pin];
else if (IS_SKYLAKE(dev_priv))
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return &gmbus_pins_skl[pin];
else if (IS_BROADWELL(dev_priv))
return &gmbus_pins_bdw[pin];
89,7 → 89,7
 
if (IS_BROXTON(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_SKYLAKE(dev_priv))
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw);
96,7 → 96,8
else
size = ARRAY_SIZE(gmbus_pins);
 
return pin < size && get_gmbus_pin(dev_priv, pin)->reg;
return pin < size &&
i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg);
}
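
Several hunks in this revision replace raw u32 register offsets with the
typed i915_reg_t wrapper, so an MMIO offset can no longer be confused with
an ordinary integer and offset 0 stops doubling as an "invalid" flag
(INVALID_MMIO_REG and i915_mmio_reg_valid() take over that role, as in the
check above). A reduction of the idiom to its bare shape, with example_*
names standing in for the real ones:

/* Illustrative reduction of the typed-register idiom. */
typedef struct { u32 reg; } example_reg_t;

#define EXAMPLE_MMIO(off)	((example_reg_t){ .reg = (off) })
#define EXAMPLE_INVALID_REG	EXAMPLE_MMIO(0)

static inline u32 example_reg_offset(example_reg_t r) { return r.reg; }
static inline bool example_reg_valid(example_reg_t r) { return r.reg != 0; }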
 
/* Intel GPIO access functions */
240,9 → 241,8
 
algo = &bus->bit_algo;
 
bus->gpio_reg = dev_priv->gpio_mmio_base +
get_gmbus_pin(dev_priv, pin)->reg;
 
bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base +
i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg));
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;
472,9 → 472,7
}
 
static int
gmbus_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
483,14 → 481,6
int i = 0, inc, try = 0;
int ret = 0;
 
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
 
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
goto out;
}
 
retry:
I915_WRITE(GMBUS0, bus->reg0);
 
505,17 → 495,13
ret = gmbus_xfer_write(dev_priv, &msgs[i]);
}
 
if (!ret)
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
GMBUS_HW_WAIT_EN);
if (ret == -ETIMEDOUT)
goto timeout;
if (ret == -ENXIO)
else if (ret)
goto clear_err;
 
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
GMBUS_HW_WAIT_EN);
if (ret == -ENXIO)
goto clear_err;
if (ret)
goto timeout;
}
 
/* Generate a STOP condition on the bus. Note that gmbus can't generate
589,13 → 575,34
bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0, 0);
 
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
* instead. Use EAGAIN to have i2c core retry.
*/
bus->force_bit = 1;
ret = -EAGAIN;
 
out:
return ret;
}
 
static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
int ret;
 
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
 
if (bus->force_bit)
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
else
ret = do_gmbus_xfer(adapter, msgs, num);
 
out:
mutex_unlock(&dev_priv->gmbus_mutex);
 
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
return ret;
628,12 → 635,13
 
if (HAS_PCH_NOP(dev))
return 0;
else if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
else if (IS_VALLEYVIEW(dev))
 
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else
dev_priv->gpio_mmio_base = 0;
else if (!HAS_GMCH_DISPLAY(dev_priv))
dev_priv->gpio_mmio_base =
i915_mmio_reg_offset(PCH_GPIOA) -
i915_mmio_reg_offset(GPIOA);
 
mutex_init(&dev_priv->gmbus_mutex);
init_waitqueue_head(&dev_priv->gmbus_wait_queue);
656,6 → 664,12
 
bus->adapter.algo = &gmbus_algorithm;
 
/*
* We wish to retry with bit-banging
* after a timed-out GMBUS attempt.
*/
bus->adapter.retries = 1;
 
/* By default use a conservative clock rate */
bus->reg0 = pin | GMBUS_RATE_100KHZ;
 
/drivers/video/drm/i915/intel_lrc.c
134,7 → 134,7
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_mocs.h"
 
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
190,16 → 190,21
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)
 
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
(reg_state)[(pos)+1] = (val); \
} while (0)
 
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
}
} while (0)
 
#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
}
} while (0)
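
This hunk also wraps the multi-statement context-setup macros in
do { ... } while (0) so each expands to exactly one statement. A bare
brace block breaks when the macro is used as the body of an if with an
else, as the minimal (hypothetical) example below shows:

#define BAD_PAIR(a, b)	{ (a) = 1; (b) = 2; }		/* bare braces */
#define GOOD_PAIR(a, b)	do { (a) = 1; (b) = 2; } while (0)

static void example(int cond, int x, int y)
{
	if (cond)
		GOOD_PAIR(x, y);	/* one statement: binds cleanly */
	else
		x = 0;
	/* With BAD_PAIR the ';' after the expanded braces ends the if,
	 * leaving the else with nothing to attach to: a compile error. */
}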
 
enum {
ADVANCED_CONTEXT = 0,
284,8 → 289,8
{
struct drm_device *dev = ring->dev;
 
return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
(ring->id == VCS || ring->id == VCS2);
}
 
367,7 → 372,7
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
reg_state[CTX_RING_TAIL+1] = rq->tail;
921,7 → 926,7
 
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
intel_logical_ring_emit(ringbuf, INSTPM);
intel_logical_ring_emit_reg(ringbuf, INSTPM);
intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
intel_logical_ring_advance(ringbuf);
 
1096,7 → 1101,7
 
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_logical_ring_emit(ringbuf, w->reg[i].addr);
intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
intel_logical_ring_emit(ringbuf, w->reg[i].value);
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
1120,6 → 1125,8
batch[__index] = (cmd); \
} while (0)
 
#define wa_ctx_emit_reg(batch, index, reg) \
wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
 
/*
* In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1149,17 → 1156,17
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0);
 
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, l3sqc4_flush);
 
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1172,7 → 1179,7
 
wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0);
 
1314,8 → 1321,8
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
/* WaDisableCtxRestoreArbitration:skl,bxt */
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
1340,10 → 1347,10
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
_MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
wa_ctx_emit(batch, index, MI_NOOP);
1350,8 → 1357,8
}
 
/* WaDisableCtxRestoreArbitration:skl,bxt */
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1418,7 → 1425,7
return ret;
}
 
page = i915_gem_object_get_page(wa_ctx->obj, 0);
page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
batch = kmap_atomic(page);
offset = 0;
 
1472,12 → 1479,6
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
if (ring->status_page.obj) {
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
(u32)ring->status_page.gfx_addr);
POSTING_READ(RING_HWS_PGA(ring->mmio_base));
}
 
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1562,9 → 1563,9
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
}
 
1894,8 → 1895,10
 
dev_priv = ring->dev->dev_private;
 
if (ring->buffer) {
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
}
 
if (ring->cleanup)
ring->cleanup(ring);
1909,6 → 1912,7
}
 
lrc_destroy_wa_ctx_obj(ring);
ring->dev = NULL;
}
 
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
1924,6 → 1928,7
i915_gem_batch_pool_init(dev, &ring->batch_pool);
init_waitqueue_head(&ring->irq_queue);
 
INIT_LIST_HEAD(&ring->buffers);
INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
1930,11 → 1935,11
 
ret = i915_cmd_parser_init_ring(ring);
if (ret)
return ret;
goto error;
 
ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
if (ret)
return ret;
goto error;
 
/* As this is the default context, always pin it */
ret = intel_lr_context_do_pin(
1945,9 → 1950,13
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
return ret;
goto error;
}
 
return 0;
 
error:
intel_logical_ring_cleanup(ring);
return ret;
}
 
1973,7 → 1982,7
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
2025,7 → 2034,7
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
ring->init_hw = gen8_init_common_ring;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
2080,7 → 2089,7
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
ring->init_hw = gen8_init_common_ring;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
2110,7 → 2119,7
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
ring->init_hw = gen8_init_common_ring;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
2256,7 → 2265,7
 
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2264,46 → 2273,31
* only for the first context restore: on a subsequent save, the GPU will
* recreate this batchbuffer with new values (including all the missing
* MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
if (ring->id == RCS)
reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
else
reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
reg_state[CTX_CONTEXT_CONTROL+1] =
reg_state[CTX_LRI_HEADER_0] =
MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
reg_state[CTX_RING_TAIL+1] = 0;
reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
CTX_CTRL_RS_CTX_ENABLE));
ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
/* Ring buffer start address is not known until the buffer is pinned.
* It is written to the context image in execlists_update_context()
*/
reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
reg_state[CTX_RING_BUFFER_CONTROL+1] =
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
reg_state[CTX_BB_HEAD_U+1] = 0;
reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
reg_state[CTX_BB_HEAD_L+1] = 0;
reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
reg_state[CTX_BB_STATE+1] = (1<<5);
reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
reg_state[CTX_SECOND_BB_STATE+1] = 0;
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
RING_BB_PPGTT);
ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
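/* ASSIGN_CTX_REG (a macro defined earlier in this file) roughly expands
 * to: reg_state[pos] = i915_mmio_reg_offset(reg);
 *     reg_state[pos + 1] = val;
 * i.e. the same reg/value pair layout as the open-coded "+1" stores,
 * but taking a typed i915_reg_t register argument. (Sketch of the
 * macro's effect, not a verbatim copy.) */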
if (ring->id == RCS) {
reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
if (ring->wa_ctx.obj) {
struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2320,18 → 2314,17
0x01;
}
}
reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
reg_state[CTX_CTX_TIMESTAMP+1] = 0;
reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
/* PDP values will be assigned later if needed */
ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
 
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* 64b PPGTT (48bit canonical)
2353,14 → 2346,11
 
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
make_rpcs(dev));
}
 
kunmap_atomic(reg_state);
 
ctx_obj->dirty = 1;
set_page_dirty(page);
i915_gem_object_unpin_pages(ctx_obj);
 
return 0;
2544,7 → 2534,7
WARN(1, "Failed get_pages for context obj\n");
continue;
}
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
reg_state[CTX_RING_HEAD+1] = 0;
/drivers/video/drm/i915/intel_lrc.h
29,16 → 29,16
#define GEN8_CSB_PTR_MASK 0x07
 
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS_LO(ring) ((ring)->mmio_base+0x234)
#define RING_EXECLIST_STATUS_HI(ring) ((ring)->mmio_base+0x234 + 4)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
#define RING_EXECLIST_STATUS_LO(ring) _MMIO((ring)->mmio_base + 0x234)
#define RING_EXECLIST_STATUS_HI(ring) _MMIO((ring)->mmio_base + 0x234 + 4)
#define RING_CONTEXT_CONTROL(ring) _MMIO((ring)->mmio_base + 0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define RING_CONTEXT_STATUS_BUF_LO(ring, i) ((ring)->mmio_base+0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) ((ring)->mmio_base+0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
#define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
 
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
70,6 → 70,11
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
i915_reg_t reg)
{
intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
}
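/* A minimal usage sketch (emit_lri_one is hypothetical, not part of the
 * driver): program a single register via the typed helper instead of
 * passing a raw u32 offset to intel_logical_ring_emit(). */
static inline void emit_lri_one(struct intel_ringbuffer *ringbuf,
				i915_reg_t reg, u32 value)
{
	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
	intel_logical_ring_emit_reg(ringbuf, reg);
	intel_logical_ring_emit(ringbuf, value);
}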
 
/* Logical Ring Contexts */
 
/drivers/video/drm/i915/intel_lvds.c
44,7 → 44,7
struct intel_lvds_connector {
struct intel_connector base;
 
// struct notifier_block lid_notifier;
struct notifier_block lid_notifier;
};
 
struct intel_lvds_encoder {
51,7 → 51,7
struct intel_encoder base;
 
bool is_dual_link;
u32 reg;
i915_reg_t reg;
u32 a3_power;
 
struct intel_lvds_connector *attached_connector;
75,15 → 75,18
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
ret = false;
 
tmp = I915_READ(lvds_encoder->reg);
 
if (!(tmp & LVDS_PORT_EN))
return false;
goto out;
 
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
90,7 → 93,12
else
*pipe = PORT_TO_PIPE(tmp);
 
return true;
ret = true;
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
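/* The get_if_enabled/put pairing above holds a reference on the power
 * domain for the whole register read rather than merely checking it,
 * closing the window where the power well could be switched off between
 * the check and the I915_READ(). */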
 
static void intel_lvds_get_config(struct intel_encoder *encoder,
210,7 → 218,7
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg;
i915_reg_t ctl_reg, stat_reg;
 
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
235,7 → 243,7
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg;
i915_reg_t ctl_reg, stat_reg;
 
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
939,7 → 947,7
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
struct drm_crtc *crtc;
u32 lvds_reg;
i915_reg_t lvds_reg;
u32 lvds;
int pipe;
u8 pin;
1025,7 → 1033,7
DRM_MODE_CONNECTOR_LVDS);
 
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
DRM_MODE_ENCODER_LVDS, NULL);
 
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
1164,8 → 1172,7
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
 
lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
LVDS_A3_POWER_MASK;
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
 
drm_connector_register(connector);
 
/drivers/video/drm/i915/intel_mocs.c
143,7 → 143,7
{
bool result = false;
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
159,11 → 159,30
return result;
}
 
static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
{
switch (ring) {
case RCS:
return GEN9_GFX_MOCS(index);
case VCS:
return GEN9_MFX0_MOCS(index);
case BCS:
return GEN9_BLT_MOCS(index);
case VECS:
return GEN9_VEBOX_MOCS(index);
case VCS2:
return GEN9_MFX1_MOCS(index);
default:
MISSING_CASE(ring);
return INVALID_MMIO_REG;
}
}
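/* Illustrative use of the lookup (the actual bank offsets depend on the
 * gen9 register layout and are not spelled out here):
 *
 *   i915_reg_t reg = mocs_register(RCS, 3);  // == GEN9_GFX_MOCS(3)
 *   u32 offset = i915_mmio_reg_offset(reg);  // render bank base + 3 * 4
 */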
 
/**
* emit_mocs_control_table() - emit the mocs control table
* @req: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
* @reg_base: The base for the engine that needs to be programmed.
* @ring: The engine for which to emit the registers.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
* given table starting at the given address.
172,7 → 191,7
*/
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table,
u32 reg_base)
enum intel_ring_id ring)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
unsigned int index;
191,7 → 210,7
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
for (index = 0; index < table->size; index++) {
intel_logical_ring_emit(ringbuf, reg_base + index * 4);
intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
intel_logical_ring_emit(ringbuf,
table->table[index].control_value);
}
205,7 → 224,7
* that value to all the used entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
intel_logical_ring_emit(ringbuf, reg_base + index * 4);
intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
intel_logical_ring_emit(ringbuf, table->table[0].control_value);
}
 
253,7 → 272,7
value = (table->table[count].l3cc_value & 0xffff) |
((table->table[count + 1].l3cc_value & 0xffff) << 16);
 
intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
intel_logical_ring_emit(ringbuf, value);
}
 
270,7 → 289,7
* they are reserved by the hardware.
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
intel_logical_ring_emit(ringbuf, value);
 
value = filler;
304,27 → 323,17
int ret;
 
if (get_mocs_settings(req->ring->dev, &t)) {
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *ring;
enum intel_ring_id ring_id;
 
/* Program the control registers */
ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
for_each_ring(ring, dev_priv, ring_id) {
ret = emit_mocs_control_table(req, &t, ring_id);
if (ret)
return ret;
}
 
ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
if (ret)
return ret;
 
ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
if (ret)
return ret;
 
ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
if (ret)
return ret;
 
ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
if (ret)
return ret;
 
/* Now program the l3cc registers */
ret = emit_mocs_l3cc_table(req, &t);
if (ret)
/drivers/video/drm/i915/intel_opregion.c
26,6 → 26,7
*/
 
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <acpi/video.h>
 
#include <drm/drmP.h>
46,6 → 47,7
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x400
#define OPREGION_ASLE_EXT_OFFSET 0x1C00
 
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
120,9 → 122,18
u64 fdss;
u32 fdsp;
u32 stat;
u8 rsvd[70];
u64 rvda; /* Physical address of raw vbt data */
u32 rvds; /* Size of raw vbt data */
u8 rsvd[58];
} __packed;
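/* The reserved tail shrinks from 70 to 58 bytes to make room for the
 * new fields: sizeof(rvda) + sizeof(rvds) = 8 + 4 = 12 bytes, keeping
 * the ASLE mailbox at its fixed 0x100-byte size. */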
 
/* OpRegion mailbox #5: ASLE ext */
struct opregion_asle_ext {
u32 phed; /* Panel Header */
u8 bddc[256]; /* Panel EDID */
u8 rsvd[764];
} __packed;
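/* Size check: 4 (phed) + 256 (bddc) + 764 (rsvd) = 1024 = 0x400 bytes,
 * matching the BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400)
 * added in intel_opregion_setup() below. */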
 
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
#define ASLE_ARDY_NOT_READY (0 << 0)
411,7 → 422,7
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector;
struct intel_connector *connector;
struct opregion_asle *asle = dev_priv->opregion.asle;
 
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
435,8 → 446,8
* only one).
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
for_each_intel_connector(dev, connector)
intel_panel_set_backlight_acpi(connector, bclp, 255);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
 
drm_modeset_unlock(&dev->mode_config.connection_mutex);
826,6 → 837,10
 
/* just clear all opregion memory pointers now */
memunmap(opregion->header);
if (opregion->rvda) {
memunmap(opregion->rvda);
opregion->rvda = NULL;
}
opregion->header = NULL;
opregion->acpi = NULL;
opregion->swsci = NULL;
894,6 → 909,25
static inline void swsci_setup(struct drm_device *dev) {}
#endif /* CONFIG_ACPI */
 
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Falling back to manually reading VBT from "
"VBIOS ROM for %s\n", id->ident);
return 1;
}
 
static const struct dmi_system_id intel_no_opregion_vbt[] = {
{
.callback = intel_no_opregion_vbt_callback,
.ident = "ThinkCentre A57",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
},
},
{ }
};
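/* Further machines can be blacklisted by appending entries before the
 * terminating { }, e.g. (hypothetical vendor/model strings):
 *
 *   {
 *        .callback = intel_no_opregion_vbt_callback,
 *        .ident = "Example Machine",
 *        .matches = {
 *                DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
 *                DMI_MATCH(DMI_PRODUCT_NAME, "MODEL-1"),
 *        },
 *   },
 */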
 
int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
907,6 → 941,7
BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
 
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
931,8 → 966,6
goto err_out;
}
opregion->header = base;
opregion->vbt = base + OPREGION_VBT_OFFSET;
 
opregion->lid_state = base + ACPI_CLID;
 
mboxes = opregion->header->mboxes;
946,6 → 979,7
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
swsci_setup(dev);
}
 
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
953,6 → 987,37
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
}
 
if (mboxes & MBOX_ASLE_EXT)
DRM_DEBUG_DRIVER("ASLE extension supported\n");
 
if (!dmi_check_system(intel_no_opregion_vbt)) {
const void *vbt = NULL;
u32 vbt_size = 0;
 
if (opregion->header->opregion_ver >= 2 && opregion->asle &&
opregion->asle->rvda && opregion->asle->rvds) {
opregion->rvda = memremap(opregion->asle->rvda,
opregion->asle->rvds,
MEMREMAP_WB);
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
}
 
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
} else {
vbt = base + OPREGION_VBT_OFFSET;
vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
}
}
}
 
return 0;
 
err_out:
/drivers/video/drm/i915/intel_panel.c
461,8 → 461,7
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
 
WARN_ON(panel->backlight.max == 0);
480,8 → 479,7
 
static u32 lpt_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
488,8 → 486,7
 
static u32 pch_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
496,19 → 493,18
 
static u32 i9xx_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 val;
 
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (INTEL_INFO(dev)->gen < 4)
if (INTEL_INFO(dev_priv)->gen < 4)
val >>= 1;
 
if (panel->backlight.combination_mode) {
u8 lbpc;
 
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
 
515,10 → 511,8
return val;
}
 
static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return 0;
 
527,17 → 521,16
 
static u32 vlv_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = intel_get_pipe_from_connector(connector);
 
return _vlv_get_backlight(dev, pipe);
return _vlv_get_backlight(dev_priv, pipe);
}
 
static u32 bxt_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
553,8 → 546,7
 
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 val = 0;
 
573,8 → 565,7
 
static void lpt_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
}
581,8 → 572,7
 
static void pch_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
 
tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
591,8 → 581,7
 
static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
 
603,10 → 592,10
 
lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc;
pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc);
}
 
if (IS_GEN4(dev)) {
if (IS_GEN4(dev_priv)) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
619,8 → 608,7
 
static void vlv_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
 
633,8 → 621,7
 
static void bxt_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
 
I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
663,8 → 650,7
static void intel_panel_set_backlight(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
 
690,8 → 676,7
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
721,8 → 706,7
 
static void lpt_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
 
intel_panel_actually_set_backlight(connector, 0);
747,8 → 731,7
 
static void pch_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
 
intel_panel_actually_set_backlight(connector, 0);
767,8 → 750,7
 
static void i965_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
 
intel_panel_actually_set_backlight(connector, 0);
779,8 → 761,7
 
static void vlv_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
 
795,8 → 776,7
 
static void bxt_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp, val;
 
825,8 → 805,7
 
void intel_panel_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
 
if (!panel->backlight.present)
838,7 → 817,7
* backlight. This will leave the backlight on unnecessarily when
* another client is not activated.
*/
if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
return;
}
853,8 → 832,7
 
static void lpt_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2;
 
886,8 → 864,7
 
static void pch_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum transcoder cpu_transcoder =
933,8 → 910,7
 
static void i9xx_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
 
951,7 → 927,7
ctl = freq << 17;
if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
 
I915_WRITE(BLC_PWM_CTL, ctl);
965,14 → 941,13
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
if (IS_GEN2(dev))
if (IS_GEN2(dev_priv))
I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
 
static void i965_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2, freq;
1005,8 → 980,7
 
static void vlv_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2;
1037,8 → 1011,7
 
static void bxt_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 pwm_ctl, val;
1095,8 → 1068,7
 
void intel_panel_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
 
1250,6 → 1222,14
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
/*
* BXT: PWM clock frequency = 19.2 MHz.
*/
static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
return KHz(19200) / pwm_freq_hz;
}
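/* Worked example, assuming the 200 Hz fallback frequency used by
 * get_backlight_max_vbt() below: KHz(19200) / 200 = 19,200,000 / 200
 * = 96,000 PWM clock cycles per backlight PWM period. */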
 
/*
* SPT: This value represents the period of the PWM stream in clock periods
* multiplied by 16 (default increment) or 128 (alternate increment selected in
* SCHICKEN_1 bit 0). PWM clock is 24 MHz.
1256,8 → 1236,7
*/
static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 mul, clock;
 
if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
1277,8 → 1256,7
*/
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 mul, clock;
 
if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
1286,7 → 1264,7
else
mul = 128;
 
if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
if (HAS_PCH_LPT_H(dev_priv))
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
1321,9 → 1299,9
int clock;
 
if (IS_PINEVIEW(dev))
clock = intel_hrawclk(dev);
clock = MHz(intel_hrawclk(dev));
else
clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
clock = 1000 * dev_priv->cdclk_freq;
 
return clock / (pwm_freq_hz * 32);
}
1330,14 → 1308,20
 
/*
* Gen4: This value represents the period of the PWM stream in display core
* clocks multiplied by 128.
* clocks ([DevCTG] HRAW clocks) multiplied by 128.
*
*/
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
int clock;
 
if (IS_G4X(dev_priv))
clock = MHz(intel_hrawclk(dev));
else
clock = 1000 * dev_priv->cdclk_freq;
 
return clock / (pwm_freq_hz * 128);
}
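/* Worked example (illustrative numbers): with a 200 MHz display core
 * clock (clock = 200,000,000) and a 200 Hz PWM frequency, the maximum
 * backlight value is 200,000,000 / (200 * 128) = 7812. */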
 
1365,20 → 1349,23
 
static u32 get_backlight_max_vbt(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
u32 pwm;
 
if (!pwm_freq_hz) {
DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
if (!panel->backlight.hz_to_pwm) {
DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
return 0;
}
 
if (!panel->backlight.hz_to_pwm) {
DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
return 0;
if (pwm_freq_hz) {
DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
pwm_freq_hz);
} else {
pwm_freq_hz = 200;
DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
pwm_freq_hz);
}
 
pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
1387,8 → 1374,6
return 0;
}
 
DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
 
return pwm;
}
 
1397,8 → 1382,7
*/
static u32 get_backlight_min_vbt(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
int min;
 
1423,8 → 1407,7
 
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, val;
 
1453,8 → 1436,7
 
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
 
1484,17 → 1466,16
 
static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, val;
 
ctl = I915_READ(BLC_PWM_CTL);
 
if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
 
if (IS_PINEVIEW(dev))
if (IS_PINEVIEW(dev_priv))
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
 
panel->backlight.max = ctl >> 17;
1522,8 → 1503,7
 
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val;
 
1556,8 → 1536,7
 
static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val;
 
1578,7 → 1557,7
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = _vlv_get_backlight(dev, pipe);
val = _vlv_get_backlight(dev_priv, pipe);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
1590,8 → 1569,7
static int
bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
 
1669,8 → 1647,7
 
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
int ret;
1725,28 → 1702,28
static void
intel_panel_init_backlight_funcs(struct intel_panel *panel)
{
struct intel_connector *intel_connector =
struct intel_connector *connector =
container_of(panel, struct intel_connector, panel);
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
if (IS_BROXTON(dev)) {
if (IS_BROXTON(dev_priv)) {
panel->backlight.setup = bxt_setup_backlight;
panel->backlight.enable = bxt_enable_backlight;
panel->backlight.disable = bxt_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
} else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
panel->backlight.set = lpt_set_backlight;
panel->backlight.get = lpt_get_backlight;
if (HAS_PCH_LPT(dev))
if (HAS_PCH_LPT(dev_priv))
panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
else
panel->backlight.hz_to_pwm = spt_hz_to_pwm;
} else if (HAS_PCH_SPLIT(dev)) {
} else if (HAS_PCH_SPLIT(dev_priv)) {
panel->backlight.setup = pch_setup_backlight;
panel->backlight.enable = pch_enable_backlight;
panel->backlight.disable = pch_disable_backlight;
1753,7 → 1730,7
panel->backlight.set = pch_set_backlight;
panel->backlight.get = pch_get_backlight;
panel->backlight.hz_to_pwm = pch_hz_to_pwm;
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (dev_priv->vbt.has_mipi) {
panel->backlight.setup = pwm_setup_backlight;
panel->backlight.enable = pwm_enable_backlight;
1768,7 → 1745,7
panel->backlight.get = vlv_get_backlight;
panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
}
} else if (IS_GEN4(dev)) {
} else if (IS_GEN4(dev_priv)) {
panel->backlight.setup = i965_setup_backlight;
panel->backlight.enable = i965_enable_backlight;
panel->backlight.disable = i965_disable_backlight;
1814,7 → 1791,7
{
struct intel_connector *connector;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
for_each_intel_connector(dev, connector)
intel_backlight_device_register(connector);
}
 
1822,6 → 1799,6
{
struct intel_connector *connector;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
for_each_intel_connector(dev, connector)
intel_backlight_device_unregister(connector);
}
/drivers/video/drm/i915/intel_pm.c
71,6 → 71,14
*/
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
 
/*
* Wa: Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
 
static void i915_pineview_get_mem_freq(struct drm_device *dev)
288,7 → 296,7
struct drm_device *dev = dev_priv->dev;
u32 val;
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
POSTING_READ(FW_BLC_SELF_VLV);
dev_priv->wm.vlv.cxsr = enable;
1713,13 → 1721,6
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
 
struct skl_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
uint32_t pixel_rate; /* in KHz */
struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
};
 
struct ilk_wm_maximums {
uint16_t pri;
uint16_t spr;
1727,13 → 1728,6
uint16_t fbc;
};
 
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
bool sprites_enabled;
bool sprites_scaled;
};
 
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
1988,9 → 1982,11
const struct intel_crtc *intel_crtc,
int level,
struct intel_crtc_state *cstate,
struct intel_plane_state *pristate,
struct intel_plane_state *sprstate,
struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
struct intel_plane *intel_plane;
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
uint16_t spr_latency = dev_priv->wm.spr_latency[level];
uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2002,29 → 1998,11
cur_latency *= 5;
}
 
for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) {
struct intel_plane_state *pstate =
to_intel_plane_state(intel_plane->base.state);
 
switch (intel_plane->base.type) {
case DRM_PLANE_TYPE_PRIMARY:
result->pri_val = ilk_compute_pri_wm(cstate, pstate,
pri_latency,
level);
result->fbc_val = ilk_compute_fbc_wm(cstate, pstate,
result->pri_val);
break;
case DRM_PLANE_TYPE_OVERLAY:
result->spr_val = ilk_compute_spr_wm(cstate, pstate,
spr_latency);
break;
case DRM_PLANE_TYPE_CURSOR:
result->cur_val = ilk_compute_cur_wm(cstate, pstate,
cur_latency);
break;
}
}
 
result->pri_val = ilk_compute_pri_wm(cstate, pristate,
pri_latency, level);
result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
result->enable = true;
}
 
2102,34 → 2080,32
GEN9_MEM_LATENCY_LEVEL_MASK;
 
/*
* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
* need to be disabled. We make sure to sanitize the values out
* of the punit to satisfy this requirement.
*/
for (level = 1; level <= max_level; level++) {
if (wm[level] == 0) {
for (i = level + 1; i <= max_level; i++)
wm[i] = 0;
break;
}
}
 
/*
* WaWmMemoryReadLatency:skl
*
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from the
* punit when level 0 response data is 0us.
* to add 2us to the various latency levels we retrieve from
* the punit.
* - W0 is a bit special in that it's the only level that
* can't be disabled if we want to have display working, so
* we always add 2us there.
* - For levels >=1, punit returns 0us latency when they are
* disabled, so we respect that and don't add 2us then
*
* Additionally, if a level n (n > 1) has a 0us latency, all
* levels m (m >= n) need to be disabled. We make sure to
* sanitize the values out of the punit to satisfy this
* requirement.
*/
if (wm[0] == 0) {
wm[0] += 2;
for (level = 1; level <= max_level; level++) {
if (wm[level] == 0)
for (level = 1; level <= max_level; level++)
if (wm[level] != 0)
wm[level] += 2;
else {
for (i = level + 1; i <= max_level; i++)
wm[i] = 0;
 
break;
wm[level] += 2;
}
}
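/* Illustrative readout: if the punit reports {0, 4, 0, 8} for levels
 * 0-3, level 0 is always bumped to 2, level 1 becomes 6, and levels
 * 2-3 are forced to 0 because a disabled (0us) level disables every
 * level above it, giving {2, 6, 0, 0}. */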
 
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
2285,34 → 2261,19
intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
 
static void ilk_compute_wm_config(struct drm_device *dev,
struct intel_wm_config *config)
{
struct intel_crtc *intel_crtc;
 
/* Compute the currently _active_ config */
for_each_intel_crtc(dev, intel_crtc) {
const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
 
if (!wm->pipe_enabled)
continue;
 
config->sprites_enabled |= wm->sprites_enabled;
config->sprites_scaled |= wm->sprites_scaled;
config->num_pipes_active++;
}
}
 
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
struct intel_pipe_wm *pipe_wm)
static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = intel_crtc->base.dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = NULL;
struct intel_plane *intel_plane;
struct drm_plane_state *ps;
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
struct intel_plane_state *curstate = NULL;
int level, max_level = ilk_wm_max_level(dev);
/* LP0 watermark maximums depend on this pipe alone */
struct intel_wm_config config = {
2320,12 → 2281,26
};
struct ilk_wm_maximums max;
 
cstate = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(cstate))
return PTR_ERR(cstate);
 
pipe_wm = &cstate->wm.optimal.ilk;
memset(pipe_wm, 0, sizeof(*pipe_wm));
 
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) {
sprstate = to_intel_plane_state(intel_plane->base.state);
break;
ps = drm_atomic_get_plane_state(state,
&intel_plane->base);
if (IS_ERR(ps))
return PTR_ERR(ps);
 
if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
pristate = to_intel_plane_state(ps);
else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
sprstate = to_intel_plane_state(ps);
else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
curstate = to_intel_plane_state(ps);
}
}
 
config.sprites_enabled = sprstate->visible;
config.sprites_scaled = sprstate->visible &&
2333,7 → 2308,7
drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
 
pipe_wm->pipe_enabled = cstate->base.active;
pipe_wm->sprites_enabled = sprstate->visible;
pipe_wm->sprites_enabled = config.sprites_enabled;
pipe_wm->sprites_scaled = config.sprites_scaled;
 
/* ILK/SNB: LP2+ watermarks only w/o sprites */
2344,10 → 2319,12
if (config.sprites_scaled)
max_level = 0;
 
ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]);
ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
pipe_wm->linetime = hsw_compute_linetime_wm(dev,
&intel_crtc->base);
 
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2354,7 → 2331,7
 
/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
return false;
return -EINVAL;
 
ilk_compute_wm_reg_maximums(dev, 1, &max);
 
2361,7 → 2338,8
for (level = 1; level <= max_level; level++) {
struct intel_wm_level wm = {};
 
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm);
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
pristate, sprstate, curstate, &wm);
 
/*
* Disable any watermark level that exceeds the
2374,7 → 2352,7
pipe_wm->wm[level] = wm;
}
 
return true;
return 0;
}
 
/*
2389,7 → 2367,9
ret_wm->enable = true;
 
for_each_intel_crtc(dev, intel_crtc) {
const struct intel_pipe_wm *active = &intel_crtc->wm.active;
const struct intel_crtc_state *cstate =
to_intel_crtc_state(intel_crtc->base.state);
const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
const struct intel_wm_level *wm = &active->wm[level];
 
if (!active->pipe_enabled)
2460,7 → 2440,7
* enabled sometime later.
*/
if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
intel_fbc_enabled(dev_priv)) {
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
 
2537,14 → 2517,15
 
/* LP0 register values */
for_each_intel_crtc(dev, intel_crtc) {
const struct intel_crtc_state *cstate =
to_intel_crtc_state(intel_crtc->base.state);
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r =
&intel_crtc->wm.active.wm[0];
const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
 
if (WARN_ON(!r->enable))
continue;
 
results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
 
results->wm_pipe[pipe] =
(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2766,18 → 2747,40
#define SKL_DDB_SIZE 896 /* in blocks */
#define BXT_DDB_SIZE 512
 
/*
* Return the index of a plane in the SKL DDB and wm result arrays. Primary
* plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
* other universal planes are in indices 1..n. Note that this may leave unused
* indices between the top "sprite" plane and the cursor.
*/
static int
skl_wm_plane_id(const struct intel_plane *plane)
{
switch (plane->base.type) {
case DRM_PLANE_TYPE_PRIMARY:
return 0;
case DRM_PLANE_TYPE_CURSOR:
return PLANE_CURSOR;
case DRM_PLANE_TYPE_OVERLAY:
return plane->plane + 1;
default:
MISSING_CASE(plane->base.type);
return plane->plane;
}
}
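/* Resulting index layout on a pipe with two sprite planes (example):
 *   primary            -> 0
 *   sprite 0 (overlay) -> 1
 *   sprite 1 (overlay) -> 2
 *   cursor             -> PLANE_CURSOR (I915_MAX_PLANES - 1)
 */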
 
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
struct drm_crtc *for_crtc,
const struct intel_crtc_state *cstate,
const struct intel_wm_config *config,
const struct skl_pipe_wm_parameters *params,
struct skl_ddb_entry *alloc /* out */)
{
struct drm_crtc *for_crtc = cstate->base.crtc;
struct drm_crtc *crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
 
if (!params->active) {
if (!cstate->base.active) {
alloc->start = 0;
alloc->end = 0;
return;
2832,7 → 2835,10
memset(ddb, 0, sizeof(*ddb));
 
for_each_pipe(dev_priv, pipe) {
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
enum intel_display_power_domain power_domain;
 
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
 
for_each_plane(dev_priv, pipe, plane) {
2844,23 → 2850,35
val = I915_READ(CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
val);
 
intel_display_power_put(dev_priv, power_domain);
}
}
 
static unsigned int
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
int y)
{
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct drm_framebuffer *fb = pstate->fb;
 
/* for planar format */
if (p->y_bytes_per_pixel) {
if (fb->pixel_format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
return intel_crtc->config->pipe_src_w *
intel_crtc->config->pipe_src_h *
drm_format_plane_cpp(fb->pixel_format, 0);
else /* uv-plane data rate */
return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
return (intel_crtc->config->pipe_src_w/2) *
(intel_crtc->config->pipe_src_h/2) *
drm_format_plane_cpp(fb->pixel_format, 1);
}
 
/* for packed formats */
return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
return intel_crtc->config->pipe_src_w *
intel_crtc->config->pipe_src_h *
drm_format_plane_cpp(fb->pixel_format, 0);
}
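/* Worked example for a 1920x1080 NV12 plane: the y-plane rate is
 * 1920 * 1080 * 1 = 2,073,600 (cpp 1 for the Y plane), the uv rate is
 * 960 * 540 * 2 = 1,036,800 (half resolution, cpp 2 for interleaved
 * CbCr). A packed XRGB8888 plane of the same size would yield
 * 1920 * 1080 * 4 = 8,294,400. */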
 
/*
2869,37 → 2887,47
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
const struct skl_pipe_wm_parameters *params)
skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
{
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct drm_device *dev = intel_crtc->base.dev;
const struct intel_plane *intel_plane;
unsigned int total_data_rate = 0;
int plane;
 
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
const struct intel_plane_wm_parameters *p;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
const struct drm_plane_state *pstate = intel_plane->base.state;
 
p = &params->plane[plane];
if (!p->enabled)
if (pstate->fb == NULL)
continue;
 
total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
if (p->y_bytes_per_pixel) {
total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
continue;
 
/* packed/uv */
total_data_rate += skl_plane_relative_data_rate(cstate,
pstate,
0);
 
if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
/* y-plane */
total_data_rate += skl_plane_relative_data_rate(cstate,
pstate,
1);
}
}
 
return total_data_rate;
}
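/* The overflow bound from the comment above, worked out: three
 * contributing planes, each 4096x8192 at 4 bytes per pixel, give
 * 3 * 4096 * 8192 * 4 = 402,653,184, well below 2^32 = 4,294,967,296,
 * so the running total fits in an unsigned int. */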
 
static void
skl_allocate_pipe_ddb(struct drm_crtc *crtc,
const struct intel_wm_config *config,
const struct skl_pipe_wm_parameters *params,
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_wm_config *config = &dev_priv->wm.config;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane;
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
2906,9 → 2934,8
uint16_t minimum[I915_MAX_PLANES];
uint16_t y_minimum[I915_MAX_PLANES];
unsigned int total_data_rate;
int plane;
 
skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0) {
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
2925,17 → 2952,20
alloc->end -= cursor_blocks;
 
/* 1. Allocate the minimum required blocks for each active plane */
for_each_plane(dev_priv, pipe, plane) {
const struct intel_plane_wm_parameters *p;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
int id = skl_wm_plane_id(intel_plane);
 
p = &params->plane[plane];
if (!p->enabled)
if (fb == NULL)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
 
minimum[plane] = 8;
alloc_size -= minimum[plane];
y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
alloc_size -= y_minimum[plane];
minimum[id] = 8;
alloc_size -= minimum[id];
y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
alloc_size -= y_minimum[id];
}
 
/*
2944,19 → 2974,22
*
* FIXME: we may not allocate every single block here.
*/
total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
total_data_rate = skl_get_total_relative_data_rate(cstate);
 
start = alloc->start;
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
const struct intel_plane_wm_parameters *p;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane *plane = &intel_plane->base;
struct drm_plane_state *pstate = intel_plane->base.state;
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
 
p = &params->plane[plane];
if (!p->enabled)
if (pstate->fb == NULL)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
 
data_rate = skl_plane_relative_data_rate(p, 0);
data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
 
/*
* allocation for (packed formats) or (uv-plane part of planar format):
2963,12 → 2996,12
* promote the expression to 64 bits to avoid overflowing, the
* result is < available as data_rate / total_data_rate < 1
*/
plane_blocks = minimum[plane];
plane_blocks = minimum[id];
plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
 
ddb->plane[pipe][plane].start = start;
ddb->plane[pipe][plane].end = start + plane_blocks;
ddb->plane[pipe][id].start = start;
ddb->plane[pipe][id].end = start + plane_blocks;
 
start += plane_blocks;
 
2975,14 → 3008,16
/*
* allocation for y_plane part of planar format:
*/
if (p->y_bytes_per_pixel) {
y_data_rate = skl_plane_relative_data_rate(p, 1);
y_plane_blocks = y_minimum[plane];
if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
y_data_rate = skl_plane_relative_data_rate(cstate,
pstate,
1);
y_plane_blocks = y_minimum[id];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
total_data_rate);
 
ddb->y_plane[pipe][plane].start = start;
ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
ddb->y_plane[pipe][id].start = start;
ddb->y_plane[pipe][id].end = start + y_plane_blocks;
 
start += y_plane_blocks;
}
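/* Worked example of the two-pass split (illustrative numbers): with
 * 200 blocks left after the cursor and two planes whose data rates are
 * in a 3:1 ratio, pass 1 gives each its minimum of 8 blocks (leaving
 * alloc_size = 184), and pass 2 adds 184 * 3/4 = 138 and 184 * 1/4 = 46
 * on top, for totals of 146 and 54 blocks. */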
3052,104 → 3087,27
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
enum pipe pipe = intel_crtc->pipe;
 
if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
sizeof(new_ddb->plane[pipe])))
/*
* If the ddb allocation of any pipe changed, the watermarks may need
* to be recalculated.
*/
if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
return true;
 
if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR],
sizeof(new_ddb->plane[pipe][PLANE_CURSOR])))
return true;
 
return false;
}
 
static void skl_compute_wm_global_parameters(struct drm_device *dev,
struct intel_wm_config *config)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
config->num_pipes_active += to_intel_crtc(crtc)->active;
 
/* FIXME: I don't think we need those two global parameters on SKL */
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
 
config->sprites_enabled |= intel_plane->wm.enabled;
config->sprites_scaled |= intel_plane->wm.scaled;
}
}
 
static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
struct skl_pipe_wm_parameters *p)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
struct drm_framebuffer *fb;
int i = 1; /* Index for sprite planes start */
 
p->active = intel_crtc->active;
if (p->active) {
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
 
fb = crtc->primary->state->fb;
/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
if (fb) {
p->plane[0].enabled = true;
p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
drm_format_plane_cpp(fb->pixel_format, 0) : 0;
p->plane[0].tiling = fb->modifier[0];
} else {
p->plane[0].enabled = false;
p->plane[0].bytes_per_pixel = 0;
p->plane[0].y_bytes_per_pixel = 0;
p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
}
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
p->plane[0].rotation = crtc->primary->state->rotation;
 
fb = crtc->cursor->state->fb;
p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0;
if (fb) {
p->plane[PLANE_CURSOR].enabled = true;
p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8;
p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w;
p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h;
} else {
p->plane[PLANE_CURSOR].enabled = false;
p->plane[PLANE_CURSOR].bytes_per_pixel = 0;
p->plane[PLANE_CURSOR].horiz_pixels = 64;
p->plane[PLANE_CURSOR].vert_pixels = 64;
}
}
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
 
if (intel_plane->pipe == pipe &&
plane->type == DRM_PLANE_TYPE_OVERLAY)
p->plane[i++] = intel_plane->wm;
}
}
 
static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
struct skl_pipe_wm_parameters *p,
struct intel_plane_wm_parameters *p_params,
struct intel_crtc_state *cstate,
struct intel_plane *intel_plane,
uint16_t ddb_allocation,
int level,
uint16_t *out_blocks, /* out */
uint8_t *out_lines /* out */)
{
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
3157,31 → 3115,33
uint32_t selected_result;
uint8_t bytes_per_pixel;
 
if (latency == 0 || !p->active || !p_params->enabled)
if (latency == 0 || !cstate->base.active || !fb)
return false;
 
bytes_per_pixel = p_params->y_bytes_per_pixel ?
p_params->y_bytes_per_pixel :
p_params->bytes_per_pixel;
method1 = skl_wm_method1(p->pixel_rate,
bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
bytes_per_pixel,
latency);
method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal,
p_params->horiz_pixels,
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
cstate->pipe_src_w,
bytes_per_pixel,
p_params->tiling,
fb->modifier[0],
latency);
 
plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
uint32_t min_scanlines = 4;
uint32_t y_tile_minimum;
if (intel_rotation_90_or_270(p_params->rotation)) {
switch (p_params->bytes_per_pixel) {
if (intel_rotation_90_or_270(plane->state->rotation)) {
int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
 
switch (bpp) {
case 1:
min_scanlines = 16;
break;
3205,8 → 3165,8
res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
 
if (level >= 1 && level <= 7) {
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
res_lines += 4;
else
res_blocks++;
3223,84 → 3183,80
 
static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb,
struct skl_pipe_wm_parameters *p,
enum pipe pipe,
struct intel_crtc_state *cstate,
int level,
int num_planes,
struct skl_wm_level *result)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct intel_plane *intel_plane;
uint16_t ddb_blocks;
int i;
enum pipe pipe = intel_crtc->pipe;
 
for (i = 0; i < num_planes; i++) {
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
int i = skl_wm_plane_id(intel_plane);
 
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
 
result->plane_en[i] = skl_compute_plane_wm(dev_priv,
p, &p->plane[i],
cstate,
intel_plane,
ddb_blocks,
level,
&result->plane_res_b[i],
&result->plane_res_l[i]);
}
 
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]);
result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p,
&p->plane[PLANE_CURSOR],
ddb_blocks, level,
&result->plane_res_b[PLANE_CURSOR],
&result->plane_res_l[PLANE_CURSOR]);
}
 
static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
if (!to_intel_crtc(crtc)->active)
if (!cstate->base.active)
return 0;
 
if (WARN_ON(p->pixel_rate == 0))
if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
return 0;
 
return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
skl_pipe_pixel_rate(cstate));
}
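For reference, the linetime watermark above is just ceil(8 * crtc_htotal * 1000 / pixel_rate) with the pixel rate in kHz. A standalone sketch of the arithmetic, using illustrative mode values rather than real hardware state:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t linetime_wm(uint32_t crtc_htotal, uint32_t pixel_rate)
{
	if (pixel_rate == 0)
		return 0;
	return DIV_ROUND_UP(8 * crtc_htotal * 1000, pixel_rate);
}

int main(void)
{
	/* e.g. 1920x1080@60: htotal 2200, pixel rate 148500 kHz -> 119 */
	printf("linetime = %u\n", (unsigned)linetime_wm(2200, 148500));
	return 0;
}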
 
static void skl_compute_transition_wm(struct drm_crtc *crtc,
struct skl_pipe_wm_parameters *params,
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
struct skl_wm_level *trans_wm /* out */)
{
struct drm_crtc *crtc = cstate->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i;
struct intel_plane *intel_plane;
 
if (!params->active)
if (!cstate->base.active)
return;
 
/* Until we know more, just disable transition WMs */
for (i = 0; i < intel_num_planes(intel_crtc); i++)
for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
int i = skl_wm_plane_id(intel_plane);
 
trans_wm->plane_en[i] = false;
trans_wm->plane_en[PLANE_CURSOR] = false;
}
}
 
static void skl_compute_pipe_wm(struct drm_crtc *crtc,
static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb,
struct skl_pipe_wm_parameters *params,
struct skl_pipe_wm *pipe_wm)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int level, max_level = ilk_wm_max_level(dev);
 
for (level = 0; level <= max_level; level++) {
skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
level, intel_num_planes(intel_crtc),
&pipe_wm->wm[level]);
skl_compute_wm_level(dev_priv, ddb, cstate,
level, &pipe_wm->wm[level]);
}
pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
 
skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
}
 
static void skl_compute_wm_results(struct drm_device *dev,
struct skl_pipe_wm_parameters *p,
struct skl_pipe_wm *p_wm,
struct skl_wm_values *r,
struct intel_crtc *intel_crtc)
3357,7 → 3313,8
r->wm_linetime[pipe] = p_wm->linetime;
}
 
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
i915_reg_t reg,
const struct skl_ddb_entry *entry)
{
if (entry->end)
3372,7 → 3329,7
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
for_each_intel_crtc(dev, crtc) {
int i, level, max_level = ilk_wm_max_level(dev);
enum pipe pipe = crtc->pipe;
 
3544,21 → 3501,19
}
 
static bool skl_update_pipe_wm(struct drm_crtc *crtc,
struct skl_pipe_wm_parameters *params,
struct intel_wm_config *config,
struct skl_ddb_allocation *ddb, /* out */
struct skl_pipe_wm *pipe_wm /* out */)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 
skl_compute_wm_pipe_parameters(crtc, params);
skl_allocate_pipe_ddb(crtc, config, params, ddb);
skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
skl_allocate_pipe_ddb(cstate, ddb);
skl_compute_pipe_wm(cstate, ddb, pipe_wm);
 
if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
return false;
 
intel_crtc->wm.skl_active = *pipe_wm;
intel_crtc->wm.active.skl = *pipe_wm;
 
return true;
}
3565,7 → 3520,6
 
static void skl_update_other_pipe_wm(struct drm_device *dev,
struct drm_crtc *crtc,
struct intel_wm_config *config,
struct skl_wm_values *r)
{
struct intel_crtc *intel_crtc;
3584,9 → 3538,7
* Otherwise, because of this_crtc being freshly enabled/disabled, the
* other active pipes need new DDB allocation and WM values.
*/
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
struct skl_pipe_wm_parameters params = {};
for_each_intel_crtc(dev, intel_crtc) {
struct skl_pipe_wm pipe_wm = {};
bool wm_changed;
 
3597,7 → 3549,6
continue;
 
wm_changed = skl_update_pipe_wm(&intel_crtc->base,
&params, config,
&r->ddb, &pipe_wm);
 
/*
3607,7 → 3558,7
*/
WARN_ON(!wm_changed);
 
skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
r->dirty[intel_crtc->pipe] = true;
}
}
3637,10 → 3588,9
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_pipe_wm_parameters params = {};
struct skl_wm_values *results = &dev_priv->wm.skl_results;
struct skl_pipe_wm pipe_wm = {};
struct intel_wm_config config = {};
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
 
 
/* Clear all dirty flags */
3648,16 → 3598,13
 
skl_clear_wm(results, intel_crtc->pipe);
 
skl_compute_wm_global_parameters(dev, &config);
 
if (!skl_update_pipe_wm(crtc, &params, &config,
&results->ddb, &pipe_wm))
if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
return;
 
skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
results->dirty[intel_crtc->pipe] = true;
 
skl_update_other_pipe_wm(dev, crtc, &config, results);
skl_update_other_pipe_wm(dev, crtc, results);
skl_write_wm_values(dev_priv, results);
skl_flush_wm_values(dev_priv, results);
 
3665,61 → 3612,33
dev_priv->wm.skl_hw = *results;
}
 
static void
skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enabled, bool scaled)
static void ilk_compute_wm_config(struct drm_device *dev,
struct intel_wm_config *config)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane->state->fb;
struct intel_crtc *crtc;
 
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
/* Compute the currently _active_ config */
for_each_intel_crtc(dev, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
 
/* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
intel_plane->wm.bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
intel_plane->wm.y_bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
if (!wm->pipe_enabled)
continue;
 
/*
* Framebuffer can be NULL on plane disable, but it does not
* matter for watermarks if we assume no tiling in that case.
*/
if (fb)
intel_plane->wm.tiling = fb->modifier[0];
intel_plane->wm.rotation = plane->state->rotation;
 
skl_update_wm(crtc);
config->sprites_enabled |= wm->sprites_enabled;
config->sprites_scaled |= wm->sprites_scaled;
config->num_pipes_active++;
}
}
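ilk_compute_wm_config() above folds each pipe's already-computed active watermark state into one global config: OR the sprite flags, count the active pipes. A pared-down sketch of that aggregation, with the structs reduced to stand-ins for the driver's types:

#include <stdbool.h>
#include <stdio.h>

struct pipe_wm {
	bool pipe_enabled, sprites_enabled, sprites_scaled;
};

struct wm_config {
	bool sprites_enabled, sprites_scaled;
	int num_pipes_active;
};

static void compute_wm_config(const struct pipe_wm *wm, int n,
			      struct wm_config *config)
{
	for (int i = 0; i < n; i++) {
		if (!wm[i].pipe_enabled)
			continue;
		config->sprites_enabled |= wm[i].sprites_enabled;
		config->sprites_scaled |= wm[i].sprites_scaled;
		config->num_pipes_active++;
	}
}

int main(void)
{
	struct pipe_wm pipes[3] = {
		{ true, true, false },	/* active, sprite on */
		{ false, false, true },	/* inactive: skipped */
		{ true, false, false },	/* active, no sprites */
	};
	struct wm_config cfg = { 0 };

	compute_wm_config(pipes, 3, &cfg);
	printf("active=%d sprites=%d scaled=%d\n",
	       cfg.num_pipes_active, cfg.sprites_enabled, cfg.sprites_scaled);
	return 0;
}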
 
static void ilk_update_wm(struct drm_crtc *crtc)
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_device *dev = dev_priv->dev;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
struct intel_pipe_wm pipe_wm = {};
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct intel_wm_config config = {};
 
WARN_ON(cstate->base.active != intel_crtc->active);
 
intel_compute_pipe_wm(cstate, &pipe_wm);
 
if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
return;
 
intel_crtc->wm.active = pipe_wm;
 
ilk_compute_wm_config(dev, &config);
 
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3744,15 → 3663,14
ilk_write_wm_values(dev_priv, &results);
}
 
static void
ilk_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enabled, bool scaled)
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 
WARN_ON(cstate->base.active != intel_crtc->active);
 
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
3760,10 → 3678,14
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
intel_wait_for_vblank(dev, intel_plane->pipe);
if (cstate->disable_lp_wm) {
ilk_disable_lp_wm(crtc->dev);
intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
}
 
ilk_update_wm(crtc);
intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
 
ilk_program_watermarks(dev_priv);
}
 
static void skl_pipe_wm_active_state(uint32_t val,
3816,7 → 3738,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
enum pipe pipe = intel_crtc->pipe;
int level, i, max_level;
uint32_t temp;
3860,6 → 3783,8
 
temp = hw->plane_trans[pipe][PLANE_CURSOR];
skl_pipe_wm_active_state(temp, active, true, true, i, 0);
 
intel_crtc->wm.active.skl = *active;
}
 
void skl_wm_get_hw_state(struct drm_device *dev)
3879,9 → 3804,10
struct drm_i915_private *dev_priv = dev->dev_private;
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_pipe_wm *active = &intel_crtc->wm.active;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
enum pipe pipe = intel_crtc->pipe;
static const unsigned int wm0_pipe_reg[] = {
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
[PIPE_B] = WM0_PIPEB_ILK,
[PIPE_C] = WM0_PIPEC_IVB,
3920,6 → 3846,8
for (level = 0; level <= max_level; level++)
active->wm[level].enable = true;
}
 
intel_crtc->wm.active.ilk = *active;
}
 
#define _FW_WM(value, plane) \
4145,21 → 4073,6
dev_priv->display.update_wm(crtc);
}
 
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled)
{
struct drm_i915_private *dev_priv = plane->dev->dev_private;
 
if (dev_priv->display.update_sprite_wm)
dev_priv->display.update_sprite_wm(plane, crtc,
sprite_width, sprite_height,
pixel_size, enabled, scaled);
}
 
/**
* Lock protecting IPS related data structures
*/
4381,12 → 4294,6
break;
}
 
/* Once BYT can survive dynamic sw freq adjustments without a
* system hang, this restriction can be lifted.
*/
if (IS_VALLEYVIEW(dev_priv))
goto skip_hw_write;
 
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
4405,7 → 4312,6
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
 
skip_hw_write:
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
4416,9 → 4322,8
{
u32 mask = 0;
 
/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
if (val > dev_priv->rps.min_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
4435,7 → 4340,7
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
return;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4522,7 → 4427,7
{
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4536,13 → 4441,12
 
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
4590,7 → 4494,7
 
void intel_set_rps(struct drm_device *dev, u8 val)
{
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
valleyview_set_rps(dev, val);
else
gen6_set_rps(dev, val);
4634,7 → 4538,7
 
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
mode = GEN6_RC_CTL_RC6_ENABLE;
else
4711,7 → 4615,8
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
4723,7 → 4628,7
dev_priv->rps.max_freq);
}
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/* Store the frequency values in 16.66 MHz units, which is
the natural hardware unit for SKL */
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
4760,7 → 4665,7
gen6_init_rps_frequencies(dev);
 
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
4828,8 → 4733,8
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off");
/* WaRsUseTimeoutMode */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) {
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
5072,7 → 4977,7
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5090,7 → 4995,7
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
 
if (IS_SKYLAKE(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
5225,7 → 5130,17
 
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
u32 val;
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
/*
* According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
* for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
* a BYT-M B0 the above register contains 0xbf. Moreover when setting
* a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
* to make sure it matches what Punit accepts.
*/
return max_t(u32, val, 0xc0);
}
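The clamp above is the whole fix: whatever the Punit register reports, the driver never uses a GPLL minimum below 0xc0. A trivial userspace analogue of the same logic, with the register read stubbed by the anomalous BYT-M B0 value from the comment:

#include <stdint.h>
#include <stdio.h>

/* Equivalent of max_t(u32, val, 0xc0) applied to the low byte. */
static uint32_t sanitize_min_freq(uint32_t punit_val)
{
	uint32_t val = punit_val & 0xff;
	return val > 0xc0 ? val : 0xc0;
}

int main(void)
{
	printf("min freq: 0x%x\n", (unsigned)sanitize_min_freq(0xbf)); /* -> 0xc0 */
	return 0;
}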
 
/* Check that the pctx buffer wasn't moved under us. */
6130,7 → 6045,17
 
void intel_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
/*
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
*/
if (!i915.enable_rc6) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
intel_runtime_pm_get(dev_priv);
}
 
if (IS_CHERRYVIEW(dev))
cherryview_init_gt_powersave(dev);
6140,10 → 6065,15
 
void intel_cleanup_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_CHERRYVIEW(dev))
return;
else if (IS_VALLEYVIEW(dev))
valleyview_cleanup_gt_powersave(dev);
 
if (!i915.enable_rc6)
intel_runtime_pm_put(dev_priv);
}
 
static void gen6_suspend_rps(struct drm_device *dev)
6218,7 → 6148,7
} else if (INTEL_INFO(dev)->gen >= 9) {
gen9_enable_rc6(dev);
gen9_enable_rps(dev);
if (IS_SKYLAKE(dev))
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
__gen6_update_ring_freq(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
6811,20 → 6741,9
 
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
u32 val;
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
/*
* On driver load, a pipe may be active and driving a DSI display.
* Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
val = I915_READ(DSPCLK_GATE_D);
val &= DPOUNIT_CLOCK_GATE_DISABLE;
val |= VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
 
/*
* Disable trickle feed and enable pnd deadline calculation
*/
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
7091,7 → 7010,6
dev_priv->display.init_clock_gating =
bxt_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
 
7100,7 → 7018,7
(!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
7365,4 → 7283,6
INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
 
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
atomic_set(&dev_priv->pm.atomic_seq, 0);
}
/drivers/video/drm/i915/intel_psr.c
80,7 → 80,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
 
151,6 → 151,24
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}
 
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return DP_AUX_CH_CTL(port);
else
return EDP_PSR_AUX_CTL;
}
 
static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return DP_AUX_CH_DATA(port, index);
else
return EDP_PSR_AUX_DATA(index);
}
 
static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
157,7 → 175,7
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
uint32_t aux_data_reg, aux_ctl_reg;
i915_reg_t aux_ctl_reg;
int precharge = 0x3;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
166,6 → 184,7
[3] = 1 - 1,
[4] = DP_SET_POWER_D0,
};
enum port port = dig_port->port;
int i;
 
BUILD_BUG_ON(sizeof(aux_msg) > 20);
172,9 → 191,6
 
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
 
/* Enable AUX frame sync at sink */
if (dev_priv->psr.aux_frame_sync)
drm_dp_dpcd_writeb(&intel_dp->aux,
181,14 → 197,11
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
DP_AUX_FRAME_SYNC_ENABLE);
 
aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
 
/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
I915_WRITE(aux_data_reg + i,
I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
if (INTEL_INFO(dev)->gen >= 9) {
254,37 → 267,66
struct drm_i915_private *dev_priv = dev->dev_private;
 
uint32_t max_sleep_time = 0x1f;
/* Lately it was identified that, depending on the panel, the idle
* frame count calculated by HW can be off by 1. So let's use what
* came from VBT + 1.
* There are also other cases where the panel demands at least 4
* but VBT is not being set. To cover these 2 cases let's use
* at least 5 when VBT isn't set, to be on the safe side.
/*
* Let's respect VBT in case VBT asks for a higher idle_frame value.
* Let's use 6 as the minimum to cover all known cases, including
* the off-by-one issue that HW has in some cases. Also there are
* cases where the sink should be able to train
* with the 5 or 6 idle patterns.
*/
uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
dev_priv->vbt.psr.idle_frames + 1 : 5;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
uint32_t val = EDP_PSR_ENABLE;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
/* It doesn't mean we shouldn't send TPS patterns, so let's
send the minimal TP1 possible and skip TP2. */
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
 
if (IS_HASWELL(dev))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
val |= EDP_PSR_TP1_TIME_2500us;
else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
val |= EDP_PSR_TP1_TIME_500us;
else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
val |= EDP_PSR_TP1_TIME_100us;
else
val |= EDP_PSR_TP1_TIME_0us;
 
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR_TP2_TP3_TIME_2500us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
val |= EDP_PSR_TP2_TP3_TIME_500us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
val |= EDP_PSR_TP2_TP3_TIME_100us;
else
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
/* Sink should be able to train with the 5 or 6 idle patterns */
idle_frames += 4;
}
 
I915_WRITE(EDP_PSR_CTL(dev), val |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP1_TP3_SEL;
else
val |= EDP_PSR_TP1_TP2_SEL;
 
if (dev_priv->psr.psr2_support)
I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
I915_WRITE(EDP_PSR_CTL, val);
 
if (!dev_priv->psr.psr2_support)
return;
 
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
* good enough. */
val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
 
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR2_TP2_TIME_2500;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
val |= EDP_PSR2_TP2_TIME_500;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
val |= EDP_PSR2_TP2_TIME_100;
else
val |= EDP_PSR2_TP2_TIME_50;
 
I915_WRITE(EDP_PSR2_CTL, val);
}
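The VBT wakeup-time bucketing above repeats for the TP1, TP2/TP3 and PSR2 TP2 fields: only a few discrete link-training wake times are encodable in the register, so the VBT value is mapped to the smallest bucket above it. A sketch of that mapping (the enum values are illustrative, not the real register encodings, and the exact VBT units are left aside):

#include <stdio.h>

enum tp_time { TP_0US, TP_100US, TP_500US, TP_2500US };

/* Same threshold ladder as the driver code above. */
static enum tp_time tp_bucket(int vbt_wakeup_time)
{
	if (vbt_wakeup_time > 5)
		return TP_2500US;
	else if (vbt_wakeup_time > 1)
		return TP_500US;
	else if (vbt_wakeup_time > 0)
		return TP_100US;
	return TP_0US;
}

int main(void)
{
	printf("vbt=3 -> bucket %d\n", tp_bucket(3));	/* TP_500US */
	return 0;
}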
 
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
324,8 → 366,8
return false;
}
 
if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) ||
(dig_port->port != PORT_A))) {
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
return false;
}
340,7 → 382,7
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
 
403,9 → 445,14
skl_psr_setup_su_vsc(intel_dp);
}
 
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD);
/*
* Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
* Also mask LPSP to avoid dependency on other drivers that
* might block runtime_pm besides preventing other hw tracking
* issues now we can rely on frontbuffer tracking.
*/
I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
427,6 → 474,19
vlv_psr_enable_source(intel_dp);
}
 
/*
* FIXME: Activation should happen immediately since this function
* is just called after pipe is fully trained and enabled.
* However, on every platform we face issues when the first
* activation follows a modeset too quickly.
* - On VLV/CHV we get a blank screen on first activation
* - On HSW/BDW we get a recoverable frozen screen until next
* exit-activate sequence.
*/
if (INTEL_INFO(dev)->gen < 9)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
 
dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
466,17 → 526,17
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
I915_WRITE(EDP_PSR_CTL,
I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
}
 
498,11 → 558,15
return;
}
 
/* Disable PSR on Source */
if (HAS_DDI(dev))
hsw_psr_disable(intel_dp);
else
vlv_psr_disable(intel_dp);
 
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
 
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
 
523,7 → 587,7
* and be ready for re-enable.
*/
if (HAS_DDI(dev_priv->dev)) {
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
566,11 → 630,11
return;
 
if (HAS_DDI(dev)) {
val = I915_READ(EDP_PSR_CTL(dev));
val = I915_READ(EDP_PSR_CTL);
 
WARN_ON(!(val & EDP_PSR_ENABLE));
 
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
} else {
val = I915_READ(VLV_PSRCTL(pipe));
 
620,7 → 684,7
* Single frame update is already supported on BDW+ but it requires
* many W/A and it isn't really needed.
*/
if (!IS_VALLEYVIEW(dev))
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
return;
 
mutex_lock(&dev_priv->psr.lock);
700,7 → 764,6
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
714,29 → 777,14
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
if (HAS_DDI(dev)) {
/*
* By definition every flush should mean invalidate + flush,
* however on core platforms let's minimize the
* disable/re-enable so we can avoid the invalidate when a flip
* originated the flush.
*/
if (frontbuffer_bits && origin != ORIGIN_FLIP)
intel_psr_exit(dev);
} else {
/*
* On Valleyview and Cherryview we don't use hardware tracking
* so plane updates and cursor moves don't result in a PSR
* invalidate. This means we need to manually fake this in
* software for all flushes.
*/
/* By definition flush = invalidate + flush */
if (frontbuffer_bits)
intel_psr_exit(dev);
}
 
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(delay_ms));
// if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
// if (!work_busy(&dev_priv->psr.work.work))
// schedule_delayed_work(&dev_priv->psr.work,
// msecs_to_jiffies(100));
mutex_unlock(&dev_priv->psr.lock);
}
 
751,6 → 799,9
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
 
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
}
/drivers/video/drm/i915/intel_ringbuffer.c
27,6 → 27,7
*
*/
 
#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
33,23 → 34,6
#include "i915_trace.h"
#include "intel_drv.h"
 
bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
 
if (!dev)
return false;
 
if (i915.enable_execlists) {
struct intel_context *dctx = ring->default_context;
struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
 
return ringbuf->obj;
} else
return ring->buffer && ring->buffer->obj;
}
 
int __intel_ring_space(int head, int tail, int size)
{
int space = head - tail;
483,7 → 467,7
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 mmio = 0;
i915_reg_t mmio;
 
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
526,7 → 510,7
* invalidating the TLB?
*/
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
u32 reg = RING_INSTPM(ring->mmio_base);
i915_reg_t reg = RING_INSTPM(ring->mmio_base);
 
/* ring should be idle before issuing a sync flush */
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
735,7 → 719,7
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_ring_emit(ring, w->reg[i].addr);
intel_ring_emit_reg(ring, w->reg[i].addr);
intel_ring_emit(ring, w->reg[i].value);
}
intel_ring_emit(ring, MI_NOOP);
768,7 → 752,8
}
 
static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val)
i915_reg_t addr,
const u32 mask, const u32 val)
{
const u32 idx = dev_priv->workarounds.count;
 
926,17 → 911,15
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
INTEL_REVID(dev) == SKL_REVID_B0)) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
}
 
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
946,12 → 929,10
*/
}
 
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
IS_BROXTON(dev)) {
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX);
}
 
/* Wa4x4STCOptimizationDisable:skl,bxt */
/* WaDisablePartialResolveInVc:skl,bxt */
963,24 → 944,22
GEN9_CCS_TLB_PREFETCH_ENABLE);
 
/* WaDisableMaskBasedCammingInRCC:skl,bxt */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
 
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
if (IS_SKYLAKE(dev) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
}
 
/* WaDisableSTUnitPowerOptimization:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
1002,7 → 981,7
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
continue;
 
/*
1040,11 → 1019,7
if (ret)
return ret;
 
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
/* WaDisableHDCInvalidation:skl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
 
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
1053,23 → 1028,24
/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required.
*/
if (INTEL_REVID(dev) <= SKL_REVID_E0)
if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
 
/* WaEnableGapsTsvCreditFix:skl */
if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
}
 
/* WaDisablePowerCompilerClockGating:skl */
if (INTEL_REVID(dev) == SKL_REVID_B0)
if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
/* This is tied to WaForceContextSaveRestoreNonCoherent */
if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
/*
*Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
1078,21 → 1054,23
/* WaForceEnableNonCoherent:skl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT);
 
/* WaDisableHDCInvalidation:skl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
}
 
if (INTEL_REVID(dev) == SKL_REVID_C0 ||
INTEL_REVID(dev) == SKL_REVID_D0)
/* WaBarrierPerformanceFixDisable:skl */
if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
 
/* WaDisableSbeCacheDispatchPortSharing:skl */
if (INTEL_REVID(dev) <= SKL_REVID_F0) {
if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
 
return skl_tune_iz_hashing(ring);
}
1109,11 → 1087,11
 
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
if (INTEL_REVID(dev) == BXT_REVID_A0)
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
/* WaSetClckGatingDisableMedia:bxt */
if (INTEL_REVID(dev) == BXT_REVID_A0) {
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
1123,7 → 1101,7
STALL_DOP_GATING_DISABLE);
 
/* WaDisableSbeCacheDispatchPortSharing:bxt */
if (INTEL_REVID(dev) <= BXT_REVID_B0) {
if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1321,11 → 1299,13
return ret;
 
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
 
if (i915_mmio_reg_valid(mbox_reg)) {
u32 seqno = i915_gem_request_get_seqno(signaller_req);
 
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit_reg(signaller, mbox_reg);
intel_ring_emit(signaller, seqno);
}
}
2027,6 → 2007,8
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
unsigned flags = PIN_OFFSET_BIAS | 4096;
int ret;
 
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
2083,10 → 2065,14
int ret;
 
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (ring == NULL)
if (ring == NULL) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
engine->name);
return ERR_PTR(-ENOMEM);
}
 
ring->ring = engine;
list_add(&ring->link, &engine->buffers);
 
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
2102,8 → 2088,9
 
ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
engine->name, ret);
list_del(&ring->link);
kfree(ring);
return ERR_PTR(ret);
}
2115,6 → 2102,7
intel_ringbuffer_free(struct intel_ringbuffer *ring)
{
intel_destroy_ringbuffer_obj(ring);
list_del(&ring->link);
kfree(ring);
}
 
2130,6 → 2118,7
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->buffers);
i915_gem_batch_pool_init(dev, &ring->batch_pool);
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
2136,8 → 2125,10
init_waitqueue_head(&ring->irq_queue);
 
ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
if (IS_ERR(ringbuf))
return PTR_ERR(ringbuf);
if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf);
goto error;
}
ring->buffer = ringbuf;
 
if (I915_NEED_GFX_HWS(dev)) {
2166,8 → 2157,7
return 0;
 
error:
intel_ringbuffer_free(ringbuf);
ring->buffer = NULL;
intel_cleanup_ring_buffer(ring);
return ret;
}
 
2180,6 → 2170,7
 
dev_priv = to_i915(ring->dev);
 
if (ring->buffer) {
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
2186,6 → 2177,7
intel_unpin_ringbuffer_obj(ring->buffer);
intel_ringbuffer_free(ring->buffer);
ring->buffer = NULL;
}
 
if (ring->cleanup)
ring->cleanup(ring);
2199,6 → 2191,7
 
i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
ring->dev = NULL;
}
 
static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
/drivers/video/drm/i915/intel_ringbuffer.h
100,6 → 100,7
void __iomem *virtual_start;
 
struct intel_engine_cs *ring;
struct list_head link;
 
u32 head;
u32 tail;
157,6 → 158,7
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
struct list_head buffers;
 
/*
* A pool of objects to use as shadow copies of client batch buffers
247,7 → 249,7
/* our mbox written by others */
u32 wait[I915_NUM_RINGS];
/* mboxes this ring signals to */
u32 signal[I915_NUM_RINGS];
i915_reg_t signal[I915_NUM_RINGS];
} mbox;
u64 signal_ggtt[I915_NUM_RINGS];
};
348,7 → 350,11
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
 
bool intel_ring_initialized(struct intel_engine_cs *ring);
static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
return ring->dev != NULL;
}
 
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
441,6 → 447,11
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
i915_reg_t reg)
{
intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
/drivers/video/drm/i915/intel_runtime_pm.c
49,25 → 49,88
* present for a given platform.
*/
 
#define GEN9_ENABLE_DC5(dev) 0
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
 
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
((power_well) = &(power_domains)->power_wells[i]); \
i++) \
if ((power_well)->domains & (domain_mask))
for_each_if ((power_well)->domains & (domain_mask))
 
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
for (i = (power_domains)->power_well_count - 1; \
i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
i--) \
if ((power_well)->domains & (domain_mask))
for_each_if ((power_well)->domains & (domain_mask))
 
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);
 
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
switch (domain) {
case POWER_DOMAIN_PIPE_A:
return "PIPE_A";
case POWER_DOMAIN_PIPE_B:
return "PIPE_B";
case POWER_DOMAIN_PIPE_C:
return "PIPE_C";
case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
return "PIPE_A_PANEL_FITTER";
case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
return "PIPE_B_PANEL_FITTER";
case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
return "PIPE_C_PANEL_FITTER";
case POWER_DOMAIN_TRANSCODER_A:
return "TRANSCODER_A";
case POWER_DOMAIN_TRANSCODER_B:
return "TRANSCODER_B";
case POWER_DOMAIN_TRANSCODER_C:
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
case POWER_DOMAIN_PORT_DDI_A_LANES:
return "PORT_DDI_A_LANES";
case POWER_DOMAIN_PORT_DDI_B_LANES:
return "PORT_DDI_B_LANES";
case POWER_DOMAIN_PORT_DDI_C_LANES:
return "PORT_DDI_C_LANES";
case POWER_DOMAIN_PORT_DDI_D_LANES:
return "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
return "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
return "PORT_CRT";
case POWER_DOMAIN_PORT_OTHER:
return "PORT_OTHER";
case POWER_DOMAIN_VGA:
return "VGA";
case POWER_DOMAIN_AUDIO:
return "AUDIO";
case POWER_DOMAIN_PLLS:
return "PLLS";
case POWER_DOMAIN_AUX_A:
return "AUX_A";
case POWER_DOMAIN_AUX_B:
return "AUX_B";
case POWER_DOMAIN_AUX_C:
return "AUX_C";
case POWER_DOMAIN_AUX_D:
return "AUX_D";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
return "INIT";
case POWER_DOMAIN_MODESET:
return "MODESET";
default:
MISSING_CASE(domain);
return "?";
}
}
 
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
244,13 → 307,7
gen8_irq_power_well_post_enable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
 
if (power_well->data == SKL_DISP_PW_1) {
if (!dev_priv->power_domains.initializing)
intel_prepare_ddi(dev);
gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
}
}
 
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
292,13 → 349,10
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
305,45 → 359,28
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PLLS) | \
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
(POWER_DOMAIN_MASK & ~( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \
SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) | \
BIT(POWER_DOMAIN_INIT))
 
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
354,10 → 391,8
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUDIO) | \
369,11 → 404,15
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
417,48 → 456,121
*/
}
 
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
static void gen9_set_dc_state_debugmask_memory_up(
struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_enable_dc9(dev_priv);
/* The below bit doesn't need to be cleared ever afterwards */
val = I915_READ(DC_STATE_DEBUG);
if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
I915_WRITE(DC_STATE_DEBUG, val);
POSTING_READ(DC_STATE_DEBUG);
}
}
 
DRM_DEBUG_KMS("Enabling DC9\n");
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
u32 state)
{
int rewrites = 0;
int rereads = 0;
u32 v;
 
val = I915_READ(DC_STATE_EN);
val |= DC_STATE_EN_DC9;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
I915_WRITE(DC_STATE_EN, state);
 
/* It has been observed that disabling the dc6 state sometimes
* doesn't stick and the DMC keeps returning the old value. Make sure
* the write really sticks across enough reads, and force a rewrite
* until we are confident the state is exactly what we want.
*/
do {
v = I915_READ(DC_STATE_EN);
 
if (v != state) {
I915_WRITE(DC_STATE_EN, state);
rewrites++;
rereads = 0;
} else if (rereads++ > 5) {
break;
}
 
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
} while (rewrites < 100);
 
if (v != state)
DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
state, v);
 
/* Most of the time we need just one retry; avoid spam */
if (rewrites > 1)
DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
state, rewrites);
}
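gen9_write_dc_state() above is a write/verify/retry loop: rewrite DC_STATE_EN whenever a readback disagrees, and only trust the value after several consecutive matching reads. A stubbed-out userspace sketch of the same control flow, with the MMIO access replaced by a plain variable:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for DC_STATE_EN */

static void reg_write(uint32_t v) { fake_reg = v; }
static uint32_t reg_read(void)    { return fake_reg; }

/* Rewrite until five consecutive reads match; cap at 100 rewrites. */
static int write_verified(uint32_t state)
{
	int rewrites = 0, rereads = 0;
	uint32_t v;

	reg_write(state);
	do {
		v = reg_read();
		if (v != state) {
			reg_write(state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}
	} while (rewrites < 100);

	return v == state ? 0 : -1;
}

int main(void)
{
	printf("%s\n", write_verified(0x2) == 0 ? "ok" : "failed");
	return 0;
}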
 
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
uint32_t val;
uint32_t mask;
 
assert_can_disable_dc9(dev_priv);
mask = DC_STATE_EN_UPTO_DC5;
if (IS_BROXTON(dev_priv))
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
 
DRM_DEBUG_KMS("Disabling DC9\n");
WARN_ON_ONCE(state & ~mask);
 
if (i915.enable_dc == 0)
state = DC_STATE_DISABLE;
else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
state = DC_STATE_EN_UPTO_DC5;
 
if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
gen9_set_dc_state_debugmask_memory_up(dev_priv);
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_DC9;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
 
/* Check if DMC is ignoring our DC state requests */
if ((val & mask) != dev_priv->csr.dc_state)
DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
dev_priv->csr.dc_state, val & mask);
 
val &= ~mask;
val |= state;
 
gen9_write_dc_state(dev_priv, val);
 
dev_priv->csr.dc_state = val & mask;
}
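gen9_set_dc_state() also shows how requests are clamped before being written: the platform decides which states are addressable at all (DC9 only on BXT, DC6 otherwise), and the i915.enable_dc modparam (0 = none, 1 = up to DC5) further limits whatever the caller asked for. A sketch of that clamping with illustrative bit values, not the real DC_STATE_EN encoding:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DC_DISABLE	0u
#define DC_UPTO_DC5	(1u << 0)
#define DC_UPTO_DC6	(1u << 1)
#define DC_DC9		(1u << 3)

static uint32_t clamp_dc_state(uint32_t state, bool is_broxton, int enable_dc)
{
	/* DC5 is always addressable; DC9 on BXT, DC6 elsewhere. */
	uint32_t mask = DC_UPTO_DC5 | (is_broxton ? DC_DC9 : DC_UPTO_DC6);

	state &= mask;
	if (enable_dc == 0)
		state = DC_DISABLE;
	else if (enable_dc == 1 && state > DC_UPTO_DC5)
		state = DC_UPTO_DC5;
	return state;
}

int main(void)
{
	/* DC6 requested, SKL-like platform, modparam limits to DC5. */
	printf("0x%x\n", (unsigned)clamp_dc_state(DC_UPTO_DC6, false, 1));
	return 0;
}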
 
static void gen9_set_dc_state_debugmask_memory_up(
struct drm_i915_private *dev_priv)
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
uint32_t val;
assert_can_enable_dc9(dev_priv);
 
/* The below bit doesn't need to be cleared ever afterwards */
val = I915_READ(DC_STATE_DEBUG);
if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
I915_WRITE(DC_STATE_DEBUG, val);
POSTING_READ(DC_STATE_DEBUG);
DRM_DEBUG_KMS("Enabling DC9\n");
 
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
 
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc9(dev_priv);
 
DRM_DEBUG_KMS("Disabling DC9\n");
 
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
 
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
"CSR program storage start is NULL\n");
WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
 
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
471,8 → 583,7
 
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
WARN_ONCE(dev_priv->pm.suspended,
"DC5 cannot be enabled, if platform is runtime-suspended.\n");
assert_rpm_wakelock_held(dev_priv);
 
assert_csr_loaded(dev_priv);
}
479,8 → 590,6
 
static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
/*
* During initialization, the firmware may not be loaded yet.
* We still want to make sure that the DC enabling flag is cleared.
488,42 → 597,18
if (dev_priv->power_domains.initializing)
return;
 
WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
WARN_ONCE(dev_priv->pm.suspended,
"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
assert_rpm_wakelock_held(dev_priv);
}
 
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_enable_dc5(dev_priv);
 
DRM_DEBUG_KMS("Enabling DC5\n");
 
gen9_set_dc_state_debugmask_memory_up(dev_priv);
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
val |= DC_STATE_EN_UPTO_DC5;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
 
static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_disable_dc5(dev_priv);
 
DRM_DEBUG_KMS("Disabling DC5\n");
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC5;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}
 
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
547,40 → 632,37
if (dev_priv->power_domains.initializing)
return;
 
assert_csr_loaded(dev_priv);
WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be disabled.\n");
}
 
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
{
uint32_t val;
assert_can_disable_dc5(dev_priv);
 
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
assert_can_disable_dc6(dev_priv);
 
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
 
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
 
DRM_DEBUG_KMS("Enabling DC6\n");
 
gen9_set_dc_state_debugmask_memory_up(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
val |= DC_STATE_EN_UPTO_DC6;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}
 
static void skl_disable_dc6(struct drm_i915_private *dev_priv)
void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_disable_dc6(dev_priv);
 
DRM_DEBUG_KMS("Disabling DC6\n");
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC6;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
 
static void skl_set_power_well(struct drm_i915_private *dev_priv,
630,21 → 712,17
!I915_READ(HSW_PWR_WELL_BIOS),
"Invalid for power well status to be enabled, unless done by the BIOS, \
when request is to disable!\n");
if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
power_well->data == SKL_DISP_PW_2) {
if (SKL_ENABLE_DC6(dev)) {
skl_disable_dc6(dev_priv);
if (power_well->data == SKL_DISP_PW_2) {
/*
* DDI buffer programming unnecessary during driver-load/resume
* as it's already done during modeset initialization then.
* It's also invalid here as encoder list is still uninitialized.
* DDI buffer programming unnecessary during
* driver-load/resume as it's already done
* during modeset initialization then. It's
* also invalid here as encoder list is still
* uninitialized.
*/
if (!dev_priv->power_domains.initializing)
intel_prepare_ddi(dev);
} else {
gen9_disable_dc5(dev_priv);
}
}
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}
 
658,36 → 736,11
}
} else {
if (enable_requested) {
if (IS_SKYLAKE(dev) &&
(power_well->data == SKL_DISP_PW_1) &&
(intel_csr_load_status_get(dev_priv) == FW_LOADED))
DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
else {
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
}
 
if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
power_well->data == SKL_DISP_PW_2) {
enum csr_state state;
/* TODO: wait for a completion event or
* similar here instead of busy-waiting
* with the wait_for() macro.
*/
wait_for((state = intel_csr_load_status_get(dev_priv)) !=
FW_UNINITIALIZED, 1000);
if (state != FW_LOADED)
DRM_DEBUG("CSR firmware not ready (%d)\n",
state);
else
if (SKL_ENABLE_DC6(dev))
skl_enable_dc6(dev_priv);
else
gen9_enable_dc5(dev_priv);
}
}
}
 
if (check_fuse_status) {
if (power_well->data == SKL_DISP_PW_1) {
760,6 → 813,41
skl_set_power_well(dev_priv, power_well, false);
}
 
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
 
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
gen9_disable_dc5_dc6(dev_priv);
}
 
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
skl_enable_dc6(dev_priv);
else
gen9_enable_dc5(dev_priv);
}
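/*
 * Note on the i915.enable_dc checks above: per the module parameter's
 * documented convention (an assumption here, not shown in this patch),
 * -1 = auto, 0 = disable DC states, 1 = up to DC5, 2 = up to DC6.
 * Hence "!= 0 && != 1" selects the configurations where DC6 is
 * permitted, and everything else falls back to DC5.
 */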
 
static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (power_well->count > 0) {
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
} else {
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
i915.enable_dc != 1)
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
else
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
}
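/*
 * Summary of the "DC off" well semantics, as implemented by the ops
 * above: the well reads as enabled exactly when all dynamic DC states
 * are disabled, so holding a reference blocks DC5/DC6 and dropping the
 * last reference re-allows the deepest permitted state:
 *
 *	enable  -> gen9_disable_dc5_dc6() -> DC_STATE_DISABLE
 *	disable -> skl_enable_dc6() or gen9_enable_dc5()
 *	sync_hw -> reprogram DC_STATE_EN to match the current refcount
 */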
 
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
974,10 → 1062,12
int power_well_id)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
 
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
for (i = 0; i < power_domains->power_well_count; i++) {
struct i915_power_well *power_well;
 
power_well = &power_domains->power_wells[i];
if (power_well->data == power_well_id)
return power_well;
}
1397,6 → 1487,22
chv_set_pipe_power_well(dev_priv, power_well, false);
}
 
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
 
for_each_power_well(i, power_well, BIT(domain), power_domains) {
if (!power_well->count++)
intel_power_well_enable(dev_priv, power_well);
}
 
power_domains->domain_use_count[domain]++;
}
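/*
 * Callers of the helper above must hold power_domains->lock; both call
 * sites below (intel_display_power_get() and
 * intel_display_power_get_if_enabled()) take it around the call.
 */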
 
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
1412,24 → 1518,53
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
intel_runtime_pm_get(dev_priv);
 
power_domains = &dev_priv->power_domains;
mutex_lock(&power_domains->lock);
 
__intel_display_power_get_domain(dev_priv, domain);
 
mutex_unlock(&power_domains->lock);
}
 
/**
* intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
* @dev_priv: i915 device instance
* @domain: power domain to reference
*
* This function grabs a power domain reference for @domain only if the domain
* is currently enabled and, in that case, ensures that the power domain and
* all its parents stay powered up. Therefore users should only grab a
* reference to the innermost power domain they need.
*
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*
* RETURNS:
* True when a reference was grabbed, false if the domain was not enabled and
* no reference was taken.
*/
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
bool is_enabled;
 
if (!intel_runtime_pm_get_if_in_use(dev_priv))
return false;
 
mutex_lock(&power_domains->lock);
 
for_each_power_well(i, power_well, BIT(domain), power_domains) {
if (!power_well->count++)
intel_power_well_enable(dev_priv, power_well);
if (__intel_display_power_is_enabled(dev_priv, domain)) {
__intel_display_power_get_domain(dev_priv, domain);
is_enabled = true;
} else {
is_enabled = false;
}
 
power_domains->domain_use_count[domain]++;
mutex_unlock(&power_domains->lock);
 
mutex_unlock(&power_domains->lock);
if (!is_enabled)
intel_runtime_pm_put(dev_priv);
 
return is_enabled;
}
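/*
 * Hypothetical usage sketch (illustrative, not part of this revision):
 * the conditional variant lets read-only paths sample hardware state
 * without powering anything up when the domain is already off.
 *
 *	if (intel_display_power_get_if_enabled(dev_priv,
 *					       POWER_DOMAIN_PIPE_A)) {
 *		... read pipe A registers safely ...
 *		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *	}
 */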
 
/**
1452,13 → 1587,17
 
mutex_lock(&power_domains->lock);
 
WARN_ON(!power_domains->domain_use_count[domain]);
WARN(!power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
intel_display_power_domain_str(domain));
power_domains->domain_use_count[domain]--;
 
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
WARN_ON(!power_well->count);
WARN(!power_well->count,
"Use count on power well %s is already zero",
power_well->name);
 
if (!--power_well->count && i915.disable_power_well)
if (!--power_well->count)
intel_power_well_disable(dev_priv, power_well);
}
 
1470,14 → 1609,10
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_AUX_A) | \
1501,10 → 1636,8
#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
 
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
1511,39 → 1644,34
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
 
1591,6 → 1719,13
.is_enabled = skl_power_well_enabled,
};
 
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
.sync_hw = gen9_dc_off_power_well_sync_hw,
.enable = gen9_dc_off_power_well_enable,
.disable = gen9_dc_off_power_well_disable,
.is_enabled = gen9_dc_off_power_well_enabled,
};
 
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
1646,6 → 1781,7
.always_on = 1,
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
.data = PUNIT_POWER_WELL_ALWAYS_ON,
},
{
.name = "display",
1747,20 → 1883,29
.always_on = 1,
.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
.data = SKL_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_1,
},
{
.name = "MISC IO power well",
.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_MISC_IO,
},
{
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
.data = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
1792,6 → 1937,34
},
};
 
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;
 
if (!IS_SKYLAKE(dev_priv))
return;
 
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
 
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
intel_power_well_enable(dev_priv, well);
}
 
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;
 
if (!IS_SKYLAKE(dev_priv))
return;
 
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
 
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
intel_power_well_disable(dev_priv, well);
}
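/*
 * The two helpers above toggle the wells directly through
 * intel_power_well_enable()/intel_power_well_disable() without touching
 * any refcounts, so they are only meant for the display core
 * init/uninit paths below, which call them with power_domains->lock
 * already held.
 */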
 
static struct i915_power_well bxt_power_wells[] = {
{
.name = "always-on",
1806,11 → 1979,17
.data = SKL_DISP_PW_1,
},
{
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
.data = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_2,
}
},
};
 
static int
1820,7 → 1999,7
if (disable_power_well >= 0)
return !!disable_power_well;
 
if (IS_SKYLAKE(dev_priv)) {
if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("Disabling display power well support\n");
return 0;
}
1859,7 → 2038,7
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
} else if (IS_SKYLAKE(dev_priv->dev)) {
} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
set_power_wells(power_domains, skl_power_wells);
} else if (IS_BROXTON(dev_priv->dev)) {
set_power_wells(power_domains, bxt_power_wells);
1874,21 → 2053,6
return 0;
}
 
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
if (!intel_enable_rc6(dev))
return;
 
/* Make sure we're not suspended first. */
pm_runtime_get_sync(device);
}
 
/**
* intel_power_domains_fini - finalizes the power domain structures
* @dev_priv: i915 device instance
1899,15 → 2063,32
*/
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_disable(dev_priv);
struct device *device = &dev_priv->dev->pdev->dev;
 
/* The i915.ko module is still not prepared to be loaded when
/*
* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
* we're going to unload/reload.
* The following also reacquires the RPM reference the core passed
* to the driver during loading, which is dropped in
* intel_runtime_pm_enable(). We have to hand control of the device
* back to the core with this reference held.
*/
intel_display_set_init_power(dev_priv, true);
 
/* Remove the refcount we took to keep power well support disabled. */
if (!i915.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
/*
* Remove the refcount we took in intel_runtime_pm_enable() in case
* the platform doesn't support runtime PM.
*/
if (!HAS_RUNTIME_PM(dev_priv))
pm_runtime_put(device);
}
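/*
 * Reference lifecycle sketch, pieced together from the comments in this
 * function and in intel_runtime_pm_enable() below:
 *
 *	load:   core hands the driver an RPM reference;
 *	        intel_runtime_pm_enable() drops it via
 *	        pm_runtime_put_autosuspend()
 *	unload: intel_power_domains_fini() reacquires it through
 *	        intel_display_set_init_power() before handing the device
 *	        back to the core
 */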
 
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
1922,6 → 2103,47
mutex_unlock(&power_domains->lock);
}
 
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
uint32_t val;
 
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
/* enable PCH reset handshake */
val = I915_READ(HSW_NDE_RSTWRN_OPT);
I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
 
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
skl_pw1_misc_io_init(dev_priv);
mutex_unlock(&power_domains->lock);
 
if (!resume)
return;
 
skl_init_cdclk(dev_priv);
 
if (dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}
 
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
skl_uninit_cdclk(dev_priv);
 
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
skl_pw1_misc_io_fini(dev_priv);
mutex_unlock(&power_domains->lock);
}
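/*
 * Sequencing note for the two functions above: init goes DC off ->
 * PCH reset handshake -> PG1/Misc I/O -> (on resume only) cdclk init
 * and DMC firmware reload, while uninit mirrors it as DC off -> cdclk
 * uninit -> PG1/Misc I/O off, leaving the handshake flag set since the
 * spec doesn't call for clearing it.
 */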
 
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
2044,7 → 2266,7
* This function initializes the hardware power domain state and enables all
* power domains using intel_display_set_init_power().
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
struct drm_device *dev = dev_priv->dev;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
2051,7 → 2273,9
 
power_domains->initializing = true;
 
if (IS_CHERRYVIEW(dev)) {
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
skl_display_core_init(dev_priv, resume);
} else if (IS_CHERRYVIEW(dev)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
mutex_unlock(&power_domains->lock);
2063,11 → 2287,34
 
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_resume(dev_priv);
/* If the user disabled power well support, take a reference to keep the wells enabled at all times. */
if (!i915.disable_power_well)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_power_domains_sync_hw(dev_priv);
power_domains->initializing = false;
}
 
/**
* intel_power_domains_suspend - suspend power domain state
* @dev_priv: i915 device instance
*
* This function prepares the hardware power domain state before entering
* system suspend. It must be paired with intel_power_domains_init_hw().
*/
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
/*
* Even if power well support was disabled, we still want the power
* wells turned off while the system is suspended.
*/
if (!i915.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_display_core_uninit(dev_priv);
}
 
/**
* intel_runtime_pm_get - grab a runtime pm reference
* @dev_priv: i915 device instance
*
2082,14 → 2329,48
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
pm_runtime_get_sync(device);
 
pm_runtime_get_sync(device);
WARN(dev_priv->pm.suspended, "Device still suspended.\n");
atomic_inc(&dev_priv->pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
}
 
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
* @dev_priv: i915 device instance
*
* This function grabs a device-level runtime pm reference if the device is
* already in use and ensures that it is powered up.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
*/
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (IS_ENABLED(CONFIG_PM)) {
int ret = pm_runtime_get_if_in_use(device);
 
/*
* In cases where runtime PM is disabled by the RPM core and we get
* an -EINVAL return value, we are not supposed to call this
* function, since the power state is undefined. At the moment this
* applies to the late/early system suspend/resume handlers.
*/
WARN_ON_ONCE(ret < 0);
if (ret <= 0)
return false;
}
 
atomic_inc(&dev_priv->pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
 
return true;
}
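/*
 * Hypothetical usage sketch (illustrative, not part of this revision):
 * opportunistic work that should only run while the device is awake
 * anyway, without forcing a runtime resume.
 *
 *	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
 *		... touch the hardware ...
 *		intel_runtime_pm_put(dev_priv);
 *	}
 */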
 
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @dev_priv: i915 device instance
*
2111,11 → 2392,10
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
assert_rpm_wakelock_held(dev_priv);
pm_runtime_get_noresume(device);
 
WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
pm_runtime_get_noresume(device);
atomic_inc(&dev_priv->pm.wakeref_count);
}
 
/**
2131,8 → 2411,9
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
assert_rpm_wakelock_held(dev_priv);
if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
atomic_inc(&dev_priv->pm.atomic_seq);
 
pm_runtime_mark_last_busy(device);
pm_runtime_put_autosuspend(device);
2153,22 → 2434,27
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
pm_runtime_mark_last_busy(device);
 
/*
* RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
* requirement.
* Take a permanent reference to disable the RPM functionality and drop
* it only when unloading the driver. Use the low level get/put helpers,
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
if (!intel_enable_rc6(dev)) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
return;
if (!HAS_RUNTIME_PM(dev)) {
pm_runtime_dont_use_autosuspend(device);
pm_runtime_get_sync(device);
} else {
pm_runtime_use_autosuspend(device);
}
 
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
pm_runtime_mark_last_busy(device);
pm_runtime_use_autosuspend(device);
 
/*
* The core calls the driver load handler with an RPM reference held.
* We drop that here and will reacquire it during unloading in
* intel_power_domains_fini().
*/
pm_runtime_put_autosuspend(device);
}
 
/drivers/video/drm/i915/intel_sdvo.c
74,7 → 74,7
struct i2c_adapter ddc;
 
/* Register for the SDVO device: SDVOB or SDVOC */
uint32_t sdvo_reg;
i915_reg_t sdvo_reg;
 
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
120,8 → 120,7
*/
bool is_tv;
 
/* On different gens SDVOB is at different places. */
bool is_sdvob;
enum port port;
 
/* This is for current tv format name */
int tv_format_index;
245,7 → 244,7
u32 bval = val, cval = val;
int i;
 
if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
if (HAS_PCH_SPLIT(dev_priv)) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
POSTING_READ(intel_sdvo->sdvo_reg);
/*
259,7 → 258,7
return;
}
 
if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
if (intel_sdvo->port == PORT_B)
cval = I915_READ(GEN3_SDVOC);
else
bval = I915_READ(GEN3_SDVOB);
422,7 → 421,7
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
 
#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
#define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
 
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
1282,14 → 1281,10
sdvox |= SDVO_BORDER_ENABLE;
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) {
case GEN3_SDVOB:
if (intel_sdvo->port == PORT_B)
sdvox &= SDVOB_PRESERVE_MASK;
break;
case GEN3_SDVOC:
else
sdvox &= SDVOC_PRESERVE_MASK;
break;
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
 
1464,6 → 1459,13
* matching DP port to be enabled on transcoder A.
*/
if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
temp &= ~SDVO_PIPE_B_SELECT;
temp |= SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
1470,6 → 1472,10
 
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
 
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
}
 
2251,7 → 2257,7
{
struct sdvo_device_mapping *mapping;
 
if (sdvo->is_sdvob)
if (sdvo->port == PORT_B)
mapping = &(dev_priv->sdvo_mappings[0]);
else
mapping = &(dev_priv->sdvo_mappings[1]);
2269,7 → 2275,7
struct sdvo_device_mapping *mapping;
u8 pin;
 
if (sdvo->is_sdvob)
if (sdvo->port == PORT_B)
mapping = &dev_priv->sdvo_mappings[0];
else
mapping = &dev_priv->sdvo_mappings[1];
2307,7 → 2313,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
 
if (sdvo->is_sdvob) {
if (sdvo->port == PORT_B) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
2332,7 → 2338,7
/* No SDVO device info is found for another DVO port,
* so use the mapping assumption we had before BIOS parsing.
*/
if (sdvo->is_sdvob)
if (sdvo->port == PORT_B)
return 0x70;
else
return 0x72;
2939,18 → 2945,31
return i2c_add_adapter(&sdvo->ddc) == 0;
}
 
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
enum port port)
{
if (HAS_PCH_SPLIT(dev_priv))
WARN_ON(port != PORT_B);
else
WARN_ON(port != PORT_B && port != PORT_C);
}
 
bool intel_sdvo_init(struct drm_device *dev,
i915_reg_t sdvo_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
 
assert_sdvo_port_valid(dev_priv, port);
 
intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
 
intel_sdvo->sdvo_reg = sdvo_reg;
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->port = port;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
2959,7 → 2978,8
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
NULL);
 
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
3000,8 → 3020,10
* hotplug lines.
*/
if (intel_sdvo->hotplug_active) {
intel_encoder->hpd_pin =
intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
if (intel_sdvo->port == PORT_B)
intel_encoder->hpd_pin = HPD_SDVO_B;
else
intel_encoder->hpd_pin = HPD_SDVO_C;
}
 
/*
/drivers/video/drm/i915/intel_sprite.c
197,7 → 197,6
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(drm_plane->state)->ckey;
u32 surf_addr;
217,10 → 216,6
rotation = drm_plane->state->rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
 
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
 
302,8 → 297,6
 
I915_WRITE(PLANE_SURF(pipe, plane), 0);
POSTING_READ(PLANE_SURF(pipe, plane));
 
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
 
static void
546,10 → 539,6
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
src_w--;
src_h--;
683,10 → 672,6
if (IS_GEN6(dev))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
 
intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
src_w--;
src_h--;
837,8 → 822,8
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
if (hscale < 0) {
DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
drm_rect_debug_print(src, true);
drm_rect_debug_print(dst, false);
drm_rect_debug_print("src: ", src, true);
drm_rect_debug_print("dst: ", dst, false);
 
return hscale;
}
846,8 → 831,8
vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (vscale < 0) {
DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
drm_rect_debug_print(src, true);
drm_rect_debug_print(dst, false);
drm_rect_debug_print("src: ", src, true);
drm_rect_debug_print("dst: ", dst, false);
 
return vscale;
}
943,9 → 928,6
 
crtc = crtc ? crtc : plane->crtc;
 
if (!crtc->state->active)
return;
 
if (state->visible) {
intel_plane->update_plane(plane, crtc, fb,
state->dst.x1, state->dst.y1,
974,7 → 956,7
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
 
if (IS_VALLEYVIEW(dev) &&
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
 
1109,7 → 1091,7
intel_plane->max_downscale = 1;
}
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
 
1146,7 → 1128,7
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY);
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
kfree(intel_plane);
goto out;
/drivers/video/drm/i915/intel_uncore.c
29,20 → 29,8
 
#define FORCEWAKE_ACK_TIMEOUT_MS 50
 
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
 
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
 
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
 
#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
 
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
 
static const char * const forcewake_domain_names[] = {
"render",
"blitter",
62,17 → 50,10
return "unknown";
}
 
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
"Device suspended\n");
}
 
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
WARN_ON(d->reg_set == 0);
WARN_ON(!i915_mmio_reg_valid(d->reg_set));
__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}
 
120,7 → 101,7
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
/* something from the same cacheline, but not from the set register */
if (d->reg_post)
if (i915_mmio_reg_valid(d->reg_post))
__raw_posting_read(d->i915, d->reg_post);
}
 
250,7 → 231,7
struct intel_uncore_forcewake_domain *domain = (void *)arg;
unsigned long irqflags;
 
assert_device_not_suspended(domain->i915);
assert_rpm_device_not_suspended(domain->i915);
 
spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
425,7 → 406,7
if (!dev_priv->uncore.funcs.force_wake_get)
return;
 
WARN_ON(dev_priv->pm.suspended);
assert_rpm_wakelock_held(dev_priv);
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
__intel_uncore_forcewake_get(dev_priv, fw_domains);
527,8 → 508,7
}
 
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 && (reg) != FORCEWAKE)
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
 
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
 
607,8 → 587,8
}
 
static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
bool before)
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
i915_reg_t reg, bool read, bool before)
{
const char *op = read ? "reading" : "writing to";
const char *when = before ? "before" : "after";
618,7 → 598,7
 
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, reg);
when, op, i915_mmio_reg_offset(reg));
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
i915.mmio_debug--; /* Only report the first N failures */
}
643,7 → 623,7
 
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_device_not_suspended(dev_priv);
assert_rpm_wakelock_held(dev_priv);
 
#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
651,7 → 631,7
 
#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN2_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN2_READ_FOOTER; \
659,7 → 639,7
 
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN2_READ_HEADER(x); \
ilk_dummy_write(dev_priv); \
val = __raw_i915_read##x(dev_priv, reg); \
682,9 → 662,10
#undef GEN2_READ_HEADER
 
#define GEN6_READ_HEADER(x) \
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
u##x val = 0; \
assert_device_not_suspended(dev_priv); \
assert_rpm_wakelock_held(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define GEN6_READ_FOOTER \
716,20 → 697,12
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
 
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
 
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (NEEDS_FORCE_WAKE(reg)) \
if (NEEDS_FORCE_WAKE(offset)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
738,12 → 711,17
 
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine = 0; \
GEN6_READ_HEADER(x); \
if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
if (!NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
750,15 → 728,19
 
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine = 0; \
GEN6_READ_HEADER(x); \
if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, \
FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
if (!NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
768,17 → 750,17
 
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (!SKL_NEEDS_FORCE_WAKE(reg)) \
if (!SKL_NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
else \
fw_engine = FORCEWAKE_BLITTER; \
789,10 → 771,6
GEN6_READ_FOOTER; \
}
 
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
814,19 → 792,46
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
 
#define VGPU_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
assert_rpm_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define VGPU_READ_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
 
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
VGPU_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
VGPU_READ_FOOTER; \
}
 
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
 
#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER
 
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_device_not_suspended(dev_priv); \
assert_rpm_wakelock_held(dev_priv); \
 
#define GEN2_WRITE_FOOTER
 
#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
GEN2_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
GEN2_WRITE_FOOTER; \
834,7 → 839,7
 
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
GEN2_WRITE_HEADER; \
ilk_dummy_write(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
857,9 → 862,10
#undef GEN2_WRITE_HEADER
 
#define GEN6_WRITE_HEADER \
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_device_not_suspended(dev_priv); \
assert_rpm_wakelock_held(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define GEN6_WRITE_FOOTER \
867,10 → 873,10
 
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE(reg)) { \
if (NEEDS_FORCE_WAKE(offset)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
__raw_i915_write##x(dev_priv, reg, val); \
882,10 → 888,10
 
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE(reg)) { \
if (NEEDS_FORCE_WAKE(offset)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
898,15 → 904,7
GEN6_WRITE_FOOTER; \
}
 
#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
off_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
 
static const u32 gen8_shadowed_regs[] = {
static const i915_reg_t gen8_shadowed_regs[] = {
FORCEWAKE_MT,
GEN6_RPNSWREQ,
GEN6_RC_VIDEO_FREQ,
917,11 → 915,12
/* TODO: Other registers are not yet used */
};
 
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
if (reg == gen8_shadowed_regs[i])
if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
return true;
 
return false;
929,10 → 928,10
 
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
__raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
942,22 → 941,25
 
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
bool shadowed = is_gen8_shadowed(dev_priv, reg); \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine = 0; \
GEN6_WRITE_HEADER; \
if (!shadowed) { \
if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
} \
if (!NEEDS_FORCE_WAKE(offset) || \
is_gen8_shadowed(dev_priv, reg)) \
fw_engine = 0; \
else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
 
static const u32 gen9_shadowed_regs[] = {
static const i915_reg_t gen9_shadowed_regs[] = {
RING_TAIL(RENDER_RING_BASE),
RING_TAIL(GEN6_BSD_RING_BASE),
RING_TAIL(VEBOX_RING_BASE),
970,11 → 972,12
/* TODO: Other registers are not yet used */
};
 
static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
if (reg == gen9_shadowed_regs[i])
if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
return true;
 
return false;
982,19 → 985,19
 
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (!SKL_NEEDS_FORCE_WAKE(reg) || \
if (!SKL_NEEDS_FORCE_WAKE(offset) || \
is_gen9_shadowed(dev_priv, reg)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
else \
fw_engine = FORCEWAKE_BLITTER; \
1026,10 → 1029,6
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)
 
#undef __gen9_write
#undef __chv_write
1036,10 → 1035,35
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
 
#define VGPU_WRITE_HEADER \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define VGPU_WRITE_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
 
#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
i915_reg_t reg, u##x val, bool trace) { \
VGPU_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
VGPU_WRITE_FOOTER; \
}
 
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)
 
#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER
 
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1059,7 → 1083,8
 
static void fw_domain_init(struct drm_i915_private *dev_priv,
enum forcewake_domain_id domain_id,
u32 reg_set, u32 reg_ack)
i915_reg_t reg_set,
i915_reg_t reg_ack)
{
struct intel_uncore_forcewake_domain *d;
 
1085,12 → 1110,10
d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
}
 
if (IS_VALLEYVIEW(dev_priv))
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
d->reg_post = FORCEWAKE_ACK_VLV;
else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
d->reg_post = ECOBUS;
else
d->reg_post = 0;
 
d->i915 = dev_priv;
d->id = domain_id;
1120,7 → 1143,7
FORCEWAKE_ACK_BLITTER_GEN9);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
if (!IS_CHERRYVIEW(dev))
dev_priv->uncore.funcs.force_wake_put =
1268,12 → 1291,14
#define GEN_RANGE(l, h) GENMASK(h, l)
 
static const struct register_whitelist {
uint64_t offset;
i915_reg_t offset_ldw, offset_udw;
uint32_t size;
/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
uint32_t gen_bitmask;
} whitelist[] = {
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
.size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};
 
int i915_reg_read_ioctl(struct drm_device *dev,
1283,11 → 1308,11
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
unsigned size;
u64 offset;
i915_reg_t offset_ldw, offset_udw;
int i, ret = 0;
 
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (entry->offset == (reg->offset & -entry->size) &&
if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
1299,27 → 1324,28
* be naturally aligned (and those that are not so aligned merely
* limit the available flags for that register).
*/
offset = entry->offset;
offset_ldw = entry->offset_ldw;
offset_udw = entry->offset_udw;
size = entry->size;
size |= reg->offset ^ offset;
size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
 
intel_runtime_pm_get(dev_priv);
 
switch (size) {
case 8 | 1:
reg->val = I915_READ64_2x32(offset, offset+4);
reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
break;
case 8:
reg->val = I915_READ64(offset);
reg->val = I915_READ64(offset_ldw);
break;
case 4:
reg->val = I915_READ(offset);
reg->val = I915_READ(offset_ldw);
break;
case 2:
reg->val = I915_READ16(offset);
reg->val = I915_READ16(offset_ldw);
break;
case 1:
reg->val = I915_READ8(offset);
reg->val = I915_READ8(offset_ldw);
break;
default:
ret = -EINVAL;
1343,6 → 1369,9
if (args->flags || args->pad)
return -EINVAL;
 
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM;
 
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
1354,7 → 1383,10
}
hs = &ctx->hang_stats;
 
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
else
args->reset_count = 0;
 
args->batch_active = hs->batch_active;
args->batch_pending = hs->batch_pending;
1470,7 → 1502,7
}
 
static int wait_for_register(struct drm_i915_private *dev_priv,
const u32 reg,
i915_reg_t reg,
const u32 mask,
const u32 value,
const unsigned long timeout_ms)
/drivers/video/drm/i915/kms_display.c
1,18 → 1,14
 
#define iowrite32(v, addr) writel((v), (addr))
 
#include <drm/drmP.h>
#include <uapi/drm/drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include <drm/drm_plane_helper.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include "i915_drv.h"
 
#include <syscall.h>
#include <display.h>
 
int printf(const char *format, ...);
 
void init_system_cursors(struct drm_device *dev);
 
display_t *os_display;
116,7 → 112,7
}
 
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
/drivers/video/drm/i915/main.c
16,12 → 16,14
#include "bitmap.h"
#include "i915_kos32.h"
 
#define DRV_NAME "i915 v4.4.78"
#define DRV_NAME "i915 v4.5.7"
 
#define I915_DEV_CLOSE 0
#define I915_DEV_INIT 1
#define I915_DEV_READY 2
 
int printf(const char *format, ...);
 
static int my_atoi(char **cmd);
static char* parse_mode(char *p, videomode_t *mode);
void cpu_detect1();
42,7 → 44,7
int x86_clflush_size;
unsigned int tsc_khz;
struct workqueue_struct *system_wq;
int driver_wq_state;
volatile int driver_wq_state;
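/* Written by other threads (e.g. on driver unload) and polled in the
 * work loop below; volatile forces a fresh load on every iteration. */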
struct drm_device *main_device;
struct drm_file *drm_file_handlers[256];
videomode_t usermode;
58,8 → 60,6
unsigned long irqflags;
int tmp;
 
printf("%s\n",__FUNCTION__);
 
while(driver_wq_state == I915_DEV_INIT)
{
jiffies = GetClockNs() / 10000000;
67,9 → 67,7
};
 
if( driver_wq_state == I915_DEV_CLOSE)
{
asm volatile ("int $0x40"::"a"(-1));
};
 
dev_priv = main_device->dev_private;