Subversion Repositories Kolibri OS

Compare Revisions

Rev 7143 → Rev 7144

/drivers/video/drm/drm_atomic.c
28,6 → 28,7
 
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
 
/**
353,6 → 354,8
drm_property_unreference_blob(state->mode_blob);
state->mode_blob = NULL;
 
memset(&state->mode, 0, sizeof(state->mode));
 
if (blob) {
if (blob->length != sizeof(struct drm_mode_modeinfo) ||
drm_mode_convert_umode(&state->mode,
365,7 → 368,6
DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
state->mode.name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
state);
376,6 → 378,58
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
 
/**
* drm_atomic_replace_property_blob - replace a blob property
* @blob: a pointer to the member blob to be replaced
* @new_blob: the new blob to replace with
* @replaced: whether the blob has been replaced
*/
static void
drm_atomic_replace_property_blob(struct drm_property_blob **blob,
struct drm_property_blob *new_blob,
bool *replaced)
{
struct drm_property_blob *old_blob = *blob;
 
if (old_blob == new_blob)
return;
 
if (old_blob)
drm_property_unreference_blob(old_blob);
if (new_blob)
drm_property_reference_blob(new_blob);
*blob = new_blob;
*replaced = true;
 
}
 
static int
drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
struct drm_property_blob **blob,
uint64_t blob_id,
ssize_t expected_size,
bool *replaced)
{
struct drm_device *dev = crtc->dev;
struct drm_property_blob *new_blob = NULL;
 
if (blob_id != 0) {
new_blob = drm_property_lookup_blob(dev, blob_id);
if (new_blob == NULL)
return -EINVAL;
if (expected_size > 0 && expected_size != new_blob->length)
return -EINVAL;
}
 
drm_atomic_replace_property_blob(blob, new_blob, replaced);
 
return 0;
}
 
/**
* drm_atomic_crtc_set_property - set property on CRTC
* @crtc: the drm CRTC to set a property on
* @state: the state object to update with the new property value
397,6 → 451,7
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
bool replaced = false;
int ret;
 
if (property == config->prop_active)
407,8 → 462,31
ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
drm_property_unreference_blob(mode);
return ret;
}
else if (crtc->funcs->atomic_set_property)
} else if (property == config->degamma_lut_property) {
ret = drm_atomic_replace_property_blob_from_id(crtc,
&state->degamma_lut,
val,
-1,
&replaced);
state->color_mgmt_changed = replaced;
return ret;
} else if (property == config->ctm_property) {
ret = drm_atomic_replace_property_blob_from_id(crtc,
&state->ctm,
val,
sizeof(struct drm_color_ctm),
&replaced);
state->color_mgmt_changed = replaced;
return ret;
} else if (property == config->gamma_lut_property) {
ret = drm_atomic_replace_property_blob_from_id(crtc,
&state->gamma_lut,
val,
-1,
&replaced);
state->color_mgmt_changed = replaced;
return ret;
} else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
return -EINVAL;
444,6 → 522,12
*val = state->active;
else if (property == config->prop_mode_id)
*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
else if (property == config->degamma_lut_property)
*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
else if (property == config->ctm_property)
*val = (state->ctm) ? state->ctm->base.id : 0;
else if (property == config->gamma_lut_property)
*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else
1204,14 → 1288,39
*/
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
unsigned crtc_mask = 0;
struct drm_crtc *crtc;
int ret;
bool global = false;
 
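	/*
	 * Note which CRTC locks were taken with this context:
	 * drm_modeset_backoff() below drops every lock, and the context
	 * must be handed back to those CRTCs once all locks have been
	 * reacquired.
	 */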
drm_for_each_crtc(crtc, dev) {
if (crtc->acquire_ctx != state->acquire_ctx)
continue;
 
crtc_mask |= drm_crtc_mask(crtc);
crtc->acquire_ctx = NULL;
}
 
if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
global = true;
 
dev->mode_config.acquire_ctx = NULL;
}
 
retry:
drm_modeset_backoff(state->acquire_ctx);
 
ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
if (ret)
goto retry;
 
drm_for_each_crtc(crtc, dev)
if (drm_crtc_mask(crtc) & crtc_mask)
crtc->acquire_ctx = state->acquire_ctx;
 
if (global)
dev->mode_config.acquire_ctx = state->acquire_ctx;
}
EXPORT_SYMBOL(drm_atomic_legacy_backoff);
 
1343,44 → 1452,23
struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
int ret;
 
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
file_priv->event_space -= sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
 
e = kzalloc(sizeof *e, GFP_KERNEL);
if (e == NULL) {
spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
if (!e)
return NULL;
 
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof e->event;
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
e->base.event = &e->event.base;
e->base.file_priv = file_priv;
e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
 
out:
return e;
ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
if (ret) {
kfree(e);
return NULL;
}
 
static void destroy_vblank_event(struct drm_device *dev,
struct drm_file *file_priv, struct drm_pending_vblank_event *e)
{
unsigned long flags;
 
spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
return e;
}
 
static int atomic_set_prop(struct drm_atomic_state *state,
/drivers/video/drm/drm_atomic_helper.c
67,7 → 67,8
struct drm_crtc_state *crtc_state;
 
if (plane->state->crtc) {
crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
crtc_state = drm_atomic_get_existing_crtc_state(state,
plane->state->crtc);
 
if (WARN_ON(!crtc_state))
return;
76,8 → 77,8
}
 
if (plane_state->crtc) {
crtc_state =
state->crtc_states[drm_crtc_index(plane_state->crtc)];
crtc_state = drm_atomic_get_existing_crtc_state(state,
plane_state->crtc);
 
if (WARN_ON(!crtc_state))
return;
86,111 → 87,186
}
}
 
static bool
check_pending_encoder_assignment(struct drm_atomic_state *state,
struct drm_encoder *new_encoder)
static int handle_conflicting_encoders(struct drm_atomic_state *state,
bool disable_conflicting_encoders)
{
struct drm_connector_state *conn_state;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
int i;
struct drm_encoder *encoder;
unsigned encoder_mask = 0;
int i, ret;
 
/*
* First loop, find all newly assigned encoders from the connectors
* part of the state. If the same encoder is assigned to multiple
* connectors bail out.
*/
for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->best_encoder != new_encoder)
const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_encoder *new_encoder;
 
if (!conn_state->crtc)
continue;
 
/* encoder already assigned and we're trying to re-steal it! */
if (connector->state->best_encoder != conn_state->best_encoder)
return false;
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector, conn_state);
else
new_encoder = funcs->best_encoder(connector);
 
if (new_encoder) {
if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
new_encoder->base.id, new_encoder->name,
connector->base.id, connector->name);
 
return -EINVAL;
}
 
return true;
encoder_mask |= 1 << drm_encoder_index(new_encoder);
}
}
 
static struct drm_crtc *
get_current_crtc_for_encoder(struct drm_device *dev,
if (!encoder_mask)
return 0;
 
/*
* Second loop, iterate over all connectors not part of the state.
*
* If a conflicting encoder is found and disable_conflicting_encoders
* is not set, an error is returned. Userspace can provide a solution
* through the atomic ioctl.
*
* If the flag is set conflicting connectors are removed from the crtc
* and the crtc is disabled if no encoder is left. This preserves
* compatibility with the legacy set_config behavior.
*/
drm_for_each_connector(connector, state->dev) {
struct drm_crtc_state *crtc_state;
 
if (drm_atomic_get_existing_connector_state(state, connector))
continue;
 
encoder = connector->state->best_encoder;
if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
continue;
 
if (!disable_conflicting_encoders) {
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
encoder->base.id, encoder->name,
connector->state->crtc->base.id,
connector->state->crtc->name,
connector->base.id, connector->name);
return -EINVAL;
}
 
conn_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(conn_state))
return PTR_ERR(conn_state);
 
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
encoder->base.id, encoder->name,
conn_state->crtc->base.id, conn_state->crtc->name,
connector->base.id, connector->name);
 
crtc_state = drm_atomic_get_existing_crtc_state(state, conn_state->crtc);
 
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret)
return ret;
 
if (!crtc_state->connector_mask) {
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
NULL);
if (ret < 0)
return ret;
 
crtc_state->active = false;
}
}
 
return 0;
}
 
static void
set_best_encoder(struct drm_atomic_state *state,
struct drm_connector_state *conn_state,
struct drm_encoder *encoder)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_connector *connector;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
 
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
if (conn_state->best_encoder) {
/* Unset the encoder_mask in the old crtc state. */
crtc = conn_state->connector->state->crtc;
 
drm_for_each_connector(connector, dev) {
if (connector->state->best_encoder != encoder)
continue;
/* A NULL crtc is an error here because we should have
* duplicated a NULL best_encoder when crtc was NULL.
* As an exception restoring duplicated atomic state
* during resume is allowed, so don't warn when
* best_encoder is equal to the encoder we intend to set.
*/
WARN_ON(!crtc && encoder != conn_state->best_encoder);
if (crtc) {
crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
 
return connector->state->crtc;
crtc_state->encoder_mask &=
~(1 << drm_encoder_index(conn_state->best_encoder));
}
}
 
return NULL;
if (encoder) {
crtc = conn_state->crtc;
WARN_ON(!crtc);
if (crtc) {
crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
 
crtc_state->encoder_mask |=
1 << drm_encoder_index(encoder);
}
}
 
static int
conn_state->best_encoder = encoder;
}
 
static void
steal_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder,
struct drm_crtc *encoder_crtc)
struct drm_encoder *encoder)
{
struct drm_mode_config *config = &state->dev->mode_config;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int ret;
int i;
 
/*
* We can only steal an encoder coming from a connector, which means we
* must already hold the connection_mutex.
*/
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
for_each_connector_in_state(state, connector, connector_state, i) {
struct drm_crtc *encoder_crtc;
 
if (connector_state->best_encoder != encoder)
continue;
 
encoder_crtc = connector->state->crtc;
 
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
encoder->base.id, encoder->name,
encoder_crtc->base.id, encoder_crtc->name);
 
crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
set_best_encoder(state, connector_state, NULL);
 
crtc_state = drm_atomic_get_existing_crtc_state(state, encoder_crtc);
crtc_state->connectors_changed = true;
 
list_for_each_entry(connector, &config->connector_list, head) {
if (connector->state->best_encoder != encoder)
continue;
 
DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
 
connector_state = drm_atomic_get_connector_state(state,
connector);
if (IS_ERR(connector_state))
return PTR_ERR(connector_state);
 
ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
if (ret)
return ret;
connector_state->best_encoder = NULL;
return;
}
 
return 0;
}
 
static int
update_connector_routing(struct drm_atomic_state *state, int conn_idx)
update_connector_routing(struct drm_atomic_state *state,
struct drm_connector *connector,
struct drm_connector_state *connector_state)
{
const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
struct drm_crtc *encoder_crtc;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct drm_crtc_state *crtc_state;
int idx, ret;
 
connector = state->connectors[conn_idx];
connector_state = state->connector_states[conn_idx];
 
if (!connector)
return 0;
 
DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
197,16 → 273,12
 
if (connector->state->crtc != connector_state->crtc) {
if (connector->state->crtc) {
idx = drm_crtc_index(connector->state->crtc);
 
crtc_state = state->crtc_states[idx];
crtc_state = drm_atomic_get_existing_crtc_state(state, connector->state->crtc);
crtc_state->connectors_changed = true;
}
 
if (connector_state->crtc) {
idx = drm_crtc_index(connector_state->crtc);
 
crtc_state = state->crtc_states[idx];
crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
crtc_state->connectors_changed = true;
}
}
216,7 → 288,7
connector->base.id,
connector->name);
 
connector_state->best_encoder = NULL;
set_best_encoder(state, connector_state, NULL);
 
return 0;
}
245,6 → 317,8
}
 
if (new_encoder == connector_state->best_encoder) {
set_best_encoder(state, connector_state, new_encoder);
 
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
connector->base.id,
connector->name,
256,33 → 330,11
return 0;
}
 
if (!check_pending_encoder_assignment(state, new_encoder)) {
DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
connector->base.id,
connector->name);
return -EINVAL;
}
steal_encoder(state, new_encoder);
 
encoder_crtc = get_current_crtc_for_encoder(state->dev,
new_encoder);
set_best_encoder(state, connector_state, new_encoder);
 
if (encoder_crtc) {
ret = steal_encoder(state, new_encoder, encoder_crtc);
if (ret) {
DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
return ret;
}
}
 
if (WARN_ON(!connector_state->crtc))
return -EINVAL;
 
connector_state->best_encoder = new_encoder;
idx = drm_crtc_index(connector_state->crtc);
 
crtc_state = state->crtc_states[idx];
crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
crtc_state->connectors_changed = true;
 
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
323,8 → 375,8
if (!conn_state->crtc || !conn_state->best_encoder)
continue;
 
crtc_state =
state->crtc_states[drm_crtc_index(conn_state->crtc)];
crtc_state = drm_atomic_get_existing_crtc_state(state,
conn_state->crtc);
 
/*
* Each encoder has at most one connector (since we always steal
445,6 → 497,10
}
}
 
ret = handle_conflicting_encoders(state, state->legacy_set_config);
if (ret)
return ret;
 
for_each_connector_in_state(state, connector, connector_state, i) {
/*
* This only sets crtc->mode_changed for routing changes,
451,7 → 507,8
* drivers must set crtc->mode_changed themselves when connector
* properties need to be updated.
*/
ret = update_connector_routing(state, i);
ret = update_connector_routing(state, connector,
connector_state);
if (ret)
return ret;
}
617,7 → 674,6
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_crtc_state *old_crtc_state;
 
/* Shut down everything that's in the changeset and currently
* still on. So need to check the old, saved state. */
624,7 → 680,8
if (!old_conn_state->crtc)
continue;
 
old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
old_crtc_state = drm_atomic_get_existing_crtc_state(old_state,
old_conn_state->crtc);
 
if (!old_crtc_state->active ||
!drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1719,7 → 1776,7
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
int ret, i, j;
int ret, i;
 
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
state->acquire_ctx);
1726,21 → 1783,11
if (ret)
return ret;
 
/* First grab all affected connector/crtc states. */
for (i = 0; i < set->num_connectors; i++) {
conn_state = drm_atomic_get_connector_state(state,
set->connectors[i]);
if (IS_ERR(conn_state))
return PTR_ERR(conn_state);
}
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
ret = drm_atomic_add_affected_connectors(state, crtc);
/* First disable all connectors on the target crtc. */
ret = drm_atomic_add_affected_connectors(state, set->crtc);
if (ret)
return ret;
}
 
/* Then recompute connector->crtc links and crtc enabling state. */
for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->crtc == set->crtc) {
ret = drm_atomic_set_crtc_for_connector(conn_state,
1748,17 → 1795,20
if (ret)
return ret;
}
}
 
for (j = 0; j < set->num_connectors; j++) {
if (set->connectors[j] == connector) {
/* Then set all connectors from set->connectors on the target crtc */
for (i = 0; i < set->num_connectors; i++) {
conn_state = drm_atomic_get_connector_state(state,
set->connectors[i]);
if (IS_ERR(conn_state))
return PTR_ERR(conn_state);
 
ret = drm_atomic_set_crtc_for_connector(conn_state,
set->crtc);
if (ret)
return ret;
break;
}
}
}
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
/* Don't update ->enable for the CRTC in the set_config request,
1800,6 → 1850,7
if (!state)
return -ENOMEM;
 
state->legacy_set_config = true;
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
ret = __drm_atomic_helper_set_config(set, state);
2446,8 → 2497,12
*/
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
if (crtc->state)
if (crtc->state) {
drm_property_unreference_blob(crtc->state->mode_blob);
drm_property_unreference_blob(crtc->state->degamma_lut);
drm_property_unreference_blob(crtc->state->ctm);
drm_property_unreference_blob(crtc->state->gamma_lut);
}
kfree(crtc->state);
crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
 
2471,10 → 2526,17
 
if (state->mode_blob)
drm_property_reference_blob(state->mode_blob);
if (state->degamma_lut)
drm_property_reference_blob(state->degamma_lut);
if (state->ctm)
drm_property_reference_blob(state->ctm);
if (state->gamma_lut)
drm_property_reference_blob(state->gamma_lut);
state->mode_changed = false;
state->active_changed = false;
state->planes_changed = false;
state->connectors_changed = false;
state->color_mgmt_changed = false;
state->event = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
2515,6 → 2577,9
struct drm_crtc_state *state)
{
drm_property_unreference_blob(state->mode_blob);
drm_property_unreference_blob(state->degamma_lut);
drm_property_unreference_blob(state->ctm);
drm_property_unreference_blob(state->gamma_lut);
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
 
2549,9 → 2614,11
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
 
if (plane->state)
if (plane->state) {
plane->state->plane = plane;
plane->state->rotation = BIT(DRM_ROTATE_0);
}
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
 
/**
2826,3 → 2893,98
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
 
/**
* drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
* @crtc: CRTC object
* @red: red correction table
* @green: green correction table
* @blue: blue correction table
* @start: first entry in the table to set
* @size: size of the tables
*
* Implements support for legacy gamma correction table for drivers
* that support color management through the DEGAMMA_LUT/GAMMA_LUT
* properties.
*/
void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t start, uint32_t size)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
struct drm_property_blob *blob = NULL;
struct drm_color_lut *blob_data;
int i, ret = 0;
 
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
return;
 
blob = drm_property_create_blob(dev,
sizeof(struct drm_color_lut) * size,
NULL);
if (IS_ERR(blob)) {
ret = PTR_ERR(blob);
blob = NULL;
goto fail;
}
 
/* Prepare GAMMA_LUT with the legacy values. */
blob_data = (struct drm_color_lut *) blob->data;
for (i = 0; i < size; i++) {
blob_data[i].red = red[i];
blob_data[i].green = green[i];
blob_data[i].blue = blue[i];
}
 
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
}
 
/* Reset DEGAMMA_LUT and CTM properties. */
ret = drm_atomic_crtc_set_property(crtc, crtc_state,
config->degamma_lut_property, 0);
if (ret)
goto fail;
 
ret = drm_atomic_crtc_set_property(crtc, crtc_state,
config->ctm_property, 0);
if (ret)
goto fail;
 
ret = drm_atomic_crtc_set_property(crtc, crtc_state,
config->gamma_lut_property, blob->base.id);
if (ret)
goto fail;
 
ret = drm_atomic_commit(state);
if (ret)
goto fail;
 
/* Driver takes ownership of state on successful commit. */
 
drm_property_unreference_blob(blob);
 
return;
fail:
if (ret == -EDEADLK)
goto backoff;
 
drm_atomic_state_free(state);
drm_property_unreference_blob(blob);
 
return;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
/drivers/video/drm/drm_bridge.c
186,6 → 186,7
 
drm_bridge_disable(bridge->next);
 
if (bridge->funcs->disable)
bridge->funcs->disable(bridge);
}
EXPORT_SYMBOL(drm_bridge_disable);
206,6 → 207,7
if (!bridge)
return;
 
if (bridge->funcs->post_disable)
bridge->funcs->post_disable(bridge);
 
drm_bridge_post_disable(bridge->next);
256,6 → 258,7
 
drm_bridge_pre_enable(bridge->next);
 
if (bridge->funcs->pre_enable)
bridge->funcs->pre_enable(bridge);
}
EXPORT_SYMBOL(drm_bridge_pre_enable);
276,6 → 279,7
if (!bridge)
return;
 
if (bridge->funcs->enable)
bridge->funcs->enable(bridge);
 
drm_bridge_enable(bridge->next);
/drivers/video/drm/drm_crtc.c
430,9 → 430,7
static void __drm_framebuffer_unregister(struct drm_device *dev,
struct drm_framebuffer *fb)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
mutex_unlock(&dev->mode_config.idr_mutex);
drm_mode_object_put(dev, &fb->base);
 
fb->base.id = 0;
}
1150,6 → 1148,29
EXPORT_SYMBOL(drm_encoder_init);
 
/**
* drm_encoder_index - find the index of a registered encoder
* @encoder: encoder to find index for
*
* Given a registered encoder, return the index of that encoder within a DRM
* device's list of encoders.
*/
unsigned int drm_encoder_index(struct drm_encoder *encoder)
{
unsigned int index = 0;
struct drm_encoder *tmp;
 
drm_for_each_encoder(tmp, encoder->dev) {
if (tmp == encoder)
return index;
 
index++;
}
 
BUG();
}
EXPORT_SYMBOL(drm_encoder_index);
 
/**
* drm_encoder_cleanup - cleans up an initialised encoder
* @encoder: encoder to cleanup
*
1531,6 → 1552,41
return -ENOMEM;
dev->mode_config.prop_mode_id = prop;
 
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"DEGAMMA_LUT", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.degamma_lut_property = prop;
 
prop = drm_property_create_range(dev,
DRM_MODE_PROP_IMMUTABLE,
"DEGAMMA_LUT_SIZE", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.degamma_lut_size_property = prop;
 
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"CTM", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.ctm_property = prop;
 
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"GAMMA_LUT", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.gamma_lut_property = prop;
 
prop = drm_property_create_range(dev,
DRM_MODE_PROP_IMMUTABLE,
"GAMMA_LUT_SIZE", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.gamma_lut_size_property = prop;
 
return 0;
}
 
2747,8 → 2803,6
goto out;
}
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
/*
* Check whether the primary plane supports the fb pixel format.
* Drivers not implementing the universal planes API use a
3289,6 → 3343,24
return 0;
}
 
struct drm_mode_rmfb_work {
struct work_struct work;
struct list_head fbs;
};
 
static void drm_mode_rmfb_work_fn(struct work_struct *w)
{
struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
 
while (!list_empty(&arg->fbs)) {
struct drm_framebuffer *fb =
list_first_entry(&arg->fbs, typeof(*fb), filp_head);
 
list_del_init(&fb->filp_head);
drm_framebuffer_remove(fb);
}
}
 
/**
* drm_mode_rmfb - remove an FB from the configuration
* @dev: drm device for the ioctl
3482,7 → 3554,6
return ret;
}
 
 
/**
* drm_fb_release - remove and free the FBs on this file
* @priv: drm file for the ioctl
/drivers/video/drm/drm_crtc_helper.c
73,9 → 73,6
* &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
* &drm_connector_helper_funcs.
*/
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
 
/**
* drm_helper_move_panel_connectors_to_head() - move panels to the front in the
220,6 → 217,15
* disconnected connectors. Then it will disable all unused encoders and CRTCs
* either by calling their disable callback if available or by calling their
* dpms callback with DRM_MODE_DPMS_OFF.
*
* NOTE:
*
* This function is part of the legacy modeset helper library and will cause
* major confusion with atomic drivers. This is because atomic helpers guarantee
* to never call ->disable() hooks on a disabled function, or ->enable() hooks
* on an enabled functions. drm_helper_disable_unused_functions() on the other
* hand throws such guarantees into the wind and calls disable hooks
* unconditionally on unused functions.
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
328,6 → 334,7
}
 
encoder_funcs = encoder->helper_private;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
334,11 → 341,15
goto done;
}
}
}
 
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
if (crtc_funcs->mode_fixup) {
if (!(ret = crtc_funcs->mode_fixup(crtc, mode,
adjusted_mode))) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done;
}
}
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
crtc->hwmode = *adjusted_mode;
578,8 → 589,6
if (set->crtc->primary->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
} else if (set->fb->pixel_format !=
set->crtc->primary->fb->pixel_format) {
mode_changed = true;
590,7 → 599,7
if (set->x != set->crtc->x || set->y != set->crtc->y)
fb_changed = true;
 
if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
if (!drm_mode_equal(set->mode, &set->crtc->mode)) {
DRM_DEBUG_KMS("modes are different, full mode set\n");
drm_mode_debug_printmodeline(&set->crtc->mode);
drm_mode_debug_printmodeline(set->mode);
1066,3 → 1075,36
return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
 
/**
* drm_helper_crtc_enable_color_mgmt - enable color management properties
* @crtc: DRM CRTC
* @degamma_lut_size: the size of the degamma lut (before CSC)
* @gamma_lut_size: the size of the gamma lut (after CSC)
*
* This function lets the driver enable the color correction properties on a
* CRTC. This includes three blob properties (a degamma LUT, a CSC matrix and a
* gamma LUT) that userspace can set, and two immutable size properties that
* tell userspace the supported LUT sizes.
*/
void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
int degamma_lut_size,
int gamma_lut_size)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
 
drm_object_attach_property(&crtc->base,
config->degamma_lut_property, 0);
drm_object_attach_property(&crtc->base,
config->ctm_property, 0);
drm_object_attach_property(&crtc->base,
config->gamma_lut_property, 0);
 
drm_object_attach_property(&crtc->base,
config->degamma_lut_size_property,
degamma_lut_size);
drm_object_attach_property(&crtc->base,
config->gamma_lut_size_property,
gamma_lut_size);
}
EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
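
A minimal usage sketch, assuming a driver whose hardware implements 257-entry
degamma and gamma LUTs (the sizes here are illustrative, not taken from any
real driver). The helper would be called once per CRTC from the driver's
setup path:

	drm_helper_crtc_enable_color_mgmt(crtc, 257, 257);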
/drivers/video/drm/drm_dp_dual_mode_helper.c
0,0 → 1,366
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drmP.h>
 
/**
* DOC: dp dual mode helpers
*
* Helper functions to deal with DP dual mode (a.k.a. DP++) adaptors.
*
* Type 1:
* Adaptor registers (if any) and the sink DDC bus may be accessed via I2C.
*
* Type 2:
* Adaptor registers and sink DDC bus can be accessed either via I2C or
* I2C-over-AUX. Source devices may choose to implement either of these
* access methods.
*/
 
#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40
 
/**
* drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s)
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for return data
* @size: size of the buffer
*
* Reads @size bytes from the DP dual mode adaptor registers
* starting at @offset.
*
* Returns:
* 0 on success, negative error code on failure
*/
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
u8 offset, void *buffer, size_t size)
{
struct i2c_msg msgs[] = {
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = 0,
.len = 1,
.buf = &offset,
},
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = I2C_M_RD,
.len = size,
.buf = buffer,
},
};
int ret;
 
ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
if (ret < 0)
return ret;
if (ret != ARRAY_SIZE(msgs))
return -EPROTO;
 
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_read);
 
/**
* drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s)
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for write data
* @size: size of the buffer
*
* Writes @size bytes to the DP dual mode adaptor registers
* starting at @offset.
*
* Returns:
* 0 on success, negative error code on failure
*/
ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
u8 offset, const void *buffer, size_t size)
{
struct i2c_msg msg = {
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = 0,
.len = 1 + size,
.buf = NULL,
};
void *data;
int ret;
 
data = kmalloc(msg.len, GFP_TEMPORARY);
if (!data)
return -ENOMEM;
 
msg.buf = data;
 
memcpy(data, &offset, 1);
memcpy(data + 1, buffer, size);
 
ret = i2c_transfer(adapter, &msg, 1);
 
kfree(data);
 
if (ret < 0)
return ret;
if (ret != 1)
return -EPROTO;
 
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_write);
 
static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
{
static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
"DP-HDMI ADAPTOR\x04";
 
return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
sizeof(dp_dual_mode_hdmi_id)) == 0;
}
 
static bool is_type2_adaptor(uint8_t adaptor_id)
{
return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_REV_TYPE2);
}
 
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
* @adapter: I2C adapter for the DDC bus
*
* Attempt to identify the type of the DP dual mode adaptor used.
*
* Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not
* certain whether we're dealing with a native HDMI port or
* a type 1 DVI dual mode adaptor. The driver will have to use
* some other hardware/driver specific mechanism to make that
* distinction.
*
* Returns:
* The type of the DP dual mode adaptor used
*/
enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
{
char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
uint8_t adaptor_id = 0x00;
ssize_t ret;
 
/*
* Let's see if the adaptor is there by reading the
* HDMI ID registers.
*
* Note that type 1 DVI adaptors are not required to implement
* any registers, and that presents a problem for detection.
* If the i2c transfer is nacked, we may or may not be dealing
* with a type 1 DVI adaptor. Some other mechanism of detecting
* the presence of the adaptor is required. One way would be
* to check the state of the CONFIG1 pin. Another method would
* simply require the driver to know whether the port is a DP++
* port or a native HDMI port. Both of these methods are entirely
* hardware/driver specific so we can't deal with them here.
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
hdmi_id, sizeof(hdmi_id));
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
 
/*
* Sigh. Some (maybe all?) type 1 adaptors are broken and ack
* the offset but ignore it, and instead they just always return
* data from the start of the HDMI ID buffer. So for a broken
* type 1 HDMI adaptor a single byte read will always give us
* 0x44, and for a type 1 DVI adaptor it should give 0x00
* (assuming it implements any registers). Fortunately neither
* of those values will match the type 2 signature of the
* DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
* the type 2 adaptor detection safely even in the presence
* of broken type 1 adaptors.
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
if (ret == 0) {
if (is_type2_adaptor(adaptor_id)) {
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE2_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
}
 
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE1_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE1_DVI;
}
EXPORT_SYMBOL(drm_dp_dual_mode_detect);
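
A minimal usage sketch, assuming ddc is the i2c_adapter for the connector's
DDC bus: a source driver detects the adaptor once and derives the TMDS clock
limit from the result.

	enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(ddc);

	if (type != DRM_DP_DUAL_MODE_NONE) {
		int max_tmds_khz = drm_dp_dual_mode_max_tmds_clock(type, ddc);

		DRM_DEBUG_KMS("DP dual mode adaptor: %s, max TMDS clock %d kHz\n",
			      drm_dp_get_dual_mode_type_name(type), max_tmds_khz);
	}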
 
/**
* drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
*
* Determine the max TMDS clock the adaptor supports based on the
* type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK
* register (on type2 adaptors). As some type 1 adaptors have
* problems with registers (see comments in drm_dp_dual_mode_detect())
* we don't read the register on those, instead we simply assume
* a 165 MHz limit based on the specification.
*
* Returns:
* Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz.
*/
int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter)
{
uint8_t max_tmds_clock;
ssize_t ret;
 
/* native HDMI so no limit */
if (type == DRM_DP_DUAL_MODE_NONE)
return 0;
 
/*
* Type 1 adaptors are limited to 165 MHz
* Type 2 adaptors can tell us their limit
*/
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 165000;
 
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK,
&max_tmds_clock, sizeof(max_tmds_clock));
if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) {
DRM_DEBUG_KMS("Failed to query max TMDS clock\n");
return 165000;
}
 
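	/* The register reports the limit in units of 2.5 MHz; convert to kHz. */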
return max_tmds_clock * 5000 / 2;
}
EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);
 
/**
* drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
* @enabled: current state of the TMDS output buffers
*
* Get the state of the TMDS output buffers in the adaptor. For
* type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN
* register. As some type 1 adaptors have problems with registers
* (see comments in drm_dp_dual_mode_detect()) we don't read the
* register on those, instead we simply assume that the buffers
* are always enabled.
*
* Returns:
* 0 on success, negative error code on failure
*/
int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter,
bool *enabled)
{
uint8_t tmds_oen;
ssize_t ret;
 
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) {
*enabled = true;
return 0;
}
 
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n");
return ret;
}
 
*enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE);
 
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
 
/**
* drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
* @enable: enable (as opposed to disable) the TMDS output buffers
*
* Set the state of the TMDS output buffers in the adaptor. For
* type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
* some type 1 adaptors have problems with registers (see comments
* in drm_dp_dual_mode_detect()) we avoid touching the register,
* making this function a no-op on type 1 adaptors.
*
* Returns:
* 0 on success, negative error code on failure
*/
int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable)
{
uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
ssize_t ret;
 
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 0;
 
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
enable ? "enable" : "disable");
return ret;
}
 
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
 
/**
* drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string
* @type: DP dual mode adaptor type
*
* Returns:
* String representation of the DP dual mode adaptor type
*/
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
{
switch (type) {
case DRM_DP_DUAL_MODE_NONE:
return "none";
case DRM_DP_DUAL_MODE_TYPE1_DVI:
return "type 1 DVI";
case DRM_DP_DUAL_MODE_TYPE1_HDMI:
return "type 1 HDMI";
case DRM_DP_DUAL_MODE_TYPE2_DVI:
return "type 2 DVI";
case DRM_DP_DUAL_MODE_TYPE2_HDMI:
return "type 2 HDMI";
default:
WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN);
return "unknown";
}
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
/drivers/video/drm/drm_dp_helper.c
28,6 → 28,7
#include <linux/sched.h>
#include <linux/i2c.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_dp_aux_dev.h>
#include <drm/drmP.h>
 
/**
761,6 → 762,8
*/
int drm_dp_aux_register(struct drm_dp_aux *aux)
{
int ret;
 
mutex_init(&aux->hw_mutex);
 
aux->ddc.algo = &drm_dp_i2c_algo;
/drivers/video/drm/drm_edid.c
32,6 → 32,7
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_displayid.h>
204,7 → 205,7
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0f - 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 0x10 - 1024x768@60Hz */
521,12 → 522,12
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0,
704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
538,7 → 539,7
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1395,6 → 1396,31
EXPORT_SYMBOL(drm_get_edid);
 
/**
* drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
* @connector: connector we're probing
* @adapter: I2C adapter to use for DDC
*
* Wrapper around drm_get_edid() for laptops with dual GPUs using one set of
* outputs. The wrapper adds the requisite vga_switcheroo calls to temporarily
* switch DDC to the GPU which is retrieving EDID.
*
* Return: Pointer to valid EDID or %NULL if we couldn't find any.
*/
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct pci_dev *pdev = connector->dev->pdev;
struct edid *edid;
 
vga_switcheroo_lock_ddc(pdev);
edid = drm_get_edid(connector, adapter);
vga_switcheroo_unlock_ddc(pdev);
 
return edid;
}
EXPORT_SYMBOL(drm_get_edid_switcheroo);
 
/**
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
*
2215,7 → 2241,7
{
int i, j, m, modes = 0;
struct drm_display_mode *mode;
u8 *est = ((u8 *)timing) + 5;
u8 *est = ((u8 *)timing) + 6;
 
for (i = 0; i < 6; i++) {
for (j = 7; j >= 0; j--) {
3282,7 → 3308,7
u8 *cea;
u8 *name;
u8 *db;
int sad_count = 0;
int total_sad_count = 0;
int mnl;
int dbl;
 
3296,6 → 3322,7
 
name = NULL;
drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
/* max: 13 bytes EDID, 16 bytes ELD */
for (mnl = 0; name && mnl < 13; mnl++) {
if (name[mnl] == 0x0a)
break;
3324,11 → 3351,15
dbl = cea_db_payload_len(db);
 
switch (cea_db_tag(db)) {
int sad_count;
 
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = dbl / 3;
if (dbl >= 1)
memcpy(eld + 20 + mnl, &db[1], dbl);
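			/* An ELD can carry at most 15 SADs (4-bit sad_count field); clamp. */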
sad_count = min(dbl / 3, 15 - total_sad_count);
if (sad_count >= 1)
memcpy(eld + 20 + mnl + total_sad_count * 3,
&db[1], sad_count * 3);
total_sad_count += sad_count;
break;
case SPEAKER_BLOCK:
/* Speaker Allocation Data Block */
3345,13 → 3376,13
}
}
}
eld[5] |= sad_count << 4;
eld[5] |= total_sad_count << 4;
 
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
 
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
drm_eld_size(eld), sad_count);
drm_eld_size(eld), total_sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
 
/drivers/video/drm/drm_fb_helper.c
104,7 → 104,7
{
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
int i;
int i, ret;
 
if (!drm_fbdev_emulation)
return 0;
111,14 → 111,10
 
mutex_lock(&dev->mode_config.mutex);
drm_for_each_connector(connector, dev) {
struct drm_fb_helper_connector *fb_helper_connector;
ret = drm_fb_helper_add_one_connector(fb_helper, connector);
 
fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
if (!fb_helper_connector)
if (ret)
goto fail;
 
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
mutex_unlock(&dev->mode_config.mutex);
return 0;
130,7 → 126,7
fb_helper->connector_count = 0;
mutex_unlock(&dev->mode_config.mutex);
 
return -ENOMEM;
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
 
1676,13 → 1672,13
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
 
crtcs = kcalloc(dev->mode_config.num_connector,
crtcs = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
modes = kcalloc(fb_helper->connector_count,
sizeof(struct drm_display_mode *), GFP_KERNEL);
offsets = kcalloc(dev->mode_config.num_connector,
offsets = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_offset), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
enabled = kcalloc(fb_helper->connector_count,
sizeof(bool), GFP_KERNEL);
if (!crtcs || !modes || !enabled || !offsets) {
DRM_ERROR("Memory allocation failed\n");
1696,9 → 1692,9
fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
offsets,
enabled, width, height))) {
memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
memset(modes, 0, fb_helper->connector_count*sizeof(modes[0]));
memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0]));
memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0]));
 
if (!drm_target_cloned(fb_helper, modes, offsets,
enabled, width, height) &&
1778,6 → 1774,27
* drm_fb_helper_fill_fix() are provided as helpers to setup simple default
* values for the fbdev info structure.
*
* HANG DEBUGGING:
*
* When you have fbcon support built-in or already loaded, this function will do
* a full modeset to setup the fbdev console. Due to locking misdesign in the
* VT/fbdev subsystem that entire modeset sequence has to be done while holding
* console_lock. Until console_unlock is called no dmesg lines will be sent out
* to consoles, not even serial console. This means when your driver crashes,
* you will see absolutely nothing else but a system stuck in this function,
* with no further output. Any kind of printk() you place within your own driver
* or in the drm core modeset code will also never show up.
*
* Standard debug practice is to run the fbcon setup without taking the
* console_lock as a hack, to be able to see backtraces and crashes on the
* serial line. This can be done by setting the fb.lockless_register_fb=1 kernel
* cmdline option.
*
* The other option is to just disable fbdev emulation since very likely the
* first modeset from userspace will crash in the same way, and is even easier to
* debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
* kernel cmdline option.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
1832,6 → 1849,8
struct drm_device *dev = fb_helper->dev;
u32 max_width, max_height;
 
ENTER();
 
if (!drm_fbdev_emulation)
return 0;
 
1853,6 → 1872,7
// drm_setup_crtcs(fb_helper);
// drm_modeset_unlock_all(dev);
// drm_fb_helper_set_par(fb_helper->fbdev);
LEAVE();
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
1861,9 → 1881,9
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
*/
int __init drm_fb_helper_modinit(void)
{
#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
static int __init drm_fb_helper_modinit(void)
{
const char *name = "fbcon";
struct module *fbcon;
 
1873,8 → 1893,7
 
if (!fbcon)
request_module_nowait(name);
#endif
return 0;
}
 
module_init(drm_fb_helper_modinit);
#endif
EXPORT_SYMBOL(drm_fb_helper_modinit);
/drivers/video/drm/drm_gem.c
787,7 → 787,13
}
EXPORT_SYMBOL(drm_gem_object_free);
 
 
/**
* drm_gem_vm_open - vma->ops->open implementation for GEM
* @vma: VM area structure
*
* This function implements the #vm_operations_struct open() callback for GEM
* drivers. This must be used together with drm_gem_vm_close().
*/
#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
797,19 → 803,135
}
EXPORT_SYMBOL(drm_gem_vm_open);
 
/**
* drm_gem_vm_close - vma->ops->close implementation for GEM
* @vma: VM area structure
*
* This function implements the #vm_operations_struct close() callback for GEM
* drivers. This must be used together with drm_gem_vm_open().
*/
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
 
mutex_lock(&dev->struct_mutex);
drm_vm_close_locked(obj->dev, vma);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
 
#endif
/**
* drm_gem_mmap_obj - memory map a GEM object
* @obj: the GEM object to map
* @obj_size: the object size to be mapped, in bytes
* @vma: VMA for the area to be mapped
*
* Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
* provided by the driver. Depending on their requirements, drivers can either
* provide a fault handler in their gem_vm_ops (in which case any accesses to
* the object will be trapped, to perform migration, GTT binding, surface
* register allocation, or performance monitoring), or mmap the buffer memory
* synchronously after calling drm_gem_mmap_obj.
*
* This function is mainly intended to implement the DMABUF mmap operation, when
* the GEM object is not looked up based on its fake offset. To implement the
* DRM mmap operation, drivers should use the drm_gem_mmap() function.
*
* drm_gem_mmap_obj() assumes the user is granted access to the buffer while
* drm_gem_mmap() prevents unprivileged users from mapping random objects. So
* callers must verify access restrictions before calling this helper.
*
* Return 0 on success or -EINVAL if the object size is smaller than the VMA
* size, or if no gem_vm_ops are provided.
*/
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
struct drm_device *dev = obj->dev;
 
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
 
if (!dev->driver->gem_vm_ops)
return -EINVAL;
 
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
* This reference is cleaned up by the corresponding vm_close
* (which should happen whether the vma was created by this call, or
* by a vm_open due to mremap or partial unmap or whatever).
*/
drm_gem_object_reference(obj);
 
return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
 
/**
* drm_gem_mmap - memory map routine for GEM objects
* @filp: DRM file pointer
* @vma: VMA for the area to be mapped
*
* If a driver supports GEM object mapping, mmap calls on the DRM file
* descriptor will end up here.
*
* Look up the GEM object based on the offset passed in (vma->vm_pgoff will
* contain the fake offset we created when the GTT map ioctl was called on
* the object) and map it with a call to drm_gem_mmap_obj().
*
* If the caller is not granted access to the buffer object, the mmap will fail
* with EACCES. Please see the vma manager for more information.
*/
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_object *obj = NULL;
struct drm_vma_offset_node *node;
int ret;
 
if (drm_device_is_unplugged(dev))
return -ENODEV;
 
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
if (likely(node)) {
obj = container_of(node, struct drm_gem_object, vma_node);
/*
* When the object is being freed, after it hits 0-refcnt it
* proceeds to tear down the object. In the process it will
* attempt to remove the VMA offset and so acquire this
* mgr->vm_lock. Therefore if we find an object with a 0-refcnt
* that matches our range, we know it is in the process of being
* destroyed and will be freed as soon as we release the lock -
* so we have to check for the 0-refcnted object and treat it as
* invalid.
*/
if (!kref_get_unless_zero(&obj->refcount))
obj = NULL;
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
 
if (!obj)
return -EINVAL;
 
if (!drm_vma_node_is_allowed(node, filp)) {
drm_gem_object_unreference_unlocked(obj);
return -EACCES;
}
 
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
 
drm_gem_object_unreference_unlocked(obj);
 
return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
#endif
/drivers/video/drm/drm_hashtab.c
37,16 → 37,7
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/rculist.h>
 
#define hlist_for_each_entry_rcu(pos, head, member) \
for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
&(pos)->member)), typeof(*(pos)), member))
 
 
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int size = 1 << order;
/drivers/video/drm/drm_irq.c
908,15 → 908,11
struct drm_pending_vblank_event *e,
unsigned long seq, struct timeval *now)
{
assert_spin_locked(&dev->event_lock);
 
e->event.sequence = seq;
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
 
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
 
}
 
/**
/drivers/video/drm/drm_mipi_dsi.c
185,6 → 185,44
}
EXPORT_SYMBOL(mipi_dsi_create_packet);
 
/**
* mipi_dsi_shutdown_peripheral() - sends a Shutdown Peripheral command
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_SHUTDOWN_PERIPHERAL,
.tx_buf = (u8 [2]) { 0, 0 },
.tx_len = 2,
};
 
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral);
 
/**
* mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_TURN_ON_PERIPHERAL,
.tx_buf = (u8 [2]) { 0, 0 },
.tx_len = 2,
};
 
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral);
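
A minimal usage sketch, assuming a DSI peripheral driver that already holds a
mipi_dsi_device pointer dsi, e.g. in its enable path:

	int ret = mipi_dsi_turn_on_peripheral(dsi);

	if (ret < 0)
		dev_err(&dsi->dev, "failed to turn on peripheral: %d\n", ret);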
 
/*
* mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the
* payload in a long packet transmitted from the peripheral back to the
/drivers/video/drm/drm_modes.c
1371,8 → 1371,7
}
done:
if (i >= 0) {
printk(KERN_WARNING
"parse error at position %i in video mode '%s'\n",
pr_warn("[drm] parse error at position %i in video mode '%s'\n",
i, name);
mode->specified = false;
return false;
1519,6 → 1518,8
if (out->status != MODE_OK)
goto out;
 
drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
 
ret = 0;
 
out:
/drivers/video/drm/i915/Makefile
2,8 → 2,8
FASM = fasm.exe
 
DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86 -DCONFIG_X86_32 -DCONFIG_PCI
DEFINES += -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DCONFIG_HAVE_ATOMIC_IOMAP -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DKBUILD_MODNAME=\"i915.dll\"
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
127,6 → 127,7
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_dual_mode_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_dp_mst_topology.c \
$(DRM_TOPDIR)/drm_atomic.c \
/drivers/video/drm/i915/Makefile.lto
1,10 → 1,9
 
CC = kos32-gcc
FASM = fasm.exe
 
DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86 -DCONFIG_X86_32 -DCONFIG_PCI
DEFINES += -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DCONFIG_HAVE_ATOMIC_IOMAP -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DKBUILD_MODNAME=\"i915.dll\"
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
127,6 → 126,7
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_dual_mode_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_dp_mst_topology.c \
$(DRM_TOPDIR)/drm_atomic.c \
/drivers/video/drm/i915/i915_dma.c
41,7 → 41,7
#include <linux/vgaarb.h>
#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>
#include <linux/pm.h>
167,6 → 167,9
case I915_PARAM_HAS_RESOURCE_STREAMER:
value = HAS_RESOURCE_STREAMER(dev);
break;
case I915_PARAM_HAS_EXEC_SOFTPIN:
value = 1;
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
298,12 → 301,6
* vga_client_register() fails with -ENODEV.
*/
 
/* Initialise stolen first so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
ret = i915_gem_init_stolen(dev);
if (ret)
goto cleanup_vga_switcheroo;
 
intel_power_domains_init_hw(dev_priv, false);
 
311,7 → 308,7
 
ret = intel_irq_install(dev_priv);
if (ret)
goto cleanup_gem_stolen;
goto cleanup_csr;
 
intel_setup_gmbus(dev);
 
363,13 → 360,8
mutex_unlock(&dev->struct_mutex);
cleanup_irq:
intel_guc_ucode_fini(dev);
// drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
// vga_switcheroo_unregister_client(dev->pdev);
cleanup_csr:
cleanup_vga_client:
// vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
return ret;
}
722,9 → 714,43
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n");
info->num_pipes = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
int num_bits;
 
if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
disabled_mask |= BIT(PIPE_A);
if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
disabled_mask |= BIT(PIPE_B);
if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
disabled_mask |= BIT(PIPE_C);
 
num_bits = hweight8(disabled_mask);
 
switch (disabled_mask) {
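/*
 * Pipes can only be fused off from the highest pipe downwards, so any
 * mask that drops a lower pipe while keeping a higher one enabled is
 * an invalid fuse configuration.
 */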
case BIT(PIPE_A):
case BIT(PIPE_B):
case BIT(PIPE_A) | BIT(PIPE_B):
case BIT(PIPE_A) | BIT(PIPE_C):
invalid = true;
break;
default:
invalid = false;
}
 
if (num_bits > info->num_pipes || invalid)
DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
disabled_mask);
else
info->num_pipes -= num_bits;
}
 
/* Initialize slice/subslice/EU info */
if (IS_CHERRYVIEW(dev))
cherryview_sseu_info_init(dev);
761,6 → 787,83
}
}
 
static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
/*
* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
* need high-priority retirement, such as waiting for an explicit
* bo.
*
* It is also used for periodic low-priority events, such as
* idle-timers and recording error state.
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
* workqueue at any time. Use an ordered one.
*/
dev_priv->wq = alloc_ordered_workqueue("i915", 0);
if (dev_priv->wq == NULL)
goto out_err;
 
dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
if (dev_priv->hotplug.dp_wq == NULL)
goto out_free_wq;
 
dev_priv->gpu_error.hangcheck_wq =
alloc_ordered_workqueue("i915-hangcheck", 0);
if (dev_priv->gpu_error.hangcheck_wq == NULL)
goto out_free_dp_wq;
 
system_wq = dev_priv->wq;
 
return 0;
 
out_free_dp_wq:
out_free_wq:
out_err:
DRM_ERROR("Failed to allocate workqueues.\n");
 
return -ENOMEM;
}
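As a side note, a minimal self-contained sketch (generic workqueue API only; the names are illustrative) of the ordering guarantee the comment above relies on: items queued on an ordered workqueue execute strictly one at a time, in submission order.
 
#include <linux/workqueue.h>
 
static void retire_fn(struct work_struct *work)
{
	/* Runs only after every previously queued item has completed, so
	 * two retire passes can never race with each other. */
}
 
static DECLARE_WORK(retire_work, retire_fn);
 
static int ordered_wq_example(void)
{
	struct workqueue_struct *wq;
 
	wq = alloc_ordered_workqueue("example", 0);
	if (!wq)
		return -ENOMEM;
 
	queue_work(wq, &retire_work);	/* serialized with all other items */
	flush_workqueue(wq);		/* wait for everything queued so far */
	destroy_workqueue(wq);
	return 0;
}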
 
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
}
 
static int i915_mmio_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int mmio_bar;
int mmio_size;
 
mmio_bar = IS_GEN2(dev) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
* in the same BAR, so we want to restrict this ioremap from
* clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
if (INTEL_INFO(dev)->gen < 5)
mmio_size = 512 * 1024;
else
mmio_size = 2 * 1024 * 1024;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (dev_priv->regs == NULL) {
DRM_ERROR("failed to map registers\n");
 
return -EIO;
}
 
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
 
return 0;
}
 
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
776,7 → 879,7
{
struct drm_i915_private *dev_priv;
struct intel_device_info *info, *device_info;
int ret = 0, mmio_bar, mmio_size;
int ret = 0;
uint32_t aperture_size;
 
info = (struct intel_device_info *) flags;
803,6 → 906,10
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
 
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
goto out_free_priv;
 
intel_pm_setup(dev);
 
intel_runtime_pm_get(dev_priv);
821,28 → 928,12
 
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
goto out_runtime_pm_put;
}
 
mmio_bar = IS_GEN2(dev) ? 1 : 0;
/* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
* in the same BAR, so we want to restrict this ioremap from
* clobbering the GTT which we want ioremap_wc instead. Fortunately,
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
if (info->gen < 5)
mmio_size = 512*1024;
else
mmio_size = 2*1024*1024;
 
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
ret = i915_mmio_setup(dev);
if (ret < 0)
goto put_bridge;
}
 
set_fake_framebuffer();
 
853,7 → 944,7
 
ret = i915_gem_gtt_init(dev);
if (ret)
goto out_freecsr;
goto out_uncore_fini;
 
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
883,7 → 974,10
 
aperture_size = dev_priv->gtt.mappable_end;
 
dev_priv->gtt.mappable = AllocKernelSpace(8192);
printk("aperture base %x size = %x\n",(u32)dev_priv->gtt.mappable_base,(u32)aperture_size);
dev_priv->gtt.mappable =
io_mapping_create_wc(dev_priv->gtt.mappable_base,
aperture_size);
if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
goto out_gtt;
890,42 → 984,12
}
 
 
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
* need high-priority retirement, such as waiting for an explicit
* bo.
*
* It is also used for periodic low-priority events, such as
* idle-timers and recording error state.
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
* workqueue at any time. Use an ordered one.
*/
dev_priv->wq = alloc_ordered_workqueue("i915", 0);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
goto out_mtrrfree;
}
system_wq = dev_priv->wq;
 
dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
if (dev_priv->hotplug.dp_wq == NULL) {
DRM_ERROR("Failed to create our dp workqueue.\n");
ret = -ENOMEM;
goto out_freewq;
}
 
intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
 
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
intel_opregion_setup(dev);
 
i915_gem_load(dev);
i915_gem_load_init(dev);
 
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
981,15 → 1045,15
out_power_well:
drm_vblank_cleanup(dev);
out_gem_unload:
 
out_freewq:
out_mtrrfree:
out_gtt:
i915_global_gtt_cleanup(dev);
out_freecsr:
out_uncore_fini:
put_bridge:
free_priv:
out_runtime_pm_put:
i915_workqueues_cleanup(dev_priv);
out_free_priv:
kfree(dev_priv);
 
return ret;
}
 
1015,8 → 1079,7
 
i915_teardown_sysfs(dev);
 
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker);
i915_gem_shrinker_cleanup(dev_priv);
 
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
1044,6 → 1107,8
vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
 
intel_csr_ucode_fini(dev_priv);
 
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
1062,27 → 1127,17
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv);
i915_gem_cleanup_stolen(dev);
 
intel_csr_ucode_fini(dev_priv);
 
intel_teardown_mchbar(dev);
 
destroy_workqueue(dev_priv->hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos);
 
i915_global_gtt_cleanup(dev);
 
intel_uncore_fini(dev);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
i915_mmio_cleanup(dev);
 
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
i915_gem_load_cleanup(dev);
pci_dev_put(dev_priv->bridge_dev);
i915_workqueues_cleanup(dev_priv);
kfree(dev_priv);
 
return 0;
1125,8 → 1180,6
i915_gem_context_close(dev, file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
 
intel_modeset_preclose(dev, file);
}
 
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
/drivers/video/drm/i915/i915_drv.c
37,6 → 37,8
 
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>
 
#include <syscall.h>
572,13 → 574,7
 
intel_suspend_gt_powersave(dev);
 
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw. Also, power gate the CRTC power wells.
*/
drm_modeset_lock_all(dev);
intel_display_suspend(dev);
drm_modeset_unlock_all(dev);
 
intel_dp_mst_suspend(dev);
 
733,12 → 729,10
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
 
drm_modeset_lock_all(dev);
intel_dp_mst_resume(dev);
 
intel_display_resume(dev);
drm_modeset_unlock_all(dev);
 
intel_dp_mst_resume(dev);
 
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
858,6 → 852,7
 
return i915_drm_resume(dev);
}
#endif
 
/**
* i915_reset - reset chip after a hang
910,7 → 905,7
return ret;
}
 
intel_overlay_reset(dev_priv);
// intel_overlay_reset(dev_priv);
 
/* Ok, now get things going again... */
 
952,6 → 947,7
return 0;
}
 
#if 0
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
1081,7 → 1077,6
*/
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
intel_prepare_ddi(dev);
 
return 0;
}
1341,7 → 1336,7
return 0;
 
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
wait_for_on ? "on" : "off",
onoff(wait_for_on),
I915_READ(VLV_GTLC_PW_STATUS));
 
/*
1351,7 → 1346,7
err = wait_for(COND, 3);
if (err)
DRM_ERROR("timeout waiting for GT wells to go %s\n",
wait_for_on ? "on" : "off");
onoff(wait_for_on));
 
return err;
#undef COND
1362,7 → 1357,7
if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
return;
 
DRM_ERROR("GT register access while GT waking disabled\n");
DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
 
1506,6 → 1501,10
 
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
 
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
 
dev_priv->pm.suspended = true;
 
/*
1554,6 → 1553,8
 
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
intel_guc_resume(dev);
 
/drivers/video/drm/i915/i915_drv.h
34,6 → 34,7
#include <uapi/drm/drm_fourcc.h>
 
#include <drm/drmP.h>
#include "i915_params.h"
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
40,7 → 41,7
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/scatterlist.h>
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
48,17 → 49,16
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include "intel_guc.h"
 
#include <linux/spinlock.h>
 
/* General customization:
*/
 
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20151218"
#define DRIVER_DATE "20160229"
 
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
69,11 → 69,11
BUILD_BUG_ON(__i915_warn_cond); \
WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif
 
#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
 
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
87,25 → 87,14
*/
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
WARN(1, format); \
else \
if (unlikely(__ret_warn_on)) \
if (!WARN(i915.verbose_state_checks, format)) \
DRM_ERROR(format); \
} \
unlikely(__ret_warn_on); \
})
 
#define I915_STATE_WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
WARN(1, "WARN_ON(" #condition ")\n"); \
else \
DRM_ERROR("WARN_ON(" #condition ")\n"); \
} \
unlikely(__ret_warn_on); \
})
#define I915_STATE_WARN_ON(x) \
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
static inline const char *yesno(bool v)
{
112,6 → 101,11
return v ? "yes" : "no";
}
 
static inline const char *onoff(bool v)
{
return v ? "on" : "off";
}
 
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
266,6 → 260,9
 
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
for_each_if ((__mask) & (1 << (__p)))
#define for_each_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
339,7 → 336,7
unsigned boosts;
} rps;
 
struct intel_engine_cs *bsd_ring;
unsigned int bsd_ring;
};
 
enum intel_dpll_id {
633,6 → 630,7
struct dpll *best_clock);
int (*compute_pipe_wm)(struct intel_crtc *crtc,
struct drm_atomic_state *state);
void (*program_watermarks)(struct intel_crtc_state *cstate);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
657,9 → 655,6
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
void (*update_primary_plane)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
726,6 → 721,8
i915_reg_t reg_post;
u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
 
int unclaimed_mmio_check;
};
 
/* Iterate over initialised fw domains */
890,6 → 887,9
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int pin_count;
struct i915_vma *lrc_vma;
u64 lrc_desc;
uint32_t *lrc_reg_state;
} engine[I915_NUM_RINGS];
 
struct list_head link;
903,16 → 903,15
ORIGIN_DIRTYFB,
};
 
struct i915_fbc {
struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
struct mutex lock;
unsigned threshold;
unsigned int fb_id;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
unsigned int visible_pipes_mask;
struct intel_crtc *crtc;
int y;
 
struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
922,18 → 921,52
bool enabled;
bool active;
 
struct intel_fbc_state_cache {
struct {
unsigned int mode_flags;
uint32_t hsw_bdw_pixel_rate;
} crtc;
 
struct {
unsigned int rotation;
int src_w;
int src_h;
bool visible;
} plane;
 
struct {
u64 ilk_ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
int fence_reg;
unsigned int tiling_mode;
} fb;
} state_cache;
 
struct intel_fbc_reg_params {
struct {
enum pipe pipe;
enum plane plane;
unsigned int fence_y_offset;
} crtc;
 
struct {
u64 ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
int fence_reg;
} fb;
 
int cfb_size;
} params;
 
struct intel_fbc_work {
bool scheduled;
u32 scheduled_vblank;
struct work_struct work;
struct drm_framebuffer *fb;
unsigned long enable_jiffies;
} work;
 
const char *no_fbc_reason;
 
bool (*is_active)(struct drm_i915_private *dev_priv);
void (*activate)(struct intel_crtc *crtc);
void (*deactivate)(struct drm_i915_private *dev_priv);
};
 
/**
973,6 → 1006,7
unsigned busy_frontbuffer_bits;
bool psr2_support;
bool aux_frame_sync;
bool link_standby;
};
 
enum intel_pch {
1299,7 → 1333,7
bool busy;
 
/* the indicator for dispatch video commands on two BSD rings */
int bsd_ring_dispatch_index;
unsigned int bsd_ring_dispatch_index;
 
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
1485,7 → 1519,7
u8 seq_version;
u32 size;
u8 *data;
u8 *sequence[MIPI_SEQ_MAX];
const u8 *sequence[MIPI_SEQ_MAX];
} dsi;
 
int crt_ddc_pin;
1657,11 → 1691,18
u32 mask;
};
 
#define I915_MAX_WA_REGS 16
/*
* RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
* allowing it for RCS as we don't foresee any requirement of having
* a whitelist for other engines. When it is really required for
* other engines, the limit needs to be increased.
*/
#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
 
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
u32 count;
u32 hw_whitelist_count[I915_NUM_RINGS];
};
 
struct i915_virtual_gpu {
1756,7 → 1797,7
u32 pipestat_irq_mask[I915_MAX_PIPES];
 
struct i915_hotplug hotplug;
struct i915_fbc fbc;
struct intel_fbc fbc;
struct i915_drrs drrs;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
1780,7 → 1821,7
 
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_boot_cdclk;
unsigned int cdclk_freq, max_cdclk_freq;
unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
1805,6 → 1846,7
 
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;
 
struct list_head vm_list; /* Global list of all address spaces */
struct i915_gtt gtt; /* VM representing the global address space */
1825,8 → 1867,13
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
 
/* dpll and cdclk state is protected by connection_mutex */
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
 
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
struct i915_workarounds workarounds;
1941,6 → 1988,8
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
 
struct intel_context *kernel_context;
 
bool edp_low_vswing;
 
/* perform PHY state sanity checks? */
2265,9 → 2314,9
 
};
 
int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out);
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx);
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
2576,6 → 2625,12
 
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
 
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
IS_SKL_GT3(dev) || \
IS_SKL_GT4(dev))
 
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
2665,46 → 2720,6
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
 
/* i915_params.c */
struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_dc;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
bool enable_guc_submission;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
int edp_vswing;
/* Kolibri related */
char *log_file;
char *cmdline_mode;
};
extern struct i915_params i915 __read_mostly;
 
/* i915_dma.c */
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
2748,7 → 2763,8
extern void intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
2870,7 → 2886,8
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
3134,18 → 3151,11
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
struct i915_address_space *ggtt =
&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
return vm == ggtt;
}
 
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
WARN_ON(i915_is_ggtt(vm));
 
return container_of(vm, struct i915_hw_ppgtt, base);
}
 
3283,6 → 3293,7
#define I915_SHRINK_ACTIVE 0x8
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 
 
/* i915_gem_tiling.c */
3453,8 → 3464,8
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
3461,8 → 3472,6
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
3619,6 → 3628,4
i915_gem_request_assign(&ring->trace_irq_req, req);
}
 
#include "intel_drv.h"
 
#endif
/drivers/video/drm/i915/i915_gem.c
41,14 → 41,11
#define RQ_BUG_ON(expr)
 
extern int x86_clflush_size;
#define __copy_to_user_inatomic __copy_to_user
 
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define MAP_SHARED 0x01 /* Share changes */
 
 
 
struct drm_i915_gem_object *get_fb_obj();
 
unsigned long vm_mmap(struct file *file, unsigned long addr,
155,10 → 152,10
 
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
247,7 → 244,7
int ret;
 
drm_gem_object_reference(&obj->base);
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
if (i915_vma_unbind(vma))
break;
 
653,7 → 650,25
* page faults in the source data
*/
 
static inline int
fast_user_write(struct io_mapping *mapping,
loff_t page_base, int page_offset,
char __user *user_data,
int length)
{
void __iomem *vaddr_atomic;
void *vaddr;
unsigned long unwritten;
 
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
/* We can use the cpu mem copy function because this is X86. */
vaddr = (void __force*)vaddr_atomic + page_offset;
unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
 
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
702,11 → 717,16
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
 
MapPage(dev_priv->gtt.mappable,
dev_priv->gtt.mappable_base+page_base, PG_WRITEC|PG_SW);
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_flush;
}
 
memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
 
remain -= page_length;
user_data += page_length;
offset += page_length;
741,9 → 761,8
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
memcpy(vaddr + shmem_page_offset,
user_data,
page_length);
ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
user_data, page_length);
if (needs_clflush_after)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
1126,7 → 1145,7
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
wait_queue_t wait;
unsigned long timeout_expire;
s64 before, now;
s64 before = 0; /* Only to silence a compiler warning. */
int ret;
 
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1146,14 → 1165,17
return -ETIME;
 
timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
 
/*
* Record current time in case interrupted by signal, or wedged.
*/
before = ktime_get_raw_ns();
}
 
if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
 
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
 
/* Optimistic spin for the next jiffie before touching IRQs */
ret = __i915_spin_request(req, state);
1213,11 → 1235,10
DestroyEvent(wait.evnt);
 
out:
now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(req);
 
if (timeout) {
s64 tres = *timeout - (now - before);
s64 tres = *timeout - (ktime_get_raw_ns() - before);
 
*timeout = tres < 0 ? 0 : tres;
 
2053,7 → 2074,7
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
list_move_tail(&vma->mm_list, &vma->vm->active_list);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
 
static void
2091,9 → 2112,9
list_move_tail(&obj->global_list,
&to_i915(obj->base.dev)->mm.bound_list);
 
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!list_empty(&vma->vm_link))
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}
 
i915_gem_request_assign(&obj->last_fenced_req, NULL);
2250,7 → 2271,7
 
trace_i915_gem_request_add(request);
 
// i915_queue_hangcheck(ring->dev);
i915_queue_hangcheck(ring->dev);
 
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
2316,10 → 2337,8
i915_gem_request_remove_from_client(req);
 
if (ctx) {
if (i915.enable_execlists) {
if (ctx != req->ring->default_context)
intel_lr_context_unpin(req);
}
if (i915.enable_execlists && ctx != req->i915->kernel_context)
intel_lr_context_unpin(ctx, req->ring);
 
i915_gem_context_unreference(ctx);
}
2327,7 → 2346,8
kfree(req);
}
 
int i915_gem_request_alloc(struct intel_engine_cs *ring,
static inline int
__i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
2393,6 → 2413,31
return ret;
}
 
/**
* i915_gem_request_alloc - allocate a request structure
*
* @engine: engine that we wish to issue the request on.
* @ctx: context that the request will be associated with.
* This can be NULL if the request is not directly related to
* any specific user context, in which case this function will
* choose an appropriate context to use.
*
* Returns a pointer to the allocated request if successful,
* or an error code if not.
*/
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx)
{
struct drm_i915_gem_request *req;
int err;
 
if (ctx == NULL)
ctx = to_i915(engine->dev)->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
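A typical call site (sketch only; mirrors the conversions later in this diff, with ring assumed in scope) now passes NULL to pick up the kernel context and checks the result with IS_ERR():
 
	struct drm_i915_gem_request *req;
 
	req = i915_gem_request_alloc(ring, NULL);	/* NULL selects the kernel context */
	if (IS_ERR(req))
		return PTR_ERR(req);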
 
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);
2584,11 → 2629,9
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
if (i915.enable_execlists) {
unsigned long flags;
 
spin_lock_irqsave(&ring->execlist_lock, flags);
spin_lock_irq(&ring->execlist_lock);
idle &= list_empty(&ring->execlist_queue);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
spin_unlock_irq(&ring->execlist_lock);
 
intel_execlists_retire_requests(ring);
}
2810,9 → 2853,13
return 0;
 
if (*to_req == NULL) {
ret = i915_gem_request_alloc(to, to->default_context, to_req);
if (ret)
return ret;
struct drm_i915_gem_request *req;
 
req = i915_gem_request_alloc(to, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
 
*to_req = req;
}
 
trace_i915_gem_ring_sync_to(*to_req, from, from_req);
2929,7 → 2976,7
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
 
if (list_empty(&vma->vma_link))
if (list_empty(&vma->obj_link))
return 0;
 
if (!drm_mm_node_allocated(&vma->node)) {
2948,8 → 2995,7
return ret;
}
 
if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);
 
/* release the fence reg _after_ flushing */
2963,8 → 3009,8
vma->vm->unbind_vma(vma);
vma->bound = 0;
 
list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm)) {
list_del_init(&vma->vm_link);
if (vma->is_ggtt) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
obj->map_and_fenceable = false;
} else if (vma->ggtt_view.pages) {
3012,9 → 3058,9
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
 
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
 
ret = i915_switch_context(req);
if (ret) {
3210,7 → 3256,7
goto err_remove_node;
 
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
list_add_tail(&vma->vm_link, &vm->inactive_list);
 
return vma;
 
3375,7 → 3421,7
/* And bump the LRU for this access */
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->mm_list,
list_move_tail(&vma->vm_link,
&to_i915(obj->base.dev)->gtt.base.inactive_list);
 
return 0;
3410,7 → 3456,7
* catch the issue of the CS prefetch crossing page boundaries and
* reading an invalid PTE on older architectures.
*/
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
 
3473,7 → 3519,7
*/
}
 
list_for_each_entry(vma, &obj->vma_list, vma_link) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
 
3483,7 → 3529,7
}
}
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
 
3957,10 → 4003,20
if (ret)
goto unref;
 
BUILD_BUG_ON(I915_NUM_RINGS > 16);
args->busy = obj->active << 16;
args->busy = 0;
if (obj->active) {
int i;
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_request *req;
 
req = obj->last_read_req[i];
if (req)
args->busy |= 1 << (16 + req->ring->exec_id);
}
if (obj->last_write_req)
args->busy |= obj->last_write_req->ring->id;
args->busy |= obj->last_write_req->ring->exec_id;
}
 
unref:
drm_gem_object_unreference(&obj->base);
4136,7 → 4192,7
 
trace_i915_gem_object_destroy(obj);
 
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
int ret;
 
vma->pin_count = 0;
4190,7 → 4246,7
struct i915_address_space *vm)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
vma->vm == vm)
return vma;
4207,7 → 4263,7
if (WARN_ONCE(!view, "no view specified"))
return ERR_PTR(-EINVAL);
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
4216,7 → 4272,6
 
void i915_gem_vma_destroy(struct i915_vma *vma)
{
struct i915_address_space *vm = NULL;
WARN_ON(vma->node.allocated);
 
/* Keep the vma as a placeholder in the execbuffer reservation lists */
4223,13 → 4278,11
if (!list_empty(&vma->exec_list))
return;
 
vm = vma->vm;
if (!vma->is_ggtt)
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
if (!i915_is_ggtt(vm))
i915_ppgtt_put(i915_vm_to_ppgtt(vm));
list_del(&vma->obj_link);
 
list_del(&vma->vma_link);
 
kfree(vma);
}
 
4450,7 → 4503,7
*/
init_unused_rings(dev);
 
BUG_ON(!dev_priv->ring[RCS].default_context);
BUG_ON(!dev_priv->kernel_context);
 
ret = i915_ppgtt_init_hw(dev);
if (ret) {
4487,10 → 4540,9
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_request *req;
 
WARN_ON(!ring->default_context);
 
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret) {
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
4595,6 → 4647,14
 
for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring);
 
if (i915.enable_execlists)
/*
* Neither the BIOS, ourselves, nor any other kernel
* expects the system to be in execlists mode on startup,
* so we need to reset the GPU back to legacy mode.
*/
intel_gpu_reset(dev);
}
 
static void
4605,7 → 4665,7
}
 
void
i915_gem_load(struct drm_device *dev)
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
4651,6 → 4711,7
i915_gem_restore_fences(dev);
 
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
 
dev_priv->mm.interruptible = true;
 
4703,6 → 4764,8
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
 
file_priv->bsd_ring = -1;
 
ret = i915_gem_context_open(dev, file);
if (ret)
kfree(file_priv);
4745,8 → 4808,8
 
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
4764,7 → 4827,7
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
4778,8 → 4841,8
{
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4795,7 → 4858,7
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
4808,7 → 4871,7
{
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, obj_link)
if (drm_mm_node_allocated(&vma->node))
return true;
 
4825,8 → 4888,8
 
BUG_ON(list_empty(&o->vma_list));
 
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
4838,7 → 4901,7
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->pin_count > 0)
return true;
 
/drivers/video/drm/i915/i915_gem_context.c
142,7 → 142,7
return;
 
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
mm_list) {
vm_link) {
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
break;
}
321,6 → 321,18
return ERR_PTR(ret);
}
 
static void i915_gem_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
if (i915.enable_execlists) {
intel_lr_context_unpin(ctx, engine);
} else {
if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(ctx);
}
}
 
void i915_gem_context_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
329,40 → 341,31
if (i915.enable_execlists) {
struct intel_context *ctx;
 
list_for_each_entry(ctx, &dev_priv->context_list, link) {
list_for_each_entry(ctx, &dev_priv->context_list, link)
intel_lr_context_reset(dev, ctx);
}
 
return;
}
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *lctx = ring->last_context;
 
if (lctx) {
if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
 
i915_gem_context_unreference(lctx);
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
}
}
 
/* Force the GPU state to be reinitialised on enabling */
if (ring->default_context)
ring->default_context->legacy_hw_ctx.initialized = false;
dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}
}
 
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int i;
 
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->ring[RCS].default_context))
if (WARN_ON(dev_priv->kernel_context))
return 0;
 
if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
392,13 → 395,8
return PTR_ERR(ctx);
}
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
dev_priv->kernel_context = ctx;
 
/* NB: RCS will hold a ref for all rings */
ring->default_context = ctx;
}
 
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
dev_priv->hw_context_size ? "HW" : "fake");
408,7 → 406,7
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
struct intel_context *dctx = dev_priv->kernel_context;
int i;
 
if (dctx->legacy_hw_ctx.rcs_state) {
424,28 → 422,21
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
 
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
 
for (i = 0; i < I915_NUM_RINGS; i++) {
for (i = I915_NUM_RINGS; --i >= 0;) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
 
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
 
ring->default_context = NULL;
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
}
}
 
i915_gem_context_unreference(dctx);
dev_priv->kernel_context = NULL;
}
 
int i915_gem_context_enable(struct drm_i915_gem_request *req)
864,6 → 855,9
if (!contexts_enabled(dev))
return -ENODEV;
 
if (args->pad != 0)
return -EINVAL;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
887,6 → 881,9
struct intel_context *ctx;
int ret;
 
if (args->pad != 0)
return -EINVAL;
 
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
 
/drivers/video/drm/i915/i915_gem_evict.c
116,7 → 116,7
 
search_again:
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
list_for_each_entry(vma, &vm->inactive_list, vm_link) {
if (mark_free(vma, &unwind_list))
goto found;
}
125,7 → 125,7
goto none;
 
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(vma, &vm->active_list, mm_list) {
list_for_each_entry(vma, &vm->active_list, vm_link) {
if (mark_free(vma, &unwind_list))
goto found;
}
270,7 → 270,7
WARN_ON(!list_empty(&vm->active_list));
}
 
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
if (vma->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
 
/drivers/video/drm/i915/i915_gem_execbuffer.c
193,13 → 193,10
return eb->lut[handle];
} else {
struct hlist_head *head;
struct hlist_node *node;
struct i915_vma *vma;
 
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
struct i915_vma *vma;
 
vma = hlist_entry(node, struct i915_vma, exec_node);
hlist_for_each_entry(vma, head, exec_node) {
if (vma->exec_handle == handle)
return vma;
}
333,13 → 330,26
/* Map the page containing the relocation we're going to perform. */
offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset;
MapPage(dev_priv->gtt.mappable,dev_priv->gtt.mappable_base +
(offset & PAGE_MASK), PG_SW);
reloc_page = dev_priv->gtt.mappable;
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
// io_mapping_unmap_atomic(reloc_page);
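/* On gen8+ the relocation entry is 64 bits wide, so the upper dword can
 * land on the following page; remap before writing it if so. */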
if (INTEL_INFO(dev)->gen >= 8) {
offset += sizeof(uint32_t);
 
if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
reloc_page =
io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
offset);
}
 
iowrite32(upper_32_bits(delta),
reloc_page + offset_in_page(offset));
}
 
io_mapping_unmap_atomic(reloc_page);
 
return 0;
}
 
512,7 → 522,8
count = ARRAY_SIZE(stack_reloc);
remain -= count;
 
memcpy(r, user_relocs, count*sizeof(r[0]));
if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
return -EFAULT;
 
do {
u64 offset = r->presumed_offset;
521,11 → 532,11
if (ret)
return ret;
 
if (r->presumed_offset != offset)
{
memcpy(&user_relocs->presumed_offset,
if (r->presumed_offset != offset &&
__copy_to_user_inatomic(&user_relocs->presumed_offset,
&r->presumed_offset,
sizeof(r->presumed_offset));
sizeof(r->presumed_offset))) {
return -EFAULT;
}
 
user_relocs++;
655,7 → 666,7
if (entry->relocation_count == 0)
return false;
 
if (!i915_is_ggtt(vma->vm))
if (!vma->is_ggtt)
return false;
 
/* See also use_cpu_reloc() */
674,8 → 685,7
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
 
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_is_ggtt(vma->vm));
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
 
if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
1286,6 → 1296,9
exec_start = params->batch_obj_vm_offset +
params->args_batch_start_offset;
 
if (exec_len == 0)
exec_len = params->batch_obj->base.size;
 
ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
1302,34 → 1315,24
 
/**
* Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned.
* The ring index is returned.
*/
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
struct drm_file *file)
static unsigned int
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
 
/* Check whether the file_priv is using one ring */
if (file_priv->bsd_ring)
return file_priv->bsd_ring->id;
else {
/* If no, use the ping-pong mechanism to select one ring */
int ring_id;
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_ring < 0) {
/* If not, use the ping-pong mechanism to select one. */
mutex_lock(&dev_priv->dev->struct_mutex);
file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
dev_priv->mm.bsd_ring_dispatch_index ^= 1;
mutex_unlock(&dev_priv->dev->struct_mutex);
}
 
mutex_lock(&dev->struct_mutex);
if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
ring_id = VCS;
dev_priv->mm.bsd_ring_dispatch_index = 1;
} else {
ring_id = VCS2;
dev_priv->mm.bsd_ring_dispatch_index = 0;
return file_priv->bsd_ring;
}
file_priv->bsd_ring = &dev_priv->ring[ring_id];
mutex_unlock(&dev->struct_mutex);
return ring_id;
}
}
 
static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
1351,7 → 1354,65
return vma->obj;
}
 
#define I915_USER_RINGS (4)
 
static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_DEFAULT] = RCS,
[I915_EXEC_RENDER] = RCS,
[I915_EXEC_BLT] = BCS,
[I915_EXEC_BSD] = VCS,
[I915_EXEC_VEBOX] = VECS
};
 
static int
eb_select_ring(struct drm_i915_private *dev_priv,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct intel_engine_cs **ring)
{
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
 
if (user_ring_id > I915_USER_RINGS) {
DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
return -EINVAL;
}
 
if ((user_ring_id != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}
 
if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
 
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
bsd_idx <= I915_EXEC_BSD_RING2) {
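/* Convert the absolute BSD_RING1/BSD_RING2 flag value into a 0-based ring index. */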
bsd_idx >>= I915_EXEC_BSD_SHIFT;
bsd_idx--;
} else {
DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
bsd_idx);
return -EINVAL;
}
 
*ring = &dev_priv->ring[_VCS(bsd_idx)];
} else {
*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
}
 
if (!intel_ring_initialized(*ring)) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return -EINVAL;
}
 
return 0;
}
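From userspace, the target ring is encoded in the execbuffer flags; a hypothetical sketch using only uapi constants, explicitly requesting the second BSD ring:
 
	struct drm_i915_gem_execbuffer2 args = {};
 
	args.flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2;
	/* fill in buffers_ptr/buffer_count, then issue DRM_IOCTL_I915_GEM_EXECBUFFER2 */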
 
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
1358,6 → 1419,7
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
1386,52 → 1448,10
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
 
if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
ret = eb_select_ring(dev_priv, file, args, &ring);
if (ret)
return ret;
 
if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}
 
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) {
int ring_id;
 
switch (args->flags & I915_EXEC_BSD_MASK) {
case I915_EXEC_BSD_DEFAULT:
ring_id = gen8_dispatch_bsd_ring(dev, file);
ring = &dev_priv->ring[ring_id];
break;
case I915_EXEC_BSD_RING1:
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BSD_RING2:
ring = &dev_priv->ring[VCS2];
break;
default:
DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
(int)(args->flags & I915_EXEC_BSD_MASK));
return -EINVAL;
}
} else
ring = &dev_priv->ring[VCS];
} else
ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
 
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
 
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
1580,11 → 1600,13
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
/* Allocate a request for this batch buffer nice and early. */
ret = i915_gem_request_alloc(ring, ctx, &params->request);
if (ret)
req = i915_gem_request_alloc(ring, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
}
 
ret = i915_gem_request_add_to_client(params->request, file);
ret = i915_gem_request_add_to_client(req, file);
if (ret)
goto err_batch_unpin;
 
1600,6 → 1622,7
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
params->request = req;
 
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 
1623,8 → 1646,8
* must be freed again. If it was submitted then it is being tracked
* on the active request list and no clean up is required here.
*/
if (ret && params->request)
i915_gem_request_cancel(params->request);
if (ret && !IS_ERR_OR_NULL(req))
i915_gem_request_cancel(req);
 
mutex_unlock(&dev->struct_mutex);
 
/drivers/video/drm/i915/i915_gem_fence.c
34,8 → 34,8
* set of these objects.
*
* Fences are used to detile GTT memory mappings. They're also connected to the
* hardware frontbuffer render tracking and hence interract with frontbuffer
* conmpression. Furthermore on older platforms fences are required for tiled
* hardware frontbuffer render tracking and hence interact with frontbuffer
* compression. Furthermore on older platforms fences are required for tiled
* objects used by the display engine. They can also be used by the render
* engine - they're required for blitter commands and are optional for render
* commands. But on gen4+ both display (with the exception of fbc) and rendering
46,8 → 46,8
*
* Finally note that because fences are such a restricted resource they're
* dynamically associated with objects. Furthermore fence state is committed to
* the hardware lazily to avoid unecessary stalls on gen2/3. Therefore code must
* explictly call i915_gem_object_get_fence() to synchronize fencing status
* the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
* explicitly call i915_gem_object_get_fence() to synchronize fencing status
* for cpu access. Also note that some code wants an unfenced view, for those
* cases the fence can be removed forcefully with i915_gem_object_put_fence().
*
527,7 → 527,7
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
* 17 is not just a page offset, so as we page an objet out and back in,
* 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
/drivers/video/drm/i915/i915_gem_gtt.c
95,9 → 95,11
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
const struct i915_ggtt_view i915_ggtt_view_normal;
const struct i915_ggtt_view i915_ggtt_view_normal = {
.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED
.type = I915_GGTT_VIEW_ROTATED,
};
 
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
2122,6 → 2124,25
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
 
static void gtt_write_workarounds(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* This function is for GTT-related workarounds. It is called on driver
* load and after a GPU reset, so you can place workarounds here even if
* they get overwritten by a GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
if (IS_BROADWELL(dev))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_SKYLAKE(dev))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
else if (IS_BROXTON(dev))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
 
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2138,6 → 2159,8
 
int i915_ppgtt_init_hw(struct drm_device *dev)
{
gtt_write_workarounds(dev);
 
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
2727,7 → 2750,7
}
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
}
 
/* Clear any non-preallocated blocks */
2799,6 → 2822,8
ppgtt->base.cleanup(&ppgtt->base);
}
 
i915_gem_cleanup_stolen(dev);
 
if (drm_mm_initialized(&vm->mm)) {
if (intel_vgpu_active(dev))
intel_vgt_deballoon();
3016,9 → 3041,6
*mappable_base = pci_resource_start(dev->pdev, 2);
*mappable_end = pci_resource_len(dev->pdev, 2);
 
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
 
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
if (INTEL_INFO(dev)->gen >= 9) {
3075,8 → 3097,6
return -ENXIO;
}
 
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
3165,6 → 3185,7
}
 
gtt->base.dev = dev;
gtt->base.is_ggtt = true;
 
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
&gtt->mappable_base, &gtt->mappable_end);
3171,6 → 3192,14
if (ret)
return ret;
 
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
ret = i915_gem_init_stolen(dev);
if (ret)
goto out_gtt_cleanup;
 
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
gtt->base.total >> 20);
3190,6 → 3219,11
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
return 0;
 
out_gtt_cleanup:
gtt->base.cleanup(&dev_priv->gtt.base);
 
return ret;
}
 
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3212,7 → 3246,7
vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != vm)
continue;
 
3269,19 → 3303,20
if (vma == NULL)
return ERR_PTR(-ENOMEM);
 
INIT_LIST_HEAD(&vma->vma_link);
INIT_LIST_HEAD(&vma->mm_list);
INIT_LIST_HEAD(&vma->vm_link);
INIT_LIST_HEAD(&vma->obj_link);
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
vma->is_ggtt = i915_is_ggtt(vm);
 
if (i915_is_ggtt(vm))
vma->ggtt_view = *ggtt_view;
 
list_add_tail(&vma->vma_link, &obj->vma_list);
if (!i915_is_ggtt(vm))
else
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
 
list_add_tail(&vma->obj_link, &obj->vma_list);
 
return vma;
}
 
3322,8 → 3357,9
}
 
static struct scatterlist *
rotate_pages(dma_addr_t *in, unsigned int offset,
rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int width, unsigned int height,
unsigned int stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int column, row;
3335,7 → 3371,7
}
 
for (column = 0; column < width; column++) {
src_idx = width * (height - 1) + column;
src_idx = stride * (height - 1) + column;
for (row = 0; row < height; row++) {
st->nents++;
/* We don't need the pages, but need to initialize
3346,7 → 3382,7
sg_dma_address(sg) = in[offset + src_idx];
sg_dma_len(sg) = PAGE_SIZE;
sg = sg_next(sg);
src_idx -= width;
src_idx -= stride;
}
}
 
3354,10 → 3390,9
}
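The new stride parameter lets the rotated walk skip row padding: pages are emitted column by column, starting from the bottom row and stepping back one stride per row. A minimal standalone sketch of the same index walk (the width/height/stride values here are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int width = 2, height = 2, stride = 3;
	unsigned int column, row, src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			printf("%u ", src_idx);	/* prints: 3 0 4 1 */
			src_idx -= stride;
		}
	}
	printf("\n");
	return 0;
}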
 
static struct sg_table *
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter;
3399,6 → 3434,7
/* Rotate the pages. */
sg = rotate_pages(page_addr_list, 0,
rot_info->width_pages, rot_info->height_pages,
rot_info->width_pages,
st, NULL);
 
/* Append the UV plane if NV12. */
3414,6 → 3450,7
rotate_pages(page_addr_list, uv_start_page,
rot_info->width_pages_uv,
rot_info->height_pages_uv,
rot_info->width_pages_uv,
st, sg);
}
 
3495,7 → 3532,7
vma->ggtt_view.pages = vma->obj->pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->ggtt_view.pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
vma->ggtt_view.pages =
intel_partial_pages(&vma->ggtt_view, vma->obj);
3551,11 → 3588,6
return 0;
 
if (vma->bound == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma->vm,
vma->node.start,
vma->node.size,
VM_TO_TRACE_NAME(vma->vm));
 
/* XXX: i915_vma_pin() will fix this +- hack */
vma->pin_count++;
ret = vma->vm->allocate_va_range(vma->vm,
3589,7 → 3621,7
if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
return view->params.rotation_info.size;
return view->params.rotated.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
/drivers/video/drm/i915/i915_gem_gtt.h
44,7 → 44,6
 
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
 
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
156,7 → 155,7
u64 offset;
unsigned int size;
} partial;
struct intel_rotation_info rotation_info;
struct intel_rotation_info rotated;
} params;
 
struct sg_table *pages;
184,6 → 183,7
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
unsigned int bound : 4;
bool is_ggtt : 1;
 
/**
* Support different GGTT views into the same object.
195,9 → 195,9
struct i915_ggtt_view ggtt_view;
 
/** This object's place on the active/inactive lists */
struct list_head mm_list;
struct list_head vm_link;
 
struct list_head vma_link; /* Link in the object's VMA list */
struct list_head obj_link; /* Link in the object's VMA list */
 
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
276,6 → 276,8
u64 start; /* Start offset always 0 for dri2 */
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
 
bool is_ggtt;
 
struct i915_page_scratch *scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
331,6 → 333,8
u32 flags);
};
 
#define i915_is_ggtt(V) ((V)->is_ggtt)
 
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
343,6 → 347,8
 
size_t stolen_size; /* Total size of stolen memory */
size_t stolen_usable_size; /* Total size minus BIOS reserved */
size_t stolen_reserved_base;
size_t stolen_reserved_size;
u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
417,7 → 423,7
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
uint32_t pde_shift)
{
const uint64_t mask = ~((1 << pde_shift) - 1);
const uint64_t mask = ~((1ULL << pde_shift) - 1);
uint64_t end;
 
WARN_ON(length == 0);
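The 1ULL suffix in the hunk above keeps the mask computation in 64-bit arithmetic; with a plain int literal the shift is evaluated in 32 bits and is undefined for pde_shift >= 31. A standalone illustration (the shift value 21 is only an example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pde_shift = 21;	/* example value */
	uint64_t mask = ~((1ULL << pde_shift) - 1);

	/* prints: mask = 0xffffffffffe00000 */
	printf("mask = 0x%016llx\n", (unsigned long long)mask);
	return 0;
}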
/drivers/video/drm/i915/i915_gem_stolen.c
392,6 → 392,9
return 0;
}
 
dev_priv->gtt.stolen_reserved_base = reserved_base;
dev_priv->gtt.stolen_reserved_size = reserved_size;
 
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
503,6 → 506,9
if (obj->pages == NULL)
goto cleanup;
 
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
 
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
 
566,6 → 572,8
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
 
lockdep_assert_held(&dev->struct_mutex);
 
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
stolen_offset, gtt_offset, size);
 
623,7 → 631,7
 
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
list_add_tail(&vma->vm_link, &ggtt->inactive_list);
}
 
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
/drivers/video/drm/i915/i915_gpu_error.c
27,10 → 27,9
*
*/
 
#define UTS_RELEASE " 4.6.7 "
#include "i915_drv.h"
 
#if 0
 
static const char *ring_str(int ring)
{
switch (ring) {
366,6 → 365,10
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
err_printf(m, "PCI Subsystem: %04x:%04x\n",
dev->pdev->subsystem_vendor,
dev->pdev->subsystem_device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
if (HAS_CSR(dev)) {
511,8 → 514,8
}
}
 
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
// if (error->overlay)
// intel_overlay_print_error_state(m, error->overlay);
 
if (error->display)
intel_display_print_error_state(m, dev, error->display);
733,7 → 736,7
struct i915_vma *vma;
int i = 0;
 
list_for_each_entry(vma, head, mm_list) {
list_for_each_entry(vma, head, vm_link) {
capture_bo(err++, vma);
if (++i == count)
break;
756,7 → 759,7
if (err == last)
break;
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == vm && vma->pin_count > 0)
capture_bo(err++, vma);
}
1029,18 → 1032,7
i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj);
 
if (request->pid) {
struct task_struct *task;
 
rcu_read_lock();
task = pid_task(request->pid, PIDTYPE_PID);
if (task) {
strcpy(error->ring[i].comm, task->comm);
error->ring[i].pid = task->pid;
}
rcu_read_unlock();
}
}
 
if (i915.enable_execlists) {
/* TODO: This is only a small fix to keep basic error
1051,7 → 1043,7
if (request)
rbuf = request->ctx->engine[ring->id].ringbuf;
else
rbuf = ring->default_context->engine[ring->id].ringbuf;
rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
} else
rbuf = ring->buffer;
 
1124,12 → 1116,12
int i;
 
i = 0;
list_for_each_entry(vma, &vm->active_list, mm_list)
list_for_each_entry(vma, &vm->active_list, vm_link)
i++;
error->active_bo_count[ndx] = i;
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == vm && vma->pin_count > 0)
i++;
}
1338,9 → 1330,9
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
 
do_gettimeofday(&error->time);
// do_gettimeofday(&error->time);
 
error->overlay = intel_overlay_capture_error_state(dev);
// error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
 
i915_error_capture_msg(dev, error, wedged, error_msg);
1400,7 → 1392,6
if (error)
kref_put(&error->ref, i915_error_state_free);
}
#endif
 
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
/drivers/video/drm/i915/i915_guc_reg.h
40,6 → 40,7
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
 
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16
 
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define UOS_RSA_SCRATCH_MAX_COUNT 64
/drivers/video/drm/i915/i915_guc_submission.c
158,10 → 158,8
 
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev_priv->dev) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
(IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
(IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
if (!intel_enable_rc6(dev) ||
NEEDS_WaRsDisableCoarsePowerGating(dev))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
246,6 → 244,9
db_exc.cookie = 1;
}
 
/* Finally, update the cached copy of the GuC's WQ head */
gc->wq_head = desc->head;
 
kunmap_atomic(base);
return ret;
}
375,6 → 376,8
static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *ring;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
387,10 → 390,8
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct guc_execlist_context *lrc = &desc.lrc[i];
struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
struct intel_engine_cs *ring;
for_each_ring(ring, dev_priv, i) {
struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
 
405,7 → 406,6
if (!obj)
break; /* XXX: continue? */
 
ring = ringbuf->ring;
ctx_desc = intel_lr_context_descriptor(ctx, ring);
lrc->context_desc = (u32)ctx_desc;
 
413,9 → 413,9
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(ring->id << GUC_ELC_ENGINE_OFFSET);
(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
 
obj = ringbuf->obj;
obj = ctx->engine[i].ringbuf->obj;
 
lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
422,7 → 422,7
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
 
desc.engines_used |= (1 << ring->id);
desc.engines_used |= (1 << ring->guc_id);
}
 
WARN_ON(desc.engines_used == 0);
471,8 → 471,7
sizeof(desc) * client->ctx_index);
}
 
/* Get valid workqueue item and return it back to offset */
static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
void *base;
479,20 → 478,23
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
 
if (!gc)
return 0;
 
/* Quickly return if wq space is available, judged from the head
* position we cached last time. */
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
return 0;
 
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
 
while (timeout_counter-- > 0) {
if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
*offset = gc->wq_tail;
gc->wq_head = desc->head;
 
/* advance the tail for next workqueue item */
gc->wq_tail += size;
gc->wq_tail &= gc->wq_size - 1;
 
/* this will break the loop */
timeout_counter = 0;
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
ret = 0;
break;
}
 
if (timeout_counter)
507,16 → 509,19
static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct drm_i915_gem_request *rq)
{
enum intel_ring_id ring_id = rq->ring->id;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off = 0;
int ret;
u32 tail, wq_len, wq_off, space;
 
ret = guc_get_workqueue_space(gc, &wq_off);
if (ret)
return ret;
space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
if (WARN_ON(space < sizeof(struct guc_wq_item)))
return -ENOSPC; /* shouldn't happen */
 
/* postincrement WQ tail for next time */
wq_off = gc->wq_tail;
gc->wq_tail += sizeof(struct guc_wq_item);
gc->wq_tail &= gc->wq_size - 1;
 
/* For now a workqueue item is 4 DWs and the workqueue buffer is 2 pages,
* so a wqi can never straddle a page boundary or wrap back to the
* beginning of the buffer. This simplifies the implementation below.
537,7 → 542,7
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
wqi->header = WQ_TYPE_INORDER |
(wq_len << WQ_LEN_SHIFT) |
(ring_id << WQ_TARGET_SHIFT) |
(rq->ring->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
 
/* The GuC wants only the low-order word of the context descriptor */
553,29 → 558,6
return 0;
}
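The head/tail bookkeeping above relies on CIRC_SPACE() from <linux/circ_buf.h>. A standalone sketch, with the two macros copied from that header (the queue size and indices are made-up example values):

#include <stdio.h>

#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size)	CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int wq_size = 8192;	/* e.g. a two-page work queue */
	unsigned int wq_tail = 8128;	/* producer index (the driver) */
	unsigned int wq_head = 64;	/* consumer index (the GuC) */

	/* prints: space = 127 -- room for one 16-byte guc_wq_item */
	printf("space = %u\n", CIRC_SPACE(wq_tail, wq_head, wq_size));
	return 0;
}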
 
#define CTX_RING_BUFFER_START 0x08
 
/* Update the ringbuffer pointer in a saved context image */
static void lr_context_update(struct drm_i915_gem_request *rq)
{
enum intel_ring_id ring_id = rq->ring->id;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
struct page *page;
uint32_t *reg_state;
 
BUG_ON(!ctx_obj);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
kunmap_atomic(reg_state);
}
 
/**
* i915_guc_submit() - Submit commands through GuC
* @client: the guc client where commands will go through
587,18 → 569,14
struct drm_i915_gem_request *rq)
{
struct intel_guc *guc = client->guc;
enum intel_ring_id ring_id = rq->ring->id;
unsigned int engine_id = rq->ring->guc_id;
int q_ret, b_ret;
 
/* Need this because of the deferred pin ctx and ring */
/* Shall we move this right after ring is pinned? */
lr_context_update(rq);
 
q_ret = guc_add_workqueue_item(client, rq);
if (q_ret == 0)
b_ret = guc_ring_doorbell(client);
 
client->submissions[ring_id] += 1;
client->submissions[engine_id] += 1;
if (q_ret) {
client->q_fail += 1;
client->retcode = q_ret;
608,8 → 586,8
} else {
client->retcode = 0;
}
guc->submissions[ring_id] += 1;
guc->last_seqno[ring_id] = rq->seqno;
guc->submissions[engine_id] += 1;
guc->last_seqno[engine_id] = rq->seqno;
 
return q_ret;
}
832,7 → 810,97
guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
 
static void init_guc_policies(struct guc_policies *policies)
{
struct guc_policy *policy;
u32 p, i;
 
policies->dpc_promote_time = 500000;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
 
for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
 
policy->execution_quantum = 1000000;
policy->preemption_time = 500000;
policy->fault_time = 250000;
policy->policy_flags = 0;
}
}
 
policies->is_valid = 1;
}
 
static void guc_create_ads(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *ring;
struct page *page;
u32 size, i;
 
/* The ads obj includes the struct itself and buffers passed to GuC */
size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
sizeof(struct guc_mmio_reg_state) +
GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
 
obj = guc->ads_obj;
if (!obj) {
obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
if (!obj)
return;
 
guc->ads_obj = obj;
}
 
page = i915_gem_object_get_page(obj, 0);
ads = kmap(page);
 
/*
* The GuC requires a "Golden Context" when it reinitialises
* engines after a reset. Here we use the Render ring default
* context, which must already exist and be pinned in the GGTT,
* so its address won't change after we've told the GuC where
* to find it.
*/
ring = &dev_priv->ring[RCS];
ads->golden_context_lrca = ring->status_page.gfx_addr;
 
for_each_ring(ring, dev_priv, i)
ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
 
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
init_guc_policies(policies);
 
ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
sizeof(struct guc_ads);
 
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
 
for_each_ring(ring, dev_priv, i) {
reg_state->mmio_white_list[ring->guc_id].mmio_start =
ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
 
/* Nothing to be saved or restored for now. */
reg_state->mmio_white_list[ring->guc_id].count = 0;
}
 
ads->reg_state_addr = ads->scheduler_policies +
sizeof(struct guc_policies);
 
ads->reg_state_buffer = ads->reg_state_addr +
sizeof(struct guc_mmio_reg_state);
 
kunmap(page);
}
 
/*
* Set up the memory resources to be shared with the GuC. At this point,
* we require just one object that can be mapped through the GGTT.
*/
858,6 → 926,8
 
guc_create_log(guc);
 
guc_create_ads(guc);
 
return 0;
}
 
865,7 → 935,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
struct intel_context *ctx = dev_priv->ring[RCS].default_context;
struct intel_context *ctx = dev_priv->kernel_context;
struct i915_guc_client *client;
 
/* client for execbuf submission */
896,6 → 966,9
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
 
gem_release_guc_obj(dev_priv->guc.ads_obj);
guc->ads_obj = NULL;
 
gem_release_guc_obj(dev_priv->guc.log_obj);
guc->log_obj = NULL;
 
919,7 → 992,7
if (!i915.enable_guc_submission)
return 0;
 
ctx = dev_priv->ring[RCS].default_context;
ctx = dev_priv->kernel_context;
 
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
945,7 → 1018,7
if (!i915.enable_guc_submission)
return 0;
 
ctx = dev_priv->ring[RCS].default_context;
ctx = dev_priv->kernel_context;
 
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
/drivers/video/drm/i915/i915_irq.c
401,6 → 401,7
 
spin_unlock_irq(&dev_priv->irq_lock);
 
synchronize_irq(dev->irq);
}
 
/**
1635,6 → 1636,12
int pipe;
 
spin_lock(&dev_priv->irq_lock);
 
if (!dev_priv->display_irqs_enabled) {
spin_unlock(&dev_priv->irq_lock);
return;
}
 
for_each_pipe(dev_priv, pipe) {
i915_reg_t reg;
u32 mask, iir_bit = 0;
2172,10 → 2179,6
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
 
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2252,43 → 2255,20
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
 
static irqreturn_t gen8_irq_handler(int irq, void *arg)
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl;
struct drm_device *dev = dev_priv->dev;
irqreturn_t ret = IRQ_NONE;
uint32_t tmp = 0;
u32 iir;
enum pipe pipe;
u32 aux_mask = GEN8_AUX_CHANNEL_A;
 
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
if (INTEL_INFO(dev_priv)->gen >= 9)
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
 
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
goto out;
 
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
 
/* Find, clear, then process each source of interrupt */
 
ret = gen8_gt_irq_handler(dev_priv, master_ctl);
 
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
if (tmp) {
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
iir = I915_READ(GEN8_DE_MISC_IIR);
if (iir) {
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
if (tmp & GEN8_DE_MISC_GSE)
if (iir & GEN8_DE_MISC_GSE)
intel_opregion_asle_intr(dev);
else
DRM_ERROR("Unexpected DE Misc interrupt\n");
2298,33 → 2278,40
}
 
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
if (tmp) {
iir = I915_READ(GEN8_DE_PORT_IIR);
if (iir) {
u32 tmp_mask;
bool found = false;
u32 hotplug_trigger = 0;
 
if (IS_BROXTON(dev_priv))
hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
else if (IS_BROADWELL(dev_priv))
hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
 
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
 
if (tmp & aux_mask) {
tmp_mask = GEN8_AUX_CHANNEL_A;
if (INTEL_INFO(dev_priv)->gen >= 9)
tmp_mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
 
if (iir & tmp_mask) {
dp_aux_irq_handler(dev);
found = true;
}
 
if (hotplug_trigger) {
if (IS_BROXTON(dev))
bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
else
ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
if (IS_BROXTON(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
found = true;
}
} else if (IS_BROADWELL(dev_priv)) {
tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
if (tmp_mask) {
ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
found = true;
}
}
 
if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev);
found = true;
}
2337,24 → 2324,29
}
 
for_each_pipe(dev_priv, pipe) {
uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
u32 flip_done, fault_errors;
 
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
 
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (pipe_iir) {
iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
continue;
}
 
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
 
if (pipe_iir & GEN8_PIPE_VBLANK &&
if (iir & GEN8_PIPE_VBLANK &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
 
flip_done = iir;
if (INTEL_INFO(dev_priv)->gen >= 9)
flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
else
flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
 
if (flip_done) {
intel_prepare_page_flip(dev, pipe);
2361,25 → 2353,22
intel_finish_page_flip_plane(dev, pipe);
}
 
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
 
fault_errors = iir;
if (INTEL_INFO(dev_priv)->gen >= 9)
fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 
if (fault_errors)
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
pipe_name(pipe),
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
fault_errors);
}
 
if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2389,15 → 2378,15
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
u32 pch_iir = I915_READ(SDEIIR);
if (pch_iir) {
I915_WRITE(SDEIIR, pch_iir);
iir = I915_READ(SDEIIR);
if (iir) {
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
 
if (HAS_PCH_SPT(dev_priv))
spt_irq_handler(dev, pch_iir);
spt_irq_handler(dev, iir);
else
cpt_irq_handler(dev, pch_iir);
cpt_irq_handler(dev, iir);
} else {
/*
* Like on previous PCH there seems to be something
2407,10 → 2396,36
}
}
 
return ret;
}
 
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl;
irqreturn_t ret;
 
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
return IRQ_NONE;
 
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
 
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
 
/* Find, clear, then process each source of interrupt */
ret = gen8_gt_irq_handler(dev_priv, master_ctl);
ret |= gen8_de_irq_handler(dev_priv, master_ctl);
 
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ_FW(GEN8_MASTER_IRQ);
 
out:
enable_rpm_wakeref_asserts(dev_priv);
 
return ret;
2481,6 → 2496,8
*/
intel_runtime_pm_get(dev_priv);
 
intel_prepare_reset(dev);
 
/*
* All state reset _must_ be completed before we update the
* reset counter, for otherwise waiters might miss the reset
2487,9 → 2504,9
* pending state and not properly drop locks, resulting in
* deadlocks with the reset work.
*/
// ret = i915_reset(dev);
ret = i915_reset(dev);
 
// intel_finish_reset(dev);
intel_finish_reset(dev);
 
intel_runtime_pm_put(dev_priv);
 
2632,7 → 2649,7
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
 
// i915_capture_error_state(dev);
i915_capture_error_state(dev, wedged, error_msg);
i915_report_and_clear_eir(dev);
 
if (wedged) {
2924,14 → 2941,44
ring->hangcheck.deadlock = 0;
}
 
static bool subunits_stuck(struct intel_engine_cs *ring)
{
u32 instdone[I915_NUM_INSTDONE_REG];
bool stuck;
int i;
 
if (ring->id != RCS)
return true;
 
i915_get_extra_instdone(ring->dev, instdone);
 
/* There might be unstable subunit states even when the
 * actual head is not moving. Filter out the unstable ones by
 * accumulating the undone -> done transitions and only
 * considering those as progress.
 */
stuck = true;
for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
 
if (tmp != ring->hangcheck.instdone[i])
stuck = false;
 
ring->hangcheck.instdone[i] |= tmp;
}
 
return stuck;
}
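A worked example of the accumulation rule (the mask values are made up): a fresh INSTDONE sample only counts as progress if it contributes a bit that has not been seen since the head last moved.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int accumulated = 0x3;	/* bits seen so far */
	unsigned int sample = 0x4;	/* fresh INSTDONE read */
	unsigned int tmp = sample | accumulated;
	bool progress = (tmp != accumulated);	/* true: bit 0x4 is new */

	accumulated |= tmp;
	/* prints: progress=1 accumulated=0x7 */
	printf("progress=%d accumulated=0x%x\n", progress, accumulated);
	return 0;
}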
 
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
head_stuck(struct intel_engine_cs *ring, u64 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
if (acthd != ring->hangcheck.acthd) {
 
if (acthd != ring->hangcheck.acthd) {
/* Clear subunit states on head movement */
memset(ring->hangcheck.instdone, 0,
sizeof(ring->hangcheck.instdone));
 
if (acthd > ring->hangcheck.max_acthd) {
ring->hangcheck.max_acthd = acthd;
return HANGCHECK_ACTIVE;
2940,6 → 2987,24
return HANGCHECK_ACTIVE_LOOP;
}
 
if (!subunits_stuck(ring))
return HANGCHECK_ACTIVE;
 
return HANGCHECK_HUNG;
}
 
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_ring_hangcheck_action ha;
u32 tmp;
 
ha = head_stuck(ring, acthd);
if (ha != HANGCHECK_HUNG)
return ha;
 
if (IS_GEN2(dev))
return HANGCHECK_HUNG;
 
3007,6 → 3072,12
*/
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
for_each_ring(ring, dev_priv, i) {
u64 acthd;
u32 seqno;
3081,7 → 3152,11
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
 
/* Clear head and subunit states on seqno movement */
ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
 
memset(ring->hangcheck.instdone, 0,
sizeof(ring->hangcheck.instdone));
}
 
ring->hangcheck.seqno = seqno;
3098,11 → 3173,36
}
}
 
// if (rings_hung)
// return i915_handle_error(dev, true);
if (rings_hung) {
i915_handle_error(dev, true, "Ring hung");
goto out;
}
 
if (busy_count)
/* Reset the timer in case the chip hangs without another
* request being added */
i915_queue_hangcheck(dev);
 
out:
ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}
 
void i915_queue_hangcheck(struct drm_device *dev)
{
struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
 
if (!i915.enable_hangcheck)
return;
 
/* Don't continually defer the hangcheck so that it is always run at
* least once after work has been scheduled on any ring. Otherwise,
* we will ignore a hung ring if a second ring is kept busy.
*/
 
queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}
 
static void ibx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
3227,23 → 3327,30
unsigned int pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
 
spin_lock_irq(&dev_priv->irq_lock);
if (pipe_mask & 1 << PIPE_A)
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
dev_priv->de_irq_mask[PIPE_A],
~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
if (pipe_mask & 1 << PIPE_B)
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
dev_priv->de_irq_mask[PIPE_B],
~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
if (pipe_mask & 1 << PIPE_C)
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
dev_priv->de_irq_mask[PIPE_C],
~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
~dev_priv->de_irq_mask[pipe] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
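for_each_pipe_masked() replaces the three open-coded per-pipe branches here and is reused by the new pre-disable helper below. Roughly what it expands to (paraphrased from this kernel's i915_drv.h, not quoted verbatim):

#define for_each_pipe_masked(dev_priv, p, mask)				\
	for ((p) = 0; (p) < INTEL_INFO(dev_priv)->num_pipes; (p)++)	\
		for_each_if((mask) & (1 << (p)))

/* where for_each_if(cond) is the usual "if (!(cond)) {} else" idiom */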
 
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask)
{
enum pipe pipe;
 
spin_lock_irq(&dev_priv->irq_lock);
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
spin_unlock_irq(&dev_priv->irq_lock);
 
/* make sure we're done processing display irqs */
synchronize_irq(dev_priv->dev->irq);
}
 
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4569,6 → 4676,7
{
dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
dev_priv->pm.irqs_enabled = false;
synchronize_irq(dev_priv->dev->irq);
}
 
/**
/drivers/video/drm/i915/i915_params.c
22,6 → 22,7
* IN THE SOFTWARE.
*/
 
#include "i915_params.h"
#include "i915_drv.h"
 
struct i915_params i915 __read_mostly = {
35,7 → 36,7
.enable_dc = -1,
.enable_fbc = -1,
.enable_execlists = -1,
.enable_hangcheck = true,
.enable_hangcheck = false,
.enable_ppgtt = -1,
.enable_psr = 0,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
48,7 → 49,6
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 0,
.disable_vtd_wa = 1,
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
93,7 → 93,7
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
 
module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
103,7 → 103,7
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
 
module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
128,9 → 128,11
"(-1=auto [default], 0=disabled, 1=enabled)");
 
module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
 
module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support.");
 
164,12 → 166,9
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
 
module_param_named(disable_display, i915.disable_display, bool, 0600);
module_param_named(disable_display, i915.disable_display, bool, 0400);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
 
module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
 
module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
/drivers/video/drm/i915/i915_params.h
0,0 → 1,69
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
 
#ifndef _I915_PARAMS_H_
#define _I915_PARAMS_H_
 
#include <linux/cache.h> /* for __read_mostly */
 
struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_dc;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
int edp_vswing;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool load_detect_test;
bool reset;
bool disable_display;
bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
char *log_file;
char *cmdline_mode;
};
 
extern struct i915_params i915 __read_mostly;
 
#endif
 
/drivers/video/drm/i915/i915_reg.h
610,16 → 610,17
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
#define IOSF_SB_BUSY (1<<0)
#define IOSF_PORT_BUNIT 0x3
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_BUNIT 0x03
#define IOSF_PORT_PUNIT 0x04
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
#define IOSF_PORT_DPIO_2 0x1a
#define IOSF_PORT_GPIO_NC 0x13
#define IOSF_PORT_CCK 0x14
#define IOSF_PORT_CCU 0xA9
#define IOSF_PORT_GPS_CORE 0x48
#define IOSF_PORT_FLISDSI 0x1B
#define IOSF_PORT_DPIO_2 0x1a
#define IOSF_PORT_FLISDSI 0x1b
#define IOSF_PORT_GPIO_SC 0x48
#define IOSF_PORT_GPIO_SUS 0xa8
#define IOSF_PORT_CCU 0xa9
#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)
 
1635,6 → 1636,9
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
 
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
#define RING_MAX_NONPRIV_SLOTS 12
 
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
 
#if 0
1711,6 → 1715,11
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1<<31)
 
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
#define CLAIM_ER_CLR (1 << 31)
#define CLAIM_ER_OVERFLOW (1 << 16)
#define CLAIM_ER_CTR_MASK 0xffff
 
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
#define DERRMR_PIPEA_SCANLINE (1<<0)
5948,6 → 5957,7
#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
#define IVB_PIPE_C_DISABLE (1 << 28)
#define ILK_HDCP_DISABLE (1 << 25)
#define ILK_eDP_A_DISABLE (1 << 24)
#define HSW_CDCLK_LIMIT (1 << 24)
5994,10 → 6004,19
#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
 
#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
 
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
 
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
 
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
6043,6 → 6062,8
#define HDC_FORCE_NON_COHERENT (1<<4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
 
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
 
/* GEN9 chicken */
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
6773,6 → 6794,16
 
#define VLV_PMWGICZ _MMIO(0x1300a4)
 
#define RC6_LOCATION _MMIO(0xD40)
#define RC6_CTX_IN_DRAM (1 << 0)
#define RC6_CTX_BASE _MMIO(0xD48)
#define RC6_CTX_BASE_MASK 0xFFFFFFF0
#define PWRCTX_MAXCNT_RCSUNIT _MMIO(0x2054)
#define PWRCTX_MAXCNT_VCSUNIT0 _MMIO(0x12054)
#define PWRCTX_MAXCNT_BCSUNIT _MMIO(0x22054)
#define PWRCTX_MAXCNT_VECSUNIT _MMIO(0x1A054)
#define PWRCTX_MAXCNT_VCSUNIT1 _MMIO(0x1C054)
#define IDLE_TIME_MASK 0xFFFFF
#define FORCEWAKE _MMIO(0xA18C)
#define FORCEWAKE_VLV _MMIO(0x1300b0)
#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
6911,6 → 6942,7
#define GEN6_RPDEUC _MMIO(0xA084)
#define GEN6_RPDEUCSW _MMIO(0xA088)
#define GEN6_RC_STATE _MMIO(0xA094)
#define RC6_STATE (1 << 18)
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
7545,6 → 7577,7
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
 
#define DC_STATE_DEBUG _MMIO(0x45520)
#define DC_STATE_DEBUG_MASK_CORES (1<<0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
 
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
8164,4 → 8197,11
#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
 
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
 
#endif /* _I915_REG_H_ */
/drivers/video/drm/i915/i915_trace.h
4,6 → 4,10
//#include <linux/stringify.h>
#include <linux/types.h>
 
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
 
#define trace_i915_gem_object_create(x)
#define trace_i915_gem_object_destroy(x)
/drivers/video/drm/i915/intel_atomic.c
98,6 → 98,7
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;
 
return &crtc_state->base;
}
309,5 → 310,5
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
state->dpll_set = false;
state->dpll_set = state->modeset = false;
}
/drivers/video/drm/i915/intel_atomic_plane.c
152,9 → 152,9
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
crtc_state->base.active ? crtc_state->pipe_src_w : 0;
crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 =
crtc_state->base.active ? crtc_state->pipe_src_h : 0;
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
 
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
194,8 → 194,14
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
 
intel_plane->commit_plane(plane, intel_state);
if (intel_state->visible)
intel_plane->update_plane(plane,
to_intel_crtc_state(crtc->state),
intel_state);
else
intel_plane->disable_plane(plane, crtc);
}
 
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
/drivers/video/drm/i915/intel_audio.c
571,7 → 571,7
if (IS_G4X(dev)) {
dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
} else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
/drivers/video/drm/i915/intel_bios.c
31,11 → 31,49
#include "i915_drv.h"
#include "intel_bios.h"
 
/**
* DOC: Video BIOS Table (VBT)
*
* The Video BIOS Table, or VBT, provides platform and board specific
* configuration information to the driver that is not discoverable or available
* through other means. The configuration is mostly related to display
* hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
* the PCI ROM.
*
* The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
* Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
* contain the actual configuration information. The VBT Header, and thus the
* VBT, begins with "$VBT" signature. The VBT Header contains the offset of the
* BDB Header. The data blocks are concatenated after the BDB Header. The data
* blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of
* data. (Block 53, the MIPI Sequence Block, is an exception.)
*
* The driver parses the VBT during load. The relevant information is stored in
* driver private data for ease of use, and the actual VBT is not read after
* that.
*/
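A minimal self-contained sketch of the block walk described above, mirroring what find_section() below does: a 1-byte Block ID, a 2-byte little-endian Block Size, then the data. (The MIPI Sequence v3+ size quirk handled by _get_blocksize() is ignored here; the sample bytes are made up, and a little-endian host is assumed, as in the driver.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t *find_block(const uint8_t *blocks, size_t total,
				 uint8_t wanted_id, uint16_t *size)
{
	size_t index = 0;

	while (index + 3 <= total) {
		uint8_t id = blocks[index];
		uint16_t len;

		memcpy(&len, &blocks[index + 1], sizeof(len));
		index += 3;
		if (index + len > total)
			return NULL;	/* truncated block */
		if (id == wanted_id) {
			*size = len;
			return &blocks[index];
		}
		index += len;
	}
	return NULL;
}

int main(void)
{
	const uint8_t blocks[] = { 1, 2, 0, 0xaa, 0xbb,	/* id 1, 2 bytes */
				   2, 1, 0, 0xcc };	/* id 2, 1 byte */
	uint16_t size;
	const uint8_t *data = find_block(blocks, sizeof(blocks), 2, &size);

	if (data)	/* prints: block 2: 1 byte(s), first = 0xcc */
		printf("block 2: %u byte(s), first = 0x%02x\n",
		       (unsigned)size, data[0]);
	return 0;
}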
 
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
 
static int panel_type;
 
/* Get BDB block size given a pointer to Block ID. */
static u32 _get_blocksize(const u8 *block_base)
{
/* The MIPI Sequence Block v3+ has a separate size field. */
if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
return *((const u32 *)(block_base + 4));
else
return *((const u16 *)(block_base + 1));
}
 
/* Get BDB block size given a pointer to data after Block ID and Block Size. */
static u32 get_blocksize(const void *block_data)
{
return _get_blocksize(block_data - 3);
}
 
static const void *
find_section(const void *_bdb, int section_id)
{
52,15 → 90,9
/* walk the sections looking for section_id */
while (index + 3 < total) {
current_id = *(base + index);
index++;
current_size = _get_blocksize(base + index);
index += 3;
 
current_size = *((const u16 *)(base + index));
index += 2;
 
/* The MIPI Sequence Block v3+ has a separate size field. */
if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
current_size = *((const u32 *)(base + index + 1));
 
if (index + current_size > total)
return NULL;
 
73,16 → 105,6
return NULL;
}
 
static u16
get_blocksize(const void *p)
{
u16 *block_ptr, block_size;
 
block_ptr = (u16 *)((char *)p - 2);
block_size = *block_ptr;
return block_size;
}
 
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
const struct lvds_dvo_timing *dvo_timing)
675,84 → 697,13
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
}
 
static u8 *goto_next_sequence(u8 *data, int *size)
{
u16 len;
int tmp = *size;
 
if (--tmp < 0)
return NULL;
 
/* goto first element */
data++;
while (1) {
switch (*data) {
case MIPI_SEQ_ELEM_SEND_PKT:
/*
* skip by this element payload size
* skip elem id, command flag and data type
*/
tmp -= 5;
if (tmp < 0)
return NULL;
 
data += 3;
len = *((u16 *)data);
 
tmp -= len;
if (tmp < 0)
return NULL;
 
/* skip by len */
data = data + 2 + len;
break;
case MIPI_SEQ_ELEM_DELAY:
/* skip by elem id, and delay is 4 bytes */
tmp -= 5;
if (tmp < 0)
return NULL;
 
data += 5;
break;
case MIPI_SEQ_ELEM_GPIO:
tmp -= 3;
if (tmp < 0)
return NULL;
 
data += 3;
break;
default:
DRM_ERROR("Unknown element\n");
return NULL;
}
 
/* end of sequence ? */
if (*data == 0)
break;
}
 
/* goto next sequence or end of block byte */
if (--tmp < 0)
return NULL;
 
data++;
 
/* update amount of data left for the sequence block to be parsed */
*size = tmp;
return data;
}
 
static void
parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
parse_mipi_config(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_mipi_config *start;
const struct bdb_mipi_sequence *sequence;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
u8 *data;
const u8 *seq_data;
int i, panel_id, seq_size;
u16 block_size;
 
/* parse MIPI blocks only if LFP type is MIPI */
if (!dev_priv->vbt.has_mipi)
798,104 → 749,233
 
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
 
/* Check if we have sequence block as well */
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
if (!sequence) {
DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
return;
/* Find the sequence block and size for the given panel. */
static const u8 *
find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
u16 panel_id, u32 *seq_size)
{
u32 total = get_blocksize(sequence);
const u8 *data = &sequence->data[0];
u8 current_id;
u32 current_size;
int header_size = sequence->version >= 3 ? 5 : 3;
int index = 0;
int i;
 
/* skip new block size */
if (sequence->version >= 3)
data += 4;
 
for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
if (index + header_size > total) {
DRM_ERROR("Invalid sequence block (header)\n");
return NULL;
}
 
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 3) {
DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
return;
current_id = *(data + index);
if (sequence->version >= 3)
current_size = *((const u32 *)(data + index + 1));
else
current_size = *((const u16 *)(data + index + 1));
 
index += header_size;
 
if (index + current_size > total) {
DRM_ERROR("Invalid sequence block\n");
return NULL;
}
 
DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
if (current_id == panel_id) {
*seq_size = current_size;
return data + index;
}
 
block_size = get_blocksize(sequence);
index += current_size;
}
 
DRM_ERROR("Sequence block detected but no valid configuration\n");
 
return NULL;
}
 
static int goto_next_sequence(const u8 *data, int index, int total)
{
u16 len;
 
/* Skip Sequence Byte. */
for (index = index + 1; index < total; index += len) {
u8 operation_byte = *(data + index);
index++;
 
switch (operation_byte) {
case MIPI_SEQ_ELEM_END:
return index;
case MIPI_SEQ_ELEM_SEND_PKT:
if (index + 4 > total)
return 0;
 
len = *((const u16 *)(data + index + 2)) + 4;
break;
case MIPI_SEQ_ELEM_DELAY:
len = 4;
break;
case MIPI_SEQ_ELEM_GPIO:
len = 2;
break;
case MIPI_SEQ_ELEM_I2C:
if (index + 7 > total)
return 0;
len = *(data + index + 6) + 7;
break;
default:
DRM_ERROR("Unknown operation byte\n");
return 0;
}
}
 
return 0;
}
 
static int goto_next_sequence_v3(const u8 *data, int index, int total)
{
int seq_end;
u16 len;
u32 size_of_sequence;
 
/*
* parse the sequence block for individual sequences
* Could skip sequence based on Size of Sequence alone, but also do some
* checking on the structure.
*/
dev_priv->vbt.dsi.seq_version = sequence->version;
if (total < 5) {
DRM_ERROR("Too small sequence size\n");
return 0;
}
 
seq_data = &sequence->data[0];
/* Skip Sequence Byte. */
index++;
 
/*
* sequence block is variable length and hence we need to parse and
* get the sequence data for specific panel id
* Size of Sequence. Excludes the Sequence Byte and the size itself,
* includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
* byte.
*/
for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
panel_id = *seq_data;
seq_size = *((u16 *) (seq_data + 1));
if (panel_id == panel_type)
break;
size_of_sequence = *((const uint32_t *)(data + index));
index += 4;
 
/* skip the sequence including seq header of 3 bytes */
seq_data = seq_data + 3 + seq_size;
if ((seq_data - &sequence->data[0]) > block_size) {
DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
return;
seq_end = index + size_of_sequence;
if (seq_end > total) {
DRM_ERROR("Invalid sequence size\n");
return 0;
}
 
for (; index < total; index += len) {
u8 operation_byte = *(data + index);
index++;
 
if (operation_byte == MIPI_SEQ_ELEM_END) {
if (index != seq_end) {
DRM_ERROR("Invalid element structure\n");
return 0;
}
return index;
}
 
if (i == MAX_MIPI_CONFIGURATIONS) {
DRM_ERROR("Sequence block detected but no valid configuration\n");
len = *(data + index);
index++;
 
/*
* FIXME: Would be nice to check elements like for v1/v2 in
* goto_next_sequence() above.
*/
switch (operation_byte) {
case MIPI_SEQ_ELEM_SEND_PKT:
case MIPI_SEQ_ELEM_DELAY:
case MIPI_SEQ_ELEM_GPIO:
case MIPI_SEQ_ELEM_I2C:
case MIPI_SEQ_ELEM_SPI:
case MIPI_SEQ_ELEM_PMIC:
break;
default:
DRM_ERROR("Unknown operation byte %u\n",
operation_byte);
break;
}
}
 
return 0;
}
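For reference, a minimal well-formed v3 sequence under the layout this parser expects (all bytes made up); goto_next_sequence_v3() called with index = 0 on such a buffer returns 12:

static const unsigned char example_v3_sequence[] = {
	0x02,			/* Sequence Byte: MIPI_SEQ_INIT_OTP */
	0x07, 0x00, 0x00, 0x00,	/* Size of Sequence = 7; includes the
				 * MIPI_SEQ_ELEM_END byte below */
	0x02, 0x04,		/* MIPI_SEQ_ELEM_DELAY, payload length 4 */
	0x0a, 0x00, 0x00, 0x00,	/* 10 us delay */
	0x00,			/* MIPI_SEQ_ELEM_END */
};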
 
static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;
u8 *data;
int index = 0;
 
/* Only our generic panel driver uses the sequence block. */
if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
 
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
if (!sequence) {
DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
return;
}
 
/* check if found sequence is completely within the sequence block
* just being paranoid */
if (seq_size > block_size) {
DRM_ERROR("Corrupted sequence/size, bailing out\n");
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 4) {
DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
sequence->version);
return;
}
 
/* skip the panel id(1 byte) and seq size(2 bytes) */
dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
if (!dev_priv->vbt.dsi.data)
DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
 
seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
if (!seq_data)
return;
 
/*
* loop into the sequence data and split into multiple sequences
* There are only 5 types of sequences as of now
*/
data = dev_priv->vbt.dsi.data;
dev_priv->vbt.dsi.size = seq_size;
data = kmemdup(seq_data, seq_size, GFP_KERNEL);
if (!data)
return;
 
/* two consecutive 0x00 indicate end of all sequences */
while (1) {
int seq_id = *data;
if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
dev_priv->vbt.dsi.sequence[seq_id] = data;
DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
} else {
DRM_ERROR("undefined sequence\n");
/* Parse the sequences, store pointers to each sequence. */
for (;;) {
u8 seq_id = *(data + index);
if (seq_id == MIPI_SEQ_END)
break;
 
if (seq_id >= MIPI_SEQ_MAX) {
DRM_ERROR("Unknown sequence %u\n", seq_id);
goto err;
}
 
/* partial parsing to skip elements */
data = goto_next_sequence(data, &seq_size);
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
 
if (data == NULL) {
DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
if (sequence->version >= 3)
index = goto_next_sequence_v3(data, index, seq_size);
else
index = goto_next_sequence(data, index, seq_size);
if (!index) {
DRM_ERROR("Invalid sequence %u\n", seq_id);
goto err;
}
 
if (*data == 0)
break; /* end of sequence reached */
}
 
DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
dev_priv->vbt.dsi.data = data;
dev_priv->vbt.dsi.size = seq_size;
dev_priv->vbt.dsi.seq_version = sequence->version;
 
DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
return;
 
err:
kfree(dev_priv->vbt.dsi.data);
dev_priv->vbt.dsi.data = NULL;
 
/* error during parsing so set all pointers to null
* because of partial parsing */
kfree(data);
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
 
1088,7 → 1168,12
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
if (bdb->version < 195) {
if (bdb->version < 106) {
expected_size = 22;
} else if (bdb->version < 109) {
expected_size = 27;
} else if (bdb->version < 195) {
BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
expected_size = sizeof(struct old_child_dev_config);
} else if (bdb->version == 195) {
expected_size = 37;
1101,18 → 1186,18
bdb->version, expected_size);
}
 
/* Flag an error for unexpected size, but continue anyway. */
if (p_defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
p_defs->child_dev_size, expected_size, bdb->version);
 
/* The legacy sized child device config is the minimum we need. */
if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
DRM_ERROR("Child device config size %u is too small.\n",
DRM_DEBUG_KMS("Child device config size %u is too small.\n",
p_defs->child_dev_size);
return;
}
 
/* Flag an error for unexpected size, but continue anyway. */
if (p_defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
p_defs->child_dev_size, expected_size, bdb->version);
 
/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
/* get the number of child device */
1285,7 → 1370,7
 
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
* @dev: DRM device
* @dev_priv: i915 device instance
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
1337,7 → 1422,8
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
parse_mipi_config(dev_priv, bdb);
parse_mipi_sequence(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
 
if (bios)
/drivers/video/drm/i915/intel_bios.h
25,25 → 25,43
*
*/
 
#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_
 
/**
* struct vbt_header - VBT Header structure
* @signature: VBT signature, always starts with "$VBT"
* @version: Version of this structure
* @header_size: Size of this structure
* @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
* @vbt_checksum: Checksum
* @reserved0: Reserved
* @bdb_offset: Offset of &struct bdb_header from beginning of VBT
* @aim_offset: Offsets of add-in data blocks from beginning of VBT
*/
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 vbt_size; /**< in bytes */
u8 signature[20];
u16 version;
u16 header_size;
u16 vbt_size;
u8 vbt_checksum;
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
u32 bdb_offset;
u32 aim_offset[4];
} __packed;
 
/**
* struct bdb_header - BDB Header structure
* @signature: BDB signature "BIOS_DATA_BLOCK"
* @version: Version of the data block definitions
* @header_size: Size of this structure
* @bdb_size: Size of BDB (BDB Header and data blocks)
*/
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
u8 signature[16];
u16 version;
u16 header_size;
u16 bdb_size;
} __packed;
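A hedged illustration of how these two headers chain together: a standalone sketch, not part of this revision. find_bdb() and its bounds checks are assumptions layered on the layout documented above, reusing the struct definitions just shown.

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: walk from a raw VBT blob to its BDB header via
 * vbt_header.bdb_offset, validating both signatures along the way. */
static const struct bdb_header *
find_bdb(const void *blob, size_t size)
{
	const struct vbt_header *vbt = blob;
	const struct bdb_header *bdb;

	if (size < sizeof(*vbt) || memcmp(vbt->signature, "$VBT", 4) != 0)
		return NULL;	/* no VBT signature at the start of the blob */
	if (vbt->bdb_offset > size - sizeof(*bdb))
		return NULL;	/* bdb_offset points past the end of the blob */

	bdb = (const void *)((const uint8_t *)blob + vbt->bdb_offset);
	if (memcmp(bdb->signature, "BIOS_DATA_BLOCK", 15) != 0)
		return NULL;
	return bdb;
}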
 
/* strictly speaking, this is a "skip" block, but it has interesting info */
936,21 → 954,29
 
/* MIPI Sequence Block definitions */
enum mipi_seq {
MIPI_SEQ_UNDEFINED = 0,
MIPI_SEQ_END = 0,
MIPI_SEQ_ASSERT_RESET,
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
MIPI_SEQ_DEASSERT_RESET,
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
MIPI_SEQ_POWER_ON, /* sequence block v3+ */
MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
MIPI_SEQ_MAX
};
 
enum mipi_seq_element {
MIPI_SEQ_ELEM_UNDEFINED = 0,
MIPI_SEQ_ELEM_END = 0,
MIPI_SEQ_ELEM_SEND_PKT,
MIPI_SEQ_ELEM_DELAY,
MIPI_SEQ_ELEM_GPIO,
MIPI_SEQ_ELEM_STATUS,
MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
MIPI_SEQ_ELEM_MAX
};
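A hedged sketch of skipping one v1 sequence built from these element IDs. The per-element payload sizes assumed here (four header bytes for SEND_PKT with a 16-bit length at offset 2, four bytes for DELAY, two for GPIO) describe the v1 layout as commonly understood, not something this diff states.

#include <stdint.h>

/* Illustrative v1 sequence walker: return the index just past the
 * MIPI_SEQ_ELEM_END byte of one sequence, or 0 on malformed data. */
static int skip_v1_sequence(const uint8_t *data, int index, int total)
{
	index++;			/* skip the sequence id byte */
	while (index < total) {
		uint8_t elem = data[index++];

		switch (elem) {
		case MIPI_SEQ_ELEM_END:
			return index;
		case MIPI_SEQ_ELEM_SEND_PKT:
			if (index + 4 > total)
				return 0;
			/* assumed: 4 header bytes, 16-bit payload length at offset 2 */
			index += 4 + (data[index + 2] | (data[index + 3] << 8));
			break;
		case MIPI_SEQ_ELEM_DELAY:
			index += 4;	/* assumed: 32-bit delay in microseconds */
			break;
		case MIPI_SEQ_ELEM_GPIO:
			index += 2;	/* assumed: gpio index byte + value byte */
			break;
		default:
			return 0;	/* unknown element, bail out */
		}
	}
	return 0;
}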
 
965,4 → 991,4
MIPI_GPIO_MAX
};
 
#endif /* _I830_BIOS_H_ */
#endif /* _INTEL_BIOS_H_ */
/drivers/video/drm/i915/intel_crt.c
213,9 → 213,7
 
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct intel_crt *crt = intel_encoder_to_crt(encoder);
 
intel_crt_set_dpms(encoder, crt->connector->base.dpms);
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
}
 
static enum drm_mode_status
223,6 → 221,7
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
int max_dotclk = to_i915(dev)->max_dotclk_freq;
 
int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
238,6 → 237,9
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
 
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
 
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
if (HAS_PCH_LPT(dev) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
482,11 → 484,10
}
 
static enum drm_connector_status
intel_crt_load_detect(struct intel_crt *crt)
intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
uint32_t save_bclrpat;
uint32_t save_vtotal;
uint32_t vtotal, vactive;
655,7 → 656,8
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else if (INTEL_INFO(dev)->gen < 4)
status = intel_crt_load_detect(crt);
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
else
status = connector_status_unknown;
intel_release_load_detect_pipe(connector, &tmp, &ctx);
/drivers/video/drm/i915/intel_csr.c
44,6 → 44,8
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
 
#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
 
MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
 
218,7 → 220,7
* Every time the display comes back from a low power state, this function is
* called to copy the firmware from internal memory to registers.
*/
void intel_csr_load_program(struct drm_i915_private *dev_priv)
bool intel_csr_load_program(struct drm_i915_private *dev_priv)
{
u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
225,12 → 227,12
 
if (!IS_GEN9(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
return;
return false;
}
 
if (!dev_priv->csr.dmc_payload) {
DRM_ERROR("Tried to program CSR with empty payload\n");
return;
return false;
}
 
fw_size = dev_priv->csr.dmc_fw_size;
243,6 → 245,8
}
 
dev_priv->csr.dc_state = 0;
 
return true;
}
 
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
281,10 → 285,11
 
csr->version = css_header->version;
 
if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
csr->version < SKL_CSR_VERSION_REQUIRED) {
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
" [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
" [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
371,12 → 376,14
return dmc_payload;
}
 
static void csr_load_work_fn(struct drm_i915_private *dev_priv)
static void csr_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
struct intel_csr *csr;
const struct firmware *fw;
int ret;
 
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
csr = &dev_priv->csr;
 
ret = request_firmware(&fw, dev_priv->csr.fw_path,
400,7 → 407,10
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
dev_notice(dev_priv->dev->dev,
"Failed to load DMC firmware"
" [" FIRMWARE_URL "],"
" disabling runtime power management.\n");
}
 
release_firmware(fw);
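The hunk above turns csr_load_work_fn() into a deferred work item that recovers its drm_i915_private with container_of(). A minimal generic sketch of that pattern follows; every name below is invented for illustration.

#include <stddef.h>

/* container_of: recover the enclosing object from a pointer to one of
 * its members, as csr_load_work_fn() does with csr.work above. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item { int pending; };

struct device_ctx {
	int id;
	struct work_item work;	/* this member is handed to the scheduler */
};

static void work_fn(struct work_item *work)
{
	struct device_ctx *ctx = container_of(work, struct device_ctx, work);
	/* ctx now addresses the whole device context; ctx->id is usable */
	(void)ctx->id;
}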
417,10 → 427,12
{
struct intel_csr *csr = &dev_priv->csr;
 
INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);
 
if (!HAS_CSR(dev_priv))
return;
 
if (IS_SKYLAKE(dev_priv))
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
437,7 → 449,7
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
csr_load_work_fn(dev_priv);
schedule_work(&dev_priv->csr.work);
}
 
/**
/drivers/video/drm/i915/intel_ddi.c
133,12 → 133,12
{ 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 },
{ 0x00007011, 0x00000088, 0x0 },
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80009010, 0x000000C0, 0x1 },
{ 0x00002016, 0x0000009B, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80007011, 0x000000C0, 0x1 },
{ 0x00002016, 0x000000DF, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80005012, 0x000000C0, 0x1 },
};
 
/* Skylake U */
145,13 → 145,13
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80007011, 0x000000CD, 0x0 },
{ 0x80009010, 0x000000C0, 0x1 },
{ 0x0000201B, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80005012, 0x000000C0, 0x1 },
{ 0x80007011, 0x000000C0, 0x1 },
{ 0x00002016, 0x00000088, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80005012, 0x000000C0, 0x1 },
};
 
/* Skylake Y */
158,13 → 158,13
static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80007011, 0x000000CD, 0x0 },
{ 0x80009010, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80005012, 0x000000C0, 0x3 },
{ 0x80007011, 0x000000C0, 0x3 },
{ 0x00000018, 0x00000088, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80005012, 0x000000C0, 0x3 },
};
 
/*
226,11 → 226,11
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00000018, 0x00000098, 0x0 },
{ 0x00004013, 0x00000088, 0x0 },
{ 0x00006012, 0x00000087, 0x0 },
{ 0x80006012, 0x000000CD, 0x1 },
{ 0x00000018, 0x000000DF, 0x0 },
{ 0x00003015, 0x00000087, 0x0 }, /* Default */
{ 0x00003015, 0x000000C7, 0x0 },
{ 0x00000018, 0x000000C7, 0x0 },
{ 0x80003015, 0x000000CD, 0x1 }, /* Default */
{ 0x80003015, 0x000000C0, 0x1 },
{ 0x80000018, 0x000000C0, 0x1 },
};
 
/* Skylake Y */
237,15 → 237,15
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
{ 0x00007011, 0x00000084, 0x0 },
{ 0x80007011, 0x000000CB, 0x3 },
{ 0x00000018, 0x000000A4, 0x0 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x00004013, 0x00000080, 0x0 },
{ 0x00006013, 0x000000C7, 0x0 },
{ 0x80006013, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000008A, 0x0 },
{ 0x00003015, 0x000000C7, 0x0 }, /* Default */
{ 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */
{ 0x00000018, 0x000000C7, 0x0 },
{ 0x80003015, 0x000000C0, 0x3 }, /* Default */
{ 0x80003015, 0x000000C0, 0x3 },
{ 0x80000018, 0x000000C0, 0x3 },
};
 
struct bxt_ddi_buf_trans {
301,8 → 301,8
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
};
 
static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
enum port port, int type);
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type);
 
static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
struct intel_digital_port **dig_port,
342,81 → 342,50
return port;
}
 
static bool
intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
static const struct ddi_buf_trans *
skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
}
 
static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
int *n_entries)
{
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_dp;
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
ddi_translations = skl_u_ddi_translations_dp;
return skl_y_ddi_translations_dp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
return skl_u_ddi_translations_dp;
} else {
ddi_translations = skl_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
return skl_ddi_translations_dp;
}
 
return ddi_translations;
}
 
static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_y_ddi_translations_edp;
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
} else {
ddi_translations = skl_y_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
}
} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_u_ddi_translations_edp;
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
return skl_u_ddi_translations_edp;
} else {
ddi_translations = skl_u_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
}
} else {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
} else {
ddi_translations = skl_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
return skl_ddi_translations_edp;
}
}
 
return ddi_translations;
return skl_get_buf_trans_dp(dev_priv, n_entries);
}
 
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_device *dev,
int *n_entries)
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_hdmi;
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
ddi_translations = skl_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
return skl_ddi_translations_hdmi;
}
 
return ddi_translations;
}
 
/*
426,14 → 395,14
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool supports_hdmi)
void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
int hdmi_level;
enum port port;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
const struct ddi_buf_trans *ddi_translations_edp;
440,28 → 409,38
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
 
if (IS_BROXTON(dev)) {
if (!supports_hdmi)
port = intel_ddi_get_encoder_port(encoder);
hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
 
if (IS_BROXTON(dev_priv)) {
if (encoder->type != INTEL_OUTPUT_HDMI)
return;
 
/* Vswing programming for HDMI */
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
INTEL_OUTPUT_HDMI);
return;
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
}
 
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev, &n_dp_entries);
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev, &n_edp_entries);
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
ddi_translations_hdmi =
skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
hdmi_default_entry = 8;
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
iboost_bit = 1<<31;
} else if (IS_BROADWELL(dev)) {
 
if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
port != PORT_A && port != PORT_E &&
n_edp_entries > 9))
n_edp_entries = 9;
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
 
478,7 → 457,7
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
} else if (IS_HASWELL(dev)) {
} else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
498,30 → 477,18
hdmi_default_entry = 7;
}
 
switch (port) {
case PORT_A:
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
break;
case PORT_B:
case PORT_C:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
case PORT_D:
if (intel_dp_is_edp(dev, PORT_D)) {
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
} else {
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
}
break;
case PORT_E:
if (ddi_translations_fdi)
case INTEL_OUTPUT_ANALOG:
ddi_translations = ddi_translations_fdi;
else
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
default:
535,7 → 502,7
ddi_translations[i].trans2);
}
 
if (!supports_hdmi)
if (encoder->type != INTEL_OUTPUT_HDMI)
return;
 
/* Choose a good default if VBT is badly populated */
550,37 → 517,6
ddi_translations_hdmi[hdmi_level].trans2);
}
 
/* Program DDI buffer translations for DP. By default, program ports A-D in DP
* mode and port E for FDI.
*/
void intel_prepare_ddi(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
bool visited[I915_MAX_PORTS] = { 0, };
 
if (!HAS_DDI(dev))
return;
 
for_each_intel_encoder(dev, intel_encoder) {
struct intel_digital_port *intel_dig_port;
enum port port;
bool supports_hdmi;
 
if (intel_encoder->type == INTEL_OUTPUT_DSI)
continue;
 
ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
if (visited[port])
continue;
 
supports_hdmi = intel_dig_port &&
intel_dig_port_supports_hdmi(intel_dig_port);
 
intel_prepare_ddi_buffers(dev, port, supports_hdmi);
visited[port] = true;
}
}
 
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
609,8 → 545,14
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
u32 temp, i, rx_ctl_val;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
intel_prepare_ddi_buffer(encoder);
}
 
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
* mode set "sequence for CRT port" document:
* - TP1 to TP2 time with the default value
1612,8 → 1554,10
}
 
cfgcr1 = cfgcr2 = 0;
} else /* eDP */
} else if (intel_encoder->type == INTEL_OUTPUT_EDP) {
return true;
} else
return false;
 
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
2117,10 → 2061,9
TRANS_CLK_SEL_DISABLED);
}
 
static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
enum port port, int type)
static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
uint8_t dp_iboost, hdmi_iboost;
2135,7 → 2078,7
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
2142,7 → 2085,12
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
 
if (WARN_ON(port != PORT_A &&
port != PORT_E && n_entries > 9))
n_entries = 9;
 
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_HDMI) {
2149,7 → 2097,7
if (hdmi_iboost) {
iboost = hdmi_iboost;
} else {
ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else {
2174,10 → 2122,9
I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
}
 
static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
enum port port, int type)
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
uint32_t val;
2292,7 → 2239,7
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = dport->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2302,10 → 2249,10
 
level = translate_signal_level(signal_levels);
 
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_set_iboost(dev, level, port, encoder->type);
else if (IS_BROXTON(dev))
bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
else if (IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
 
return DDI_BUF_TRANS_SELECT(level);
}
2357,13 → 2304,19
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
int hdmi_level;
 
if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
}
 
intel_prepare_ddi_buffer(intel_encoder);
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
2380,17 → 2333,11
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
if (IS_BROXTON(dev)) {
hdmi_level = dev_priv->vbt.
ddi_port_info[port].hdmi_level_shift;
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
INTEL_OUTPUT_HDMI);
}
intel_hdmi->set_infoframes(encoder,
crtc->config->has_hdmi_sink,
&crtc->config->base.adjusted_mode);
2434,7 → 2381,13
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
 
if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
}
 
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
{
3318,7 → 3271,34
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp;
int max_lanes;
 
if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
switch (port) {
case PORT_A:
max_lanes = 4;
break;
case PORT_E:
max_lanes = 0;
break;
default:
max_lanes = 4;
break;
}
} else {
switch (port) {
case PORT_A:
max_lanes = 2;
break;
case PORT_E:
max_lanes = 2;
break;
default:
max_lanes = 4;
break;
}
}
 
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
3363,9 → 3343,12
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
}
 
intel_dig_port->max_lanes = max_lanes;
 
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
/drivers/video/drm/i915/intel_display.c
85,8 → 85,6
DRM_FORMAT_ARGB8888,
};
 
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
1152,11 → 1150,6
}
}
 
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
}
 
/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
1168,7 → 1161,7
cur_state = !!(val & DPLL_VCO_ENABLE);
I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
 
/* XXX: the dsi pll is shared between MIPI DSI ports */
1184,7 → 1177,7
cur_state = val & DSI_PLL_VCO_EN;
I915_STATE_WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1208,14 → 1201,13
bool cur_state;
struct intel_dpll_hw_state hw_state;
 
if (WARN (!pll,
"asserting DPLL %s with no DPLL\n", state_string(state)))
if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
return;
 
cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
pll->name, onoff(state), onoff(cur_state));
}
 
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1235,7 → 1227,7
}
I915_STATE_WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1250,7 → 1242,7
cur_state = !!(val & FDI_RX_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1282,7 → 1274,7
cur_state = !!(val & FDI_RX_PLL_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
 
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1340,7 → 1332,7
 
I915_STATE_WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1370,7 → 1362,7
 
I915_STATE_WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
pipe_name(pipe), onoff(state), onoff(cur_state));
}
 
static void assert_plane(struct drm_i915_private *dev_priv,
1383,7 → 1375,7
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
I915_STATE_WARN(cur_state != state,
"plane %c assertion failure (expected %s, current %s)\n",
plane_name(plane), state_string(state), state_string(cur_state));
plane_name(plane), onoff(state), onoff(cur_state));
}
 
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
2156,6 → 2148,17
 
I915_WRITE(reg, val | PIPECONF_ENABLE);
POSTING_READ(reg);
 
/*
* Until the pipe starts, DSL will read as 0, which would cause
* an apparent vblank timestamp jump, which also messes up the
* frame count when it's derived from the timestamps. So let's
* wait for the pipe to start properly before we call
* drm_crtc_vblank_on()
*/
if (dev->max_vblank_count == 0 &&
wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}
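The wait_for() used above is a bounded poll. Below is a rough userspace-flavored sketch of the same idea; poll_until() and its one-millisecond cadence are illustrative, not the kernel macro.

#include <stdbool.h>
#include <time.h>

/* Illustrative bounded poll: re-check a condition roughly once per
 * millisecond until it holds or timeout_ms expires. */
static bool poll_until(bool (*cond)(void *), void *arg, int timeout_ms)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

	while (timeout_ms-- > 0) {
		if (cond(arg))
			return true;
		nanosleep(&ts, NULL);
	}
	return cond(arg);	/* one last check after the deadline */
}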
 
/**
2217,59 → 2220,66
return false;
}
 
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
uint64_t fb_format_modifier, unsigned int plane)
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
unsigned int tile_height;
uint32_t pixel_bytes;
return IS_GEN2(dev_priv) ? 2048 : 4096;
}
 
switch (fb_format_modifier) {
static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp)
{
switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
tile_height = 1;
break;
return cpp;
case I915_FORMAT_MOD_X_TILED:
tile_height = IS_GEN2(dev) ? 16 : 8;
break;
if (IS_GEN2(dev_priv))
return 128;
else
return 512;
case I915_FORMAT_MOD_Y_TILED:
tile_height = 32;
break;
if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
case I915_FORMAT_MOD_Yf_TILED:
pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
switch (pixel_bytes) {
default:
switch (cpp) {
case 1:
tile_height = 64;
break;
return 64;
case 2:
case 4:
tile_height = 32;
break;
return 128;
case 8:
tile_height = 16;
break;
case 16:
WARN_ONCE(1,
"128-bit pixels are not supported for display!");
tile_height = 16;
break;
return 256;
default:
MISSING_CASE(cpp);
return cpp;
}
break;
default:
MISSING_CASE(fb_format_modifier);
tile_height = 1;
break;
MISSING_CASE(fb_modifier);
return cpp;
}
}
 
return tile_height;
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp)
{
if (fb_modifier == DRM_FORMAT_MOD_NONE)
return 1;
else
return intel_tile_size(dev_priv) /
intel_tile_width(dev_priv, fb_modifier, cpp);
}
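To make the geometry concrete, a worked example with values that follow directly from the helpers above, assuming a gen4+ part so tile_size is 4096:

/* X-tiled, cpp = 4:  tile_width = 512 bytes, tile_height = 4096/512 = 8 rows
 * Yf-tiled, cpp = 1: tile_width =  64 bytes, tile_height = 4096/64 = 64 rows
 *
 * intel_fb_align_height() below rounds the fb height up to whole tile rows:
 * a 1080-line X-tiled fb stays at 1080 (already a multiple of 8), while a
 * 1080-line Yf-tiled 8bpp fb rounds up to 1088 (the next multiple of 64).
 */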
 
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
uint32_t pixel_format, uint64_t fb_format_modifier)
uint32_t pixel_format, uint64_t fb_modifier)
{
return ALIGN(height, intel_tile_height(dev, pixel_format,
fb_format_modifier, 0));
unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
 
return ALIGN(height, tile_height);
}
 
static void
2276,8 → 2286,9
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
struct intel_rotation_info *info = &view->params.rotation_info;
unsigned int tile_height, tile_pitch;
struct drm_i915_private *dev_priv = to_i915(fb->dev);
struct intel_rotation_info *info = &view->params.rotated;
unsigned int tile_size, tile_width, tile_height, cpp;
 
*view = i915_ggtt_view_normal;
 
2295,26 → 2306,28
info->uv_offset = fb->offsets[1];
info->fb_modifier = fb->modifier[0];
 
tile_height = intel_tile_height(fb->dev, fb->pixel_format,
fb->modifier[0], 0);
tile_pitch = PAGE_SIZE / tile_height;
info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
tile_size = intel_tile_size(dev_priv);
 
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
tile_height = tile_size / tile_width;
 
info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
info->size = info->width_pages * info->height_pages * PAGE_SIZE;
info->size = info->width_pages * info->height_pages * tile_size;
 
if (info->pixel_format == DRM_FORMAT_NV12) {
tile_height = intel_tile_height(fb->dev, fb->pixel_format,
fb->modifier[0], 1);
tile_pitch = PAGE_SIZE / tile_height;
info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
tile_height);
info->size_uv = info->width_pages_uv * info->height_pages_uv *
PAGE_SIZE;
cpp = drm_format_plane_cpp(fb->pixel_format, 1);
tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
tile_height = tile_size / tile_width;
 
info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
}
}
 
static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return 256 * 1024;
2327,6 → 2340,25
return 0;
}
 
static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier)
{
switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev_priv)->gen >= 9)
return 256 * 1024;
return 0;
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
MISSING_CASE(fb_modifier);
return 0;
}
}
 
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
2341,29 → 2373,7
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
switch (fb->modifier[0]) {
case DRM_FORMAT_MOD_NONE:
alignment = intel_linear_alignment(dev_priv);
break;
case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else {
/* pin() will align the object as required by fence */
alignment = 0;
}
break;
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
"Y tiling bo slipped through, driver bug!\n"))
return -EINVAL;
alignment = 1 * 1024 * 1024;
break;
default:
MISSING_CASE(fb->modifier[0]);
return -EINVAL;
}
alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
 
intel_fill_fb_ggtt_view(&view, fb, plane_state);
 
2441,22 → 2451,27
 
/* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
* is assumed to be a power of two. */
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
unsigned int tiling_mode,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch)
{
if (tiling_mode != I915_TILING_NONE) {
if (fb_modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles;
 
tile_rows = *y / 8;
*y %= 8;
tile_size = intel_tile_size(dev_priv);
tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
tile_height = tile_size / tile_width;
 
tiles = *x / (512/cpp);
*x %= 512/cpp;
tile_rows = *y / tile_height;
*y %= tile_height;
 
return tile_rows * pitch * 8 + tiles * 4096;
tiles = *x / (tile_width/cpp);
*x %= tile_width/cpp;
 
return tile_rows * pitch * tile_height + tiles * tile_size;
} else {
unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
unsigned int offset;
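A worked pass through the tiled branch of intel_compute_tile_offset() above, with all input values assumed purely for illustration:

/* Assume gen4+ X-tiled, cpp = 4, pitch = 8192 bytes, x = 300, y = 100:
 *   tile_size = 4096, tile_width = 512, tile_height = 4096/512 = 8
 *   tile_rows = 100 / 8 = 12, and y becomes 100 % 8 = 4
 *   tiles     = 300 / (512/4) = 2, and x becomes 300 % 128 = 44
 *   offset    = 12 * 8192 * 8 + 2 * 4096 = 786432 + 8192 = 794624 bytes
 * The adjusted (x, y) = (44, 4) then addresses the pixel within that tile.
 */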
2539,12 → 2554,16
if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
return false;
 
mutex_lock(&dev->struct_mutex);
 
obj = i915_gem_object_create_stolen_for_preallocated(dev,
base_aligned,
base_aligned,
size_aligned);
if (!obj)
if (!obj) {
mutex_unlock(&dev->struct_mutex);
return false;
}
 
obj->tiling_mode = plane_config->tiling;
if (obj->tiling_mode == I915_TILING_X)
2557,12 → 2576,12
mode_cmd.modifier[0] = fb->modifier[0];
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
 
mutex_lock(&dev->struct_mutex);
if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
&mode_cmd, obj)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
 
mutex_unlock(&dev->struct_mutex);
 
DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2601,6 → 2620,8
struct drm_plane_state *plane_state = primary->state;
struct drm_crtc_state *crtc_state = intel_crtc->base.state;
struct intel_plane *intel_plane = to_intel_plane(primary);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane_state);
struct drm_framebuffer *fb;
 
if (!plane_config->fb)
2662,6 → 2683,15
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
 
intel_state->src.x1 = plane_state->src_x;
intel_state->src.y1 = plane_state->src_y;
intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
intel_state->dst.x1 = plane_state->crtc_x;
intel_state->dst.y1 = plane_state->crtc_y;
intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
 
obj = intel_fb_obj(fb);
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
2673,38 → 2703,23
obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
 
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
static void i9xx_update_primary_plane(struct drm_plane *primary,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = primary->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *primary = crtc->primary;
bool visible = to_intel_plane_state(primary->state)->visible;
struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
int pixel_size;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int x = plane_state->src.x1 >> 16;
int y = plane_state->src.y1 >> 16;
 
if (!visible || !fb) {
I915_WRITE(reg, 0);
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(DSPSURF(plane), 0);
else
I915_WRITE(DSPADDR(plane), 0);
POSTING_READ(reg);
return;
}
 
obj = intel_fb_obj(fb);
if (WARN_ON(obj == NULL))
return;
 
pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
dspcntr |= DISPLAY_PLANE_ENABLE;
2717,13 → 2732,13
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((intel_crtc->config->pipe_src_h - 1) << 16) |
(intel_crtc->config->pipe_src_w - 1));
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
((intel_crtc->config->pipe_src_h - 1) << 16) |
(intel_crtc->config->pipe_src_w - 1));
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
I915_WRITE(PRIMPOS(plane), 0);
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
2761,13 → 2776,12
if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
linear_offset = y * fb->pitches[0] + x * cpp;
 
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size,
intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
2774,17 → 2788,17
intel_crtc->dspaddr_offset = linear_offset;
}
 
if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
 
x += (intel_crtc->config->pipe_src_w - 1);
y += (intel_crtc->config->pipe_src_h - 1);
x += (crtc_state->pipe_src_w - 1);
y += (crtc_state->pipe_src_h - 1);
 
/* Find the last pixel of the last line of the display
data and add it to linear_offset */
linear_offset +=
(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
(intel_crtc->config->pipe_src_w - 1) * pixel_size;
(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
(crtc_state->pipe_src_w - 1) * cpp;
}
 
intel_crtc->adjusted_x = x;
2803,37 → 2817,40
POSTING_READ(reg);
}
 
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
static void i9xx_disable_primary_plane(struct drm_plane *primary,
struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *primary = crtc->primary;
bool visible = to_intel_plane_state(primary->state)->visible;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
int pixel_size;
 
if (!visible || !fb) {
I915_WRITE(reg, 0);
I915_WRITE(DSPCNTR(plane), 0);
if (INTEL_INFO(dev_priv)->gen >= 4)
I915_WRITE(DSPSURF(plane), 0);
POSTING_READ(reg);
return;
else
I915_WRITE(DSPADDR(plane), 0);
POSTING_READ(DSPCNTR(plane));
}
 
obj = intel_fb_obj(fb);
if (WARN_ON(obj == NULL))
return;
static void ironlake_update_primary_plane(struct drm_plane *primary,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = primary->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int x = plane_state->src.x1 >> 16;
int y = plane_state->src.y1 >> 16;
 
pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
dspcntr |= DISPLAY_PLANE_ENABLE;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2868,25 → 2885,24
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
linear_offset = y * fb->pitches[0] + x * cpp;
intel_crtc->dspaddr_offset =
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size,
intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
 
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
x += (intel_crtc->config->pipe_src_w - 1);
y += (intel_crtc->config->pipe_src_h - 1);
x += (crtc_state->pipe_src_w - 1);
y += (crtc_state->pipe_src_h - 1);
 
/* Find the last pixel of the last line of the display
data and add it to linear_offset */
linear_offset +=
(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
(intel_crtc->config->pipe_src_w - 1) * pixel_size;
(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
(crtc_state->pipe_src_w - 1) * cpp;
}
}
 
2907,37 → 2923,15
POSTING_READ(reg);
}
 
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format)
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format)
{
u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
if (fb_modifier == DRM_FORMAT_MOD_NONE) {
return 64;
} else {
int cpp = drm_format_plane_cpp(pixel_format, 0);
 
/*
* The stride is either expressed as a multiple of 64-byte
* chunks for linear buffers or as a number of tiles for tiled
* buffers.
*/
switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
return 64;
case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev)->gen == 2)
return 128;
return 512;
case I915_FORMAT_MOD_Y_TILED:
/* No need to check for old gens and Y tiling since this is
* about the display engine and those will be blocked before
* we get here.
*/
return 128;
case I915_FORMAT_MOD_Yf_TILED:
if (bits_per_pixel == 8)
return 64;
else
return 128;
default:
MISSING_CASE(fb_modifier);
return 64;
return intel_tile_width(dev_priv, fb_modifier, cpp);
}
}
 
2960,7 → 2954,7
offset = vma->node.start;
 
if (plane == 1) {
offset += vma->ggtt_view.params.rotation_info.uv_start_page *
offset += vma->ggtt_view.params.rotated.uv_start_page *
PAGE_SIZE;
}
 
3077,37 → 3071,31
return 0;
}
 
static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
static void skylake_update_primary_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *plane = crtc->primary;
bool visible = to_intel_plane_state(plane->state)->visible;
struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_crtc->pipe;
u32 plane_ctl, stride_div, stride;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
unsigned int rotation = plane_state->base.rotation;
int x_offset, y_offset;
u32 surf_addr;
struct intel_crtc_state *crtc_state = intel_crtc->config;
struct intel_plane_state *plane_state;
int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
int scaler_id = -1;
int scaler_id = plane_state->scaler_id;
int src_x = plane_state->src.x1 >> 16;
int src_y = plane_state->src.y1 >> 16;
int src_w = drm_rect_width(&plane_state->src) >> 16;
int src_h = drm_rect_height(&plane_state->src) >> 16;
int dst_x = plane_state->dst.x1;
int dst_y = plane_state->dst.y1;
int dst_w = drm_rect_width(&plane_state->dst);
int dst_h = drm_rect_height(&plane_state->dst);
 
plane_state = to_intel_plane_state(plane->state);
 
if (!visible || !fb) {
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_CTL(pipe, 0));
return;
}
 
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
3115,41 → 3103,27
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
 
rotation = plane->state->rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
 
obj = intel_fb_obj(fb);
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
 
WARN_ON(drm_rect_width(&plane_state->src) == 0);
 
scaler_id = plane_state->scaler_id;
src_x = plane_state->src.x1 >> 16;
src_y = plane_state->src.y1 >> 16;
src_w = drm_rect_width(&plane_state->src) >> 16;
src_h = drm_rect_height(&plane_state->src) >> 16;
dst_x = plane_state->dst.x1;
dst_y = plane_state->dst.y1;
dst_w = drm_rect_width(&plane_state->dst);
dst_h = drm_rect_height(&plane_state->dst);
if (intel_rotation_90_or_270(rotation)) {
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
WARN_ON(x != src_x || y != src_y);
 
if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0], 0);
tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
stride = DIV_ROUND_UP(fb->height, tile_height);
x_offset = stride * tile_height - y - src_h;
y_offset = x;
x_offset = stride * tile_height - src_y - src_h;
y_offset = src_x;
plane_size = (src_w - 1) << 16 | (src_h - 1);
} else {
stride = fb->pitches[0] / stride_div;
x_offset = x;
y_offset = y;
x_offset = src_x;
y_offset = src_y;
plane_size = (src_h - 1) << 16 | (src_w - 1);
}
plane_offset = y_offset << 16 | x_offset;
3182,20 → 3156,27
POSTING_READ(PLANE_SURF(pipe, 0));
}
 
static void skylake_disable_primary_plane(struct drm_plane *primary,
struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_crtc(crtc)->pipe;
 
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_SURF(pipe, 0));
}
 
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
/* Support for kgdboc is disabled; this needs a major rework. */
DRM_ERROR("legacy panic handler not supported any more.\n");
 
if (dev_priv->fbc.deactivate)
dev_priv->fbc.deactivate(dev_priv);
 
dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
return 0;
return -ENODEV;
}
 
static void intel_complete_page_flips(struct drm_device *dev)
3222,8 → 3203,10
drm_modeset_lock_crtc(crtc, &plane->base);
plane_state = to_intel_plane_state(plane->base.state);
 
if (crtc->state->active && plane_state->base.fb)
plane->commit_plane(&plane->base, plane_state);
if (plane_state->visible)
plane->update_plane(&plane->base,
to_intel_crtc_state(crtc->state),
plane_state);
 
drm_modeset_unlock_crtc(crtc);
}
4809,9 → 4792,6
to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
 
if (atomic->wait_vblank)
intel_wait_for_vblank(dev, crtc->pipe);
 
intel_frontbuffer_flip(dev, atomic->fb_bits);
 
crtc->wm.cxsr_allowed = true;
4820,7 → 4800,7
intel_update_watermarks(&crtc->base);
 
if (atomic->update_fbc)
intel_fbc_update(crtc);
intel_fbc_post_update(crtc);
 
if (atomic->post_enable_primary)
intel_post_enable_primary(&crtc->base);
4828,25 → 4808,38
memset(atomic, 0, sizeof(*atomic));
}
 
static void intel_pre_plane_update(struct intel_crtc *crtc)
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);
 
if (atomic->disable_fbc)
intel_fbc_deactivate(crtc);
if (atomic->update_fbc)
intel_fbc_pre_update(crtc);
 
if (crtc->atomic.disable_ips)
hsw_disable_ips(crtc);
if (old_pri_state) {
struct intel_plane_state *primary_state =
to_intel_plane_state(primary->state);
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
 
if (atomic->pre_disable_primary)
if (old_primary_state->visible &&
(modeset || !primary_state->visible))
intel_pre_disable_primary(&crtc->base);
}
 
if (pipe_config->disable_cxsr) {
crtc->wm.cxsr_allowed = false;
 
if (old_crtc_state->base.active)
intel_set_memory_cxsr(dev_priv, false);
}
 
4948,8 → 4941,6
if (intel_crtc->config->has_pch_encoder)
intel_wait_for_vblank(dev, pipe);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
intel_fbc_enable(intel_crtc);
}
 
/* IPS only exists on ULT machines and is tied to pipe A. */
5062,8 → 5053,6
intel_wait_for_vblank(dev, hsw_workaround_pipe);
intel_wait_for_vblank(dev, hsw_workaround_pipe);
}
 
intel_fbc_enable(intel_crtc);
}
 
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5144,8 → 5133,6
}
 
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
intel_fbc_disable_crtc(intel_crtc);
}
 
static void haswell_crtc_disable(struct drm_crtc *crtc)
5196,8 → 5183,6
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
}
 
intel_fbc_disable_crtc(intel_crtc);
}
 
static void i9xx_pfit_enable(struct intel_crtc *crtc)
5320,31 → 5305,37
}
}
 
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
unsigned long mask;
enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
enum transcoder transcoder = crtc_state->cpu_transcoder;
 
if (!crtc->state->active)
if (!crtc_state->base.active)
return 0;
 
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (intel_crtc->config->pch_pfit.enabled ||
intel_crtc->config->pch_pfit.force_thru)
if (crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 
mask |= BIT(intel_display_port_power_domain(intel_encoder));
}
 
return mask;
}
 
static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
static unsigned long
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5352,7 → 5343,8
unsigned long domains, new_domains, old_domains;
 
old_domains = intel_crtc->enabled_power_domains;
intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
intel_crtc->enabled_power_domains = new_domains =
get_crtc_power_domains(crtc, crtc_state);
 
domains = new_domains & ~old_domains;
 
5371,34 → 5363,6
intel_display_power_put(dev_priv, domain);
}
 
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long put_domains[I915_MAX_PIPES] = {};
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int i;
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (needs_modeset(crtc->state))
put_domains[to_intel_crtc(crtc)->pipe] =
modeset_get_crtc_power_domains(crtc);
}
 
if (dev_priv->display.modeset_commit_cdclk) {
unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
 
if (cdclk != dev_priv->cdclk_freq &&
!WARN_ON(!state->allow_modeset))
dev_priv->display.modeset_commit_cdclk(state);
}
 
for (i = 0; i < I915_MAX_PIPES; i++)
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
}
 
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
6061,27 → 6025,32
return 144000;
}
 
/* Compute the max pixel clock for new configuration. Uses atomic state if
* that's non-NULL, look at current state otherwise. */
/* Compute the max pixel clock for new configuration. */
static int intel_mode_max_pixclk(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
int max_pixclk = 0;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
unsigned max_pixclk = 0, i;
enum pipe pipe;
 
for_each_intel_crtc(dev, intel_crtc) {
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
sizeof(intel_state->min_pixclk));
 
if (!crtc_state->base.enable)
continue;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
int pixclk = 0;
 
max_pixclk = max(max_pixclk,
crtc_state->base.adjusted_mode.crtc_clock);
if (crtc_state->enable)
pixclk = crtc_state->adjusted_mode.crtc_clock;
 
intel_state->min_pixclk[i] = pixclk;
}
 
for_each_pipe(dev_priv, pipe)
max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
 
return max_pixclk;
}
 
6090,13 → 6059,18
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev, state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
 
if (max_pixclk < 0)
return max_pixclk;
 
to_intel_atomic_state(state)->cdclk =
intel_state->cdclk = intel_state->dev_cdclk =
valleyview_calc_cdclk(dev_priv, max_pixclk);
 
if (!intel_state->active_crtcs)
intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
 
return 0;
}
 
6105,13 → 6079,18
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev, state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
 
if (max_pixclk < 0)
return max_pixclk;
 
to_intel_atomic_state(state)->cdclk =
intel_state->cdclk = intel_state->dev_cdclk =
broxton_calc_cdclk(dev_priv, max_pixclk);
 
if (!intel_state->active_crtcs)
intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
 
return 0;
}
 
6154,8 → 6133,10
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
unsigned req_cdclk = old_intel_state->dev_cdclk;
 
/*
* FIXME: We can end up here with all power domains off, yet
6291,8 → 6272,6
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
intel_fbc_enable(intel_crtc);
}
 
static void i9xx_pfit_disable(struct intel_crtc *crtc)
6355,8 → 6334,6
 
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_fbc_disable_crtc(intel_crtc);
}
 
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6380,6 → 6357,7
 
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
intel_update_watermarks(crtc);
intel_disable_shared_dpll(intel_crtc);
 
6387,6 → 6365,9
for_each_power_domain(domain, domains)
intel_display_power_put(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
 
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}
 
/*
6395,55 → 6376,16
*/
int intel_display_suspend(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
struct drm_crtc *crtc;
unsigned crtc_mask = 0;
int ret = 0;
int ret;
 
if (WARN_ON(!ctx))
return 0;
 
lockdep_assert_held(&ctx->ww_ctx);
state = drm_atomic_state_alloc(dev);
if (WARN_ON(!state))
return -ENOMEM;
 
state->acquire_ctx = ctx;
state->allow_modeset = true;
 
for_each_crtc(dev, crtc) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_crtc_state(state, crtc);
 
ret = PTR_ERR_OR_ZERO(crtc_state);
state = drm_atomic_helper_suspend(dev);
ret = PTR_ERR_OR_ZERO(state);
if (ret)
goto free;
 
if (!crtc_state->active)
continue;
 
crtc_state->active = false;
crtc_mask |= 1 << drm_crtc_index(crtc);
}
 
if (crtc_mask) {
ret = drm_atomic_commit(state);
 
if (!ret) {
for_each_crtc(dev, crtc)
if (crtc_mask & (1 << drm_crtc_index(crtc)))
crtc->state->active = true;
 
return ret;
}
}
 
free:
if (ret)
DRM_ERROR("Suspending crtc's failed with %i\n", ret);
drm_atomic_state_free(state);
else
dev_priv->modeset_restore_state = state;
return ret;
}
 
7597,26 → 7539,34
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll)
{
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
struct intel_crtc_state pipe_config = {
.base.crtc = &crtc->base,
.pixel_multiplier = 1,
.dpll = *dpll,
};
struct intel_crtc_state *pipe_config;
 
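/* intel_crtc_state is too large to keep on the stack, so allocate it. */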
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
if (!pipe_config)
return -ENOMEM;
 
pipe_config->base.crtc = &crtc->base;
pipe_config->pixel_multiplier = 1;
pipe_config->dpll = *dpll;
 
if (IS_CHERRYVIEW(dev)) {
chv_compute_dpll(crtc, &pipe_config);
chv_prepare_pll(crtc, &pipe_config);
chv_enable_pll(crtc, &pipe_config);
chv_compute_dpll(crtc, pipe_config);
chv_prepare_pll(crtc, pipe_config);
chv_enable_pll(crtc, pipe_config);
} else {
vlv_compute_dpll(crtc, &pipe_config);
vlv_prepare_pll(crtc, &pipe_config);
vlv_enable_pll(crtc, &pipe_config);
vlv_compute_dpll(crtc, pipe_config);
vlv_prepare_pll(crtc, pipe_config);
vlv_enable_pll(crtc, pipe_config);
}
 
kfree(pipe_config);
 
return 0;
}
 
/**
8039,9 → 7989,6
 
pipe_config->gmch_pfit.control = tmp;
pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =
I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
 
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8282,6 → 8229,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
int i;
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
8288,6 → 8236,7
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
bool using_ssc_source = false;
 
/* We need to take the global config into account */
for_each_intel_encoder(dev, encoder) {
8314,9 → 8263,23
can_ssc = true;
}
 
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
has_panel, has_lvds, has_ck505);
/* Check if any DPLLs are using the SSC source */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
u32 temp = I915_READ(PCH_DPLL(i));
 
if (!(temp & DPLL_VCO_ENABLE))
continue;
 
if ((temp & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
using_ssc_source = true;
break;
}
}
 
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
has_panel, has_lvds, has_ck505, using_ssc_source);
 
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
* PCH B stepping, previous chipset stepping should be
8352,9 → 8315,9
final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
} else {
final |= DREF_SSC_SOURCE_DISABLE;
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
} else if (using_ssc_source) {
final |= DREF_SSC_SOURCE_ENABLE;
final |= DREF_SSC1_ENABLE;
}
 
if (final == val)
8400,7 → 8363,7
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
DRM_DEBUG_KMS("Disabling SSC entirely\n");
DRM_DEBUG_KMS("Disabling CPU source output\n");
 
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
8411,6 → 8374,9
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
 
if (!using_ssc_source) {
DRM_DEBUG_KMS("Disabling SSC source\n");
 
/* Turn off the SSC source */
val &= ~DREF_SSC_SOURCE_MASK;
val |= DREF_SSC_SOURCE_DISABLE;
8422,6 → 8388,7
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
}
 
BUG_ON(val != final);
}
9259,7 → 9226,7
fb->width = ((val >> 0) & 0x1fff) + 1;
 
val = I915_READ(PLANE_STRIDE(pipe, 0));
stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
9683,14 → 9650,14
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
 
intel_prepare_ddi(dev);
}
 
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
unsigned int req_cdclk = old_intel_state->dev_cdclk;
 
broxton_set_cdclk(dev, req_cdclk);
}
9698,29 → 9665,38
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
struct intel_crtc *intel_crtc;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = state->dev->dev_private;
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct intel_crtc_state *crtc_state;
int max_pixel_rate = 0;
unsigned max_pixel_rate = 0, i;
enum pipe pipe;
 
for_each_intel_crtc(state->dev, intel_crtc) {
memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
sizeof(intel_state->min_pixclk));
 
for_each_crtc_in_state(state, crtc, cstate, i) {
int pixel_rate;
 
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
 
if (!crtc_state->base.enable)
crtc_state = to_intel_crtc_state(cstate);
if (!crtc_state->base.enable) {
intel_state->min_pixclk[i] = 0;
continue;
}
 
pixel_rate = ilk_pipe_pixel_rate(crtc_state);
 
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
 
max_pixel_rate = max(max_pixel_rate, pixel_rate);
intel_state->min_pixclk[i] = pixel_rate;
}
 
for_each_pipe(dev_priv, pipe)
max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
 
return max_pixel_rate;
}
 
9806,6 → 9782,7
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int max_pixclk = ilk_max_pixel_rate(state);
int cdclk;
 
9828,7 → 9805,9
return -EINVAL;
}
 
to_intel_atomic_state(state)->cdclk = cdclk;
intel_state->cdclk = intel_state->dev_cdclk = cdclk;
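/* With no pipes active, fall back to the lowest Broadwell cdclk (337.5 MHz). */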
if (!intel_state->active_crtcs)
intel_state->dev_cdclk = 337500;
 
return 0;
}
9836,7 → 9815,9
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
unsigned req_cdclk = old_intel_state->dev_cdclk;
 
broadwell_set_cdclk(dev, req_cdclk);
}
9844,8 → 9825,13
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
 
if (intel_encoder->type != INTEL_OUTPUT_DSI) {
if (!intel_ddi_pll_select(crtc, crtc_state))
return -EINVAL;
}
 
crtc->lowfreq_avail = false;
 
10061,7 → 10047,8
return ret;
}
 
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
10068,9 → 10055,9
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
 
if (on) {
unsigned int width = intel_crtc->base.cursor->state->crtc_w;
unsigned int height = intel_crtc->base.cursor->state->crtc_h;
if (plane_state && plane_state->visible) {
unsigned int width = plane_state->base.crtc_w;
unsigned int height = plane_state->base.crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
 
switch (stride) {
10123,7 → 10110,8
}
}
 
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
10131,9 → 10119,9
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
 
if (on) {
if (plane_state && plane_state->visible) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (intel_crtc->base.cursor->state->crtc_w) {
switch (plane_state->base.crtc_w) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
10144,7 → 10132,7
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
MISSING_CASE(plane_state->base.crtc_w);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
10151,10 → 10139,10
 
if (HAS_DDI(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
}
 
if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
cntl |= CURSOR_ROTATE_180;
}
 
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
10171,29 → 10159,20
 
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
void intel_crtc_update_cursor(struct drm_crtc *crtc,
bool on)
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_plane_state *cursor_state = crtc->cursor->state;
int x = cursor_state->crtc_x;
int y = cursor_state->crtc_y;
u32 base = 0, pos = 0;
u32 base = intel_crtc->cursor_addr;
u32 pos = 0;
 
base = intel_crtc->cursor_addr;
if (plane_state) {
int x = plane_state->base.crtc_x;
int y = plane_state->base.crtc_y;
 
if (x >= intel_crtc->config->pipe_src_w)
on = false;
 
if (y >= intel_crtc->config->pipe_src_h)
on = false;
 
if (x < 0) {
if (x + cursor_state->crtc_w <= 0)
on = false;
 
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
x = -x;
}
10200,27 → 10179,25
pos |= x << CURSOR_X_SHIFT;
 
if (y < 0) {
if (y + cursor_state->crtc_h <= 0)
on = false;
 
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
y = -y;
}
pos |= y << CURSOR_Y_SHIFT;
 
I915_WRITE(CURPOS(pipe), pos);
 
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
base += (cursor_state->crtc_h *
cursor_state->crtc_w - 1) * 4;
plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
base += (plane_state->base.crtc_h *
plane_state->base.crtc_w - 1) * 4;
}
}
 
I915_WRITE(CURPOS(pipe), pos);
 
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base, on);
i845_update_cursor(crtc, base, plane_state);
else
i9xx_update_cursor(crtc, base, on);
i9xx_update_cursor(crtc, base, plane_state);
}
 
static bool cursor_size_ok(struct drm_device *dev,
10388,6 → 10365,7
if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL;
 
drm_framebuffer_reference(fb);
return fb;
#else
return NULL;
10443,7 → 10421,7
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
int ret, i = -1;
10452,6 → 10430,8
connector->base.id, connector->name,
encoder->base.id, encoder->name);
 
old->restore_state = NULL;
 
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
10468,24 → 10448,15
*/
 
/* See if we already have a CRTC for this connector */
if (encoder->crtc) {
crtc = encoder->crtc;
if (connector->state->crtc) {
crtc = connector->state->crtc;
 
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
goto fail;
 
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
 
/* Make sure the crtc and connector are running */
if (connector->dpms != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
 
return true;
goto found;
}
 
/* Find an unused one (if possible) */
10493,8 → 10464,15
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
if (possible_crtc->state->enable)
 
ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
if (ret)
goto fail;
 
if (possible_crtc->state->enable) {
drm_modeset_unlock(&possible_crtc->mutex);
continue;
}
 
crtc = possible_crtc;
break;
10508,23 → 10486,22
goto fail;
}
 
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail;
found:
intel_crtc = to_intel_crtc(crtc);
 
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
goto fail;
 
intel_crtc = to_intel_crtc(crtc);
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
 
state = drm_atomic_state_alloc(dev);
if (!state)
return false;
restore_state = drm_atomic_state_alloc(dev);
if (!state || !restore_state) {
ret = -ENOMEM;
goto fail;
}
 
state->acquire_ctx = ctx;
restore_state->acquire_ctx = ctx;
 
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state)) {
10532,8 → 10509,9
goto fail;
}
 
connector_state->crtc = crtc;
connector_state->best_encoder = &intel_encoder->base;
ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
if (ret)
goto fail;
 
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state)) {
10557,7 → 10535,6
if (fb == NULL) {
DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
old->release_fb = fb;
} else
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
10569,16 → 10546,30
if (ret)
goto fail;
 
drm_mode_copy(&crtc_state->base.mode, mode);
drm_framebuffer_unreference(fb);
 
if (drm_atomic_commit(state)) {
ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
goto fail;
 
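/* Duplicate the state of every object we are about to touch into
 * restore_state, so that releasing the load-detect pipe is simply a
 * commit of this copy. */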
ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
if (!ret)
ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
if (!ret)
ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
if (ret) {
DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
goto fail;
}
 
ret = drm_atomic_commit(state);
if (ret) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
goto fail;
}
crtc->primary->crtc = crtc;
 
old->restore_state = restore_state;
 
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
10585,7 → 10576,8
 
fail:
drm_atomic_state_free(state);
state = NULL;
drm_atomic_state_free(restore_state);
restore_state = state = NULL;
 
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
10599,15 → 10591,10
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_atomic_state *state;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
struct drm_atomic_state *state = old->restore_state;
int ret;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10614,53 → 10601,16
connector->base.id, connector->name,
encoder->base.id, encoder->name);
 
if (old->load_detect_temp) {
state = drm_atomic_state_alloc(dev);
if (!state)
goto fail;
return;
 
state->acquire_ctx = ctx;
 
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state))
goto fail;
 
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state))
goto fail;
 
connector_state->best_encoder = NULL;
connector_state->crtc = NULL;
 
crtc_state->base.enable = crtc_state->base.active = false;
 
ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
0, 0);
if (ret)
goto fail;
 
ret = drm_atomic_commit(state);
if (ret)
goto fail;
 
if (old->release_fb) {
drm_framebuffer_unregister_private(old->release_fb);
drm_framebuffer_unreference(old->release_fb);
if (ret) {
DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
drm_atomic_state_free(state);
}
 
return;
}
 
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
 
return;
fail:
DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
drm_atomic_state_free(state);
}
 
static int i9xx_pll_refclk(struct drm_device *dev,
const struct intel_crtc_state *pipe_config)
{
10813,7 → 10763,7
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *mode;
struct intel_crtc_state pipe_config;
struct intel_crtc_state *pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
10824,6 → 10774,12
if (!mode)
return NULL;
 
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
if (!pipe_config) {
kfree(mode);
return NULL;
}
 
/*
* Construct a pipe_config sufficient for getting the clock info
* back out of crtc_clock_get.
10831,14 → 10787,14
* Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
* to use a real value here instead.
*/
pipe_config.cpu_transcoder = (enum transcoder) pipe;
pipe_config.pixel_multiplier = 1;
pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
i9xx_crtc_clock_get(intel_crtc, &pipe_config);
pipe_config->cpu_transcoder = (enum transcoder) pipe;
pipe_config->pixel_multiplier = 1;
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
i9xx_crtc_clock_get(intel_crtc, pipe_config);
 
mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
10850,6 → 10806,8
 
drm_mode_set_name(mode);
 
kfree(pipe_config);
 
return mode;
}
 
10894,7 → 10852,7
spin_unlock_irq(&dev->event_lock);
 
if (work) {
// cancel_work_sync(&work->work);
cancel_work_sync(&work->work);
kfree(work);
}
 
10920,6 → 10878,7
mutex_unlock(&dev->struct_mutex);
 
intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
intel_fbc_post_update(crtc);
drm_framebuffer_unreference(work->old_fb);
 
BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
11001,6 → 10960,12
return true;
 
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
 
/*
* A DSPSURFLIVE check isn't enough in case the mmio and CS flips
* used the same base address. In that case the mmio flip might
* have completed, but the CS hasn't even executed the flip yet.
11354,12 → 11319,11
*/
if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0], 0);
tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
stride = DIV_ROUND_UP(fb->height, tile_height);
} else {
stride = fb->pitches[0] /
intel_fb_stride_alignment(dev, fb->modifier[0],
intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
}
 
11636,6 → 11600,7
 
crtc->primary->fb = fb;
update_state_fb(crtc->primary);
intel_fbc_pre_update(intel_crtc);
 
work->pending_flip_obj = obj;
 
11695,10 → 11660,12
obj->last_write_req);
} else {
if (!request) {
ret = i915_gem_request_alloc(ring, ring->default_context, &request);
if (ret)
request = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;
}
}
 
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
11718,7 → 11685,6
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
 
intel_fbc_deactivate(intel_crtc);
intel_frontbuffer_flip_prepare(dev,
to_intel_plane(primary)->frontbuffer_bit);
 
11729,7 → 11695,7
cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
if (request)
if (!IS_ERR_OR_NULL(request))
i915_gem_request_cancel(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
11840,11 → 11806,9
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *plane = plane_state->plane;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
int idx = intel_crtc->base.base.id, ret;
int i = drm_plane_index(plane);
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
11866,12 → 11830,20
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
 
if (!is_crtc_enabled && WARN_ON(visible))
visible = false;
/*
* Visibility is calculated as if the crtc was on, but
* after scaler setup everything depends on it being off
* when the crtc isn't active.
*/
if (!is_crtc_enabled)
to_intel_plane_state(plane_state)->visible = visible = false;
 
if (!was_visible && !visible)
return 0;
 
if (fb != old_plane_state->base.fb)
pipe_config->fb_changed = true;
 
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
 
11892,11 → 11864,8
pipe_config->update_wm_post = true;
 
/* must disable cxsr around plane enable/disable */
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
if (is_crtc_enabled)
intel_crtc->atomic.wait_vblank = true;
if (plane->type != DRM_PLANE_TYPE_CURSOR)
pipe_config->disable_cxsr = true;
}
} else if (intel_wm_need_update(plane, plane_state)) {
/* FIXME bollocks */
pipe_config->update_wm_pre = true;
11909,49 → 11878,9
 
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
intel_crtc->atomic.pre_disable_primary = turn_off;
intel_crtc->atomic.post_enable_primary = turn_on;
intel_crtc->atomic.update_fbc = true;
 
if (turn_off) {
/*
* FIXME: Actually, if we still have any other plane
* enabled on the pipe we could leave IPS enabled, but
* for now let's assume that when we make the primary
* plane invisible by setting DSPCNTR to 0 in the
* update_primary_plane function, IPS needs to be
* disabled.
*/
intel_crtc->atomic.disable_ips = true;
 
intel_crtc->atomic.disable_fbc = true;
}
 
/*
* FBC does not work on some platforms for rotated
* planes, so disable it when rotation is not 0 and
* update it when rotation is set back to 0.
*
* FIXME: This is redundant with the fbc update done in
* the primary plane enable function except that that
* one is done too late. We eventually need to unify
* this.
*/
 
if (visible &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
dev_priv->fbc.crtc == intel_crtc &&
plane_state->rotation != BIT(DRM_ROTATE_0))
intel_crtc->atomic.disable_fbc = true;
 
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (turn_on && IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;
 
intel_crtc->atomic.update_fbc |= visible || mode_changed;
break;
case DRM_PLANE_TYPE_CURSOR:
break;
11964,13 → 11893,8
*/
if (IS_IVYBRIDGE(dev) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state)) {
to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
} else if (turn_off && !mode_changed) {
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.update_sprite_watermarks |=
1 << i;
}
!needs_scaling(old_plane_state))
pipe_config->disable_lp_wm = true;
 
break;
}
12572,19 → 12496,22
 
BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
 
if (m > m2) {
while (m > m2) {
if (n > n2) {
while (n > n2) {
m2 <<= 1;
n2 <<= 1;
}
} else if (m < m2) {
while (m < m2) {
} else if (n < n2) {
while (n < n2) {
m <<= 1;
n <<= 1;
}
}
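/* e.g. m/n = 2/3 vs m2/n2 = 4/6: m and n are doubled to 4/6, the n
 * values then match exactly, and m = 4 is fuzzy-compared against m2 = 4. */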
 
return m == m2 && n == n2;
if (n != n2)
return false;
 
return intel_fuzzy_clock_check(m, m2);
}
 
static bool
13135,8 → 13062,6
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll_config *shared_dpll = NULL;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *intel_crtc_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
13145,21 → 13070,21
return;
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
int dpll;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
 
intel_crtc = to_intel_crtc(crtc);
intel_crtc_state = to_intel_crtc_state(crtc_state);
dpll = intel_crtc_state->shared_dpll;
if (!needs_modeset(crtc_state))
continue;
 
if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
 
if (old_dpll == DPLL_ID_PRIVATE)
continue;
 
intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
 
if (!shared_dpll)
shared_dpll = intel_atomic_get_shared_dpll_state(state);
 
shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
}
}
 
13259,9 → 13184,11
 
static int intel_modeset_checks(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = state->dev->dev_private;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int ret = 0, i;
 
if (!check_digital_port_conflicts(state)) {
DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13268,6 → 13195,16
return -EINVAL;
}
 
intel_state->modeset = true;
intel_state->active_crtcs = dev_priv->active_crtcs;
 
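/* Start from the pipes that are active today and fold in what this
 * state enables or disables. */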
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (crtc_state->active)
intel_state->active_crtcs |= 1 << i;
else
intel_state->active_crtcs &= ~(1 << i);
}
 
/*
* See if the config requires any additional preparation, e.g.
* to adjust global state with pipes off. We need to do this
13276,22 → 13213,22
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
unsigned int cdclk;
 
ret = dev_priv->display.modeset_calc_cdclk(state);
 
cdclk = to_intel_atomic_state(state)->cdclk;
if (!ret && cdclk != dev_priv->cdclk_freq)
if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
ret = intel_modeset_all_pipes(state);
 
if (ret < 0)
return ret;
 
DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
intel_state->cdclk, intel_state->dev_cdclk);
} else
to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
 
intel_modeset_clear_plls(state);
 
if (IS_HASWELL(dev))
if (IS_HASWELL(dev_priv))
return haswell_mode_set_planes_workaround(state);
 
return 0;
13344,6 → 13281,7
static int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
13386,7 → 13324,7
return ret;
 
if (i915.fastboot &&
intel_pipe_config_compare(state->dev,
intel_pipe_config_compare(dev,
to_intel_crtc_state(crtc->state),
pipe_config, true)) {
crtc_state->mode_changed = false;
13412,12 → 13350,13
if (ret)
return ret;
} else
intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
intel_state->cdclk = dev_priv->cdclk_freq;
 
ret = drm_atomic_helper_check_planes(state->dev, state);
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
 
intel_fbc_choose_crtc(dev_priv, state);
calc_watermark_data(state);
 
return 0;
13492,6 → 13431,71
return ret;
}
 
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
struct drm_i915_private *dev_priv,
unsigned crtc_mask)
{
unsigned last_vblank_count[I915_MAX_PIPES];
enum pipe pipe;
int ret;
 
if (!crtc_mask)
return;
 
for_each_pipe(dev_priv, pipe) {
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
if (!((1 << pipe) & crtc_mask))
continue;
 
ret = drm_crtc_vblank_get(crtc);
if (WARN_ON(ret != 0)) {
crtc_mask &= ~(1 << pipe);
continue;
}
 
last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
}
 
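/* Second pass: wait for each armed pipe's vblank counter to advance,
 * giving up after 50 ms so a dead pipe cannot wedge the commit. */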
for_each_pipe(dev_priv, pipe) {
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
long lret;
 
if (!((1 << pipe) & crtc_mask))
continue;
 
lret = wait_event_timeout(dev->vblank[pipe].queue,
last_vblank_count[pipe] !=
drm_crtc_vblank_count(crtc),
msecs_to_jiffies(50));
 
WARN_ON(!lret);
 
drm_crtc_vblank_put(crtc);
}
}
 
static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
{
/* fb updated, need to unpin old fb */
if (crtc_state->fb_changed)
return true;
 
/* wm changes, need vblank before final wm's */
if (crtc_state->update_wm_post)
return true;
 
/*
* cxsr is re-enabled after vblank.
* This is already handled by crtc_state->update_wm_post,
* but added for clarity.
*/
if (crtc_state->disable_cxsr)
return true;
 
return false;
}
 
/**
* intel_atomic_commit - commit validated state object
* @dev: DRM device
13512,12 → 13516,14
struct drm_atomic_state *state,
bool async)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret = 0;
int i;
bool any_ms = false;
int ret = 0, i;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
 
ret = intel_atomic_prepare_commit(dev, state, async);
if (ret) {
13528,19 → 13534,37
drm_atomic_helper_swap_state(dev, state);
dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
 
if (intel_state->modeset) {
memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
sizeof(intel_state->min_pixclk));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->atomic_cdclk_freq = intel_state->cdclk;
 
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
}
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (needs_modeset(crtc->state) ||
to_intel_crtc_state(crtc->state)->update_pipe) {
hw_check = true;
 
put_domains[to_intel_crtc(crtc)->pipe] =
modeset_get_crtc_power_domains(crtc,
to_intel_crtc_state(crtc->state));
}
 
if (!needs_modeset(crtc->state))
continue;
 
any_ms = true;
intel_pre_plane_update(intel_crtc);
intel_pre_plane_update(to_intel_crtc_state(crtc_state));
 
if (crtc_state->active) {
intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
intel_disable_shared_dpll(intel_crtc);
 
/*
13559,11 → 13583,14
* update the output configuration. */
intel_modeset_update_crtc_state(state);
 
if (any_ms) {
if (intel_state->modeset) {
intel_shared_dpll_commit(state);
 
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
modeset_update_crtc_power_domains(state);
 
if (dev_priv->display.modeset_commit_cdclk &&
intel_state->dev_cdclk != dev_priv->cdclk_freq)
dev_priv->display.modeset_commit_cdclk(state);
}
 
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13570,54 → 13597,66
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool modeset = needs_modeset(crtc->state);
bool update_pipe = !modeset &&
to_intel_crtc_state(crtc->state)->update_pipe;
unsigned long put_domains = 0;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
bool update_pipe = !modeset && pipe_config->update_pipe;
 
if (modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
if (modeset && crtc->state->active) {
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
}
 
if (update_pipe) {
put_domains = modeset_get_crtc_power_domains(crtc);
if (!modeset)
intel_pre_plane_update(to_intel_crtc_state(crtc_state));
 
/* make sure intel_modeset_check_state runs */
any_ms = true;
}
if (crtc->state->active && intel_crtc->atomic.update_fbc)
intel_fbc_enable(intel_crtc);
 
if (!modeset)
intel_pre_plane_update(intel_crtc);
 
if (crtc->state->active &&
(crtc->state->planes_changed || update_pipe))
drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 
if (put_domains)
modeset_put_power_domains(dev_priv, put_domains);
if (pipe_config->base.active && needs_vblank_wait(pipe_config))
crtc_vblank_mask |= 1 << i;
}
 
intel_post_plane_update(intel_crtc);
/* FIXME: add subpixel order */
 
if (modeset)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
if (!state->legacy_cursor_update)
intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
intel_post_plane_update(to_intel_crtc(crtc));
 
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
}
 
/* FIXME: add subpixel order */
if (intel_state->modeset)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
 
drm_atomic_helper_wait_for_vblanks(dev, state);
 
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
 
if (any_ms)
if (hw_check)
intel_modeset_check_state(dev, state);
 
drm_atomic_state_free(state);
 
/* As one of the primary mmio accessors, KMS has a high likelihood
* of triggering bugs in unclaimed access. After we finish
* modesetting, see if an error has been flagged, and if so
* enable debugging for the next modeset - and hope we catch
* the culprit.
*
* XXX note that we assume display power is on at this point.
* This might hold true now but we need to add pm helper to check
* unclaimed only when the hardware is on, as atomic commits
* can happen also when the device is completely off.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
return 0;
}
 
13897,7 → 13936,7
struct drm_i915_private *dev_priv;
int crtc_clock, cdclk;
 
if (!intel_crtc || !crtc_state)
if (!intel_crtc || !crtc_state->base.enable)
return DRM_PLANE_HELPER_NO_SCALING;
 
dev = intel_crtc->base.dev;
13946,32 → 13985,6
&state->visible);
}
 
static void
intel_commit_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->base.crtc;
struct drm_framebuffer *fb = state->base.fb;
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
crtc = crtc ? crtc : plane->crtc;
 
dev_priv->display.update_primary_plane(crtc, fb,
state->src.x1 >> 16,
state->src.y1 >> 16);
}
 
static void
intel_disable_primary_plane(struct drm_plane *plane,
struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
}
 
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
14056,8 → 14069,6
primary->plane = pipe;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
primary->commit_plane = intel_commit_primary_plane;
primary->disable_plane = intel_disable_primary_plane;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
 
14064,12 → 14075,27
if (INTEL_INFO(dev)->gen >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
 
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
 
primary->update_plane = ironlake_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
} else if (INTEL_INFO(dev)->gen >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
 
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
 
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
}
 
drm_universal_plane_init(dev, &primary->base, 0,
14168,22 → 14194,23
intel_disable_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc)
{
intel_crtc_update_cursor(crtc, false);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
intel_crtc->cursor_addr = 0;
intel_crtc_update_cursor(crtc, NULL);
}
 
static void
intel_commit_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
intel_update_cursor_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->base.crtc;
struct drm_crtc *crtc = crtc_state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc;
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
 
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
 
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev)->cursor_needs_physical)
14192,9 → 14219,7
addr = obj->phys_handle->busaddr;
 
intel_crtc->cursor_addr = addr;
 
if (crtc->state->active)
intel_crtc_update_cursor(crtc, state->visible);
intel_crtc_update_cursor(crtc, state);
}
 
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14220,7 → 14245,7
cursor->plane = pipe;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
cursor->check_plane = intel_check_cursor_plane;
cursor->commit_plane = intel_commit_cursor_plane;
cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
 
drm_universal_plane_init(dev, &cursor->base, 0,
14667,10 → 14692,12
u32 gen = INTEL_INFO(dev)->gen;
 
if (gen >= 9) {
int cpp = drm_format_plane_cpp(pixel_format, 0);
 
/* "The stride in bytes must not exceed the size of 8K
* pixels and 32K bytes."
*/
return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
return min(8192 * cpp, 32768);
} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
return 32*1024;
} else if (gen >= 4) {
14694,6 → 14721,7
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned int aligned_height;
int ret;
u32 pitch_limit, stride_alignment;
14735,7 → 14763,8
return -EINVAL;
}
 
stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
stride_alignment = intel_fb_stride_alignment(dev_priv,
mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14827,7 → 14856,6
 
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
intel_fb->obj->framebuffer_references++;
 
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
14834,7 → 14862,11
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
 
intel_fb->obj->framebuffer_references++;
 
kolibri_framebuffer_init(intel_fb);
 
return 0;
}
 
14898,8 → 14930,6
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.update_primary_plane =
skylake_update_primary_plane;
} else if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
14908,8 → 14938,6
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_initial_plane_config =
14918,8 → 14946,6
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
14927,8 → 14953,6
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
14936,8 → 14960,6
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
}
 
/* Returns the core display clock speed */
15243,12 → 15265,89
 
void intel_modeset_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_update_cdclk(dev);
intel_prepare_ddi(dev);
 
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
 
intel_init_clock_gating(dev);
intel_enable_gt_powersave(dev);
}
 
/*
* Calculate what we think the watermarks should be for the state we've read
* out of the hardware and then immediately program those watermarks so that
* we ensure the hardware settings match our internal state.
*
* We can calculate what we think WM's should be by creating a duplicate of the
* current state (which was constructed during hardware readout) and running it
* through the atomic check code to calculate new watermark values in the
* state object.
*/
static void sanitize_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct drm_modeset_acquire_ctx ctx;
int ret;
int i;
 
/* Only supported on platforms that use atomic watermark design */
if (!dev_priv->display.program_watermarks)
return;
 
/*
* We need to hold connection_mutex before calling duplicate_state so
* that the connector loop is protected.
*/
drm_modeset_acquire_init(&ctx, 0);
retry:
ret = drm_modeset_lock_all_ctx(dev, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
} else if (WARN_ON(ret)) {
goto fail;
}
 
state = drm_atomic_helper_duplicate_state(dev, &ctx);
if (WARN_ON(IS_ERR(state)))
goto fail;
 
ret = intel_atomic_check(dev, state);
if (ret) {
/*
* If we fail here, it means that the hardware appears to be
* programmed in a way that shouldn't be possible, given our
* understanding of watermark requirements. This might mean a
* mistake in the hardware readout code or a mistake in the
* watermark calculations for a given platform. Raise a WARN
* so that this is noticeable.
*
* If this actually happens, we'll have to just leave the
* BIOS-programmed watermarks untouched and hope for the best.
*/
WARN(true, "Could not determine valid watermarks for inherited state\n");
goto fail;
}
 
/* Write calculated watermark values back */
to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
for_each_crtc_in_state(state, crtc, cstate, i) {
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
 
dev_priv->display.program_watermarks(cs);
}
 
drm_atomic_state_free(state);
fail:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
 
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
15365,6 → 15464,13
*/
intel_find_initial_plane_obj(crtc, &plane_config);
}
 
/*
* Make sure hardware watermarks really match the state we read out.
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
sanitize_watermarks(dev);
}
 
static void intel_enable_pipe_a(struct drm_device *dev)
15421,6 → 15527,17
return false;
}
 
static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_connector *connector;
 
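/* The loop body runs only if at least one connector is attached. */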
for_each_connector_on_encoder(dev, &encoder->base, connector)
return true;
 
return false;
}
 
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
15495,6 → 15612,7
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
crtc->base.state->connector_mask = 0;
crtc->base.state->encoder_mask = 0;
 
/* Because we only establish the connector -> encoder ->
* crtc links if something is active, this means the
15530,7 → 15648,6
{
struct intel_connector *connector;
struct drm_device *dev = encoder->base.dev;
bool active = false;
 
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
15538,15 → 15655,7
bool has_active_crtc = encoder->base.crtc &&
to_intel_crtc(encoder->base.crtc)->active;
 
for_each_intel_connector(dev, connector) {
if (connector->base.encoder != &encoder->base)
continue;
 
active = true;
break;
}
 
if (active && !has_active_crtc) {
if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
15639,17 → 15748,41
struct intel_connector *connector;
int i;
 
dev_priv->active_crtcs = 0;
 
for_each_intel_crtc(dev, crtc) {
__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
memset(crtc->config, 0, sizeof(*crtc->config));
crtc->config->base.crtc = &crtc->base;
struct intel_crtc_state *crtc_state = crtc->config;
int pixclk = 0;
 
crtc->active = dev_priv->display.get_pipe_config(crtc,
crtc->config);
__drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
crtc_state->base.crtc = &crtc->base;
 
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
crtc_state->base.active = crtc_state->base.enable =
dev_priv->display.get_pipe_config(crtc, crtc_state);
 
crtc->base.enabled = crtc_state->base.enable;
crtc->active = crtc_state->base.active;
 
if (crtc_state->base.active) {
dev_priv->active_crtcs |= 1 << crtc->pipe;
 
if (IS_BROADWELL(dev_priv)) {
pixclk = ilk_pipe_pixel_rate(crtc_state);
 
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (crtc_state->ips_enabled)
pixclk = DIV_ROUND_UP(pixclk * 100, 95);
} else if (IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv) ||
IS_BROXTON(dev_priv))
pixclk = crtc_state->base.adjusted_mode.crtc_clock;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
}
 
dev_priv->min_pixclk[crtc->pipe] = pixclk;
 
readout_plane_state(crtc);
 
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15712,6 → 15845,8
*/
encoder->base.crtc->state->connector_mask |=
1 << drm_connector_index(&connector->base);
encoder->base.crtc->state->encoder_mask |=
1 << drm_encoder_index(&encoder->base);
}
 
} else {
15808,64 → 15943,89
for_each_intel_crtc(dev, crtc) {
unsigned long put_domains;
 
put_domains = modeset_get_crtc_power_domains(&crtc->base);
put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
intel_display_set_init_power(dev_priv, false);
 
intel_fbc_init_pipe_state(dev_priv);
}
 
void intel_display_resume(struct drm_device *dev)
{
struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
struct intel_connector *conn;
struct intel_plane *plane;
struct drm_crtc *crtc;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state = dev_priv->modeset_restore_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
bool setup = false;
 
if (!state)
return;
dev_priv->modeset_restore_state = NULL;
 
state->acquire_ctx = dev->mode_config.acquire_ctx;
/*
* This is a kludge because with real atomic modeset mode_config.mutex
* won't be taken. Unfortunately some probed state like
* audio_codec_enable is still protected by mode_config.mutex, so lock
* it here for now.
*/
mutex_lock(&dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, 0);
 
/* preserve complete old state, including dpll */
intel_atomic_get_shared_dpll_state(state);
retry:
ret = drm_modeset_lock_all_ctx(dev, &ctx);
 
for_each_crtc(dev, crtc) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_crtc_state(state, crtc);
/*
* With MST, the number of connectors can change between suspend and
* resume, which means that the state we want to restore might now be
* impossible to use since it'll be pointing to non-existent
* connectors.
*/
if (ret == 0 && state &&
state->num_connector != dev->mode_config.num_connector) {
drm_atomic_state_free(state);
state = NULL;
}
 
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
goto err;
if (ret == 0 && !setup) {
setup = true;
 
/* force a restore */
intel_modeset_setup_hw_state(dev);
i915_redisable_vga(dev);
}
 
if (ret == 0 && state) {
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int i;
 
state->acquire_ctx = &ctx;
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*
* Force recalculation even if we restore the
* current state. With fast modeset this may not result
* in a modeset when the state is compatible.
*/
crtc_state->mode_changed = true;
}
 
for_each_intel_plane(dev, plane) {
ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
if (ret)
goto err;
ret = drm_atomic_commit(state);
}
 
for_each_intel_connector(dev, conn) {
ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
if (ret)
goto err;
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
 
intel_modeset_setup_hw_state(dev);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
 
i915_redisable_vga(dev);
ret = drm_atomic_commit(state);
if (!ret)
return;
 
err:
if (ret) {
DRM_ERROR("Restoring old state failed with %i\n", ret);
drm_atomic_state_free(state);
}
}
 
void intel_modeset_gem_init(struct drm_device *dev)
{
15873,9 → 16033,7
struct drm_i915_gem_object *obj;
int ret;
 
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
 
intel_modeset_init_hw(dev);
 
15943,7 → 16101,7
 
intel_unregister_dsm_handler();
 
intel_fbc_disable(dev_priv);
intel_fbc_global_disable(dev_priv);
 
/* flush any delayed tasks or pending work */
flush_scheduled_work();
15956,9 → 16114,7
 
intel_cleanup_overlay(dev);
 
mutex_lock(&dev->struct_mutex);
intel_cleanup_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
#endif
}
 
16008,8 → 16164,6
return 0;
}
 
#ifdef CONFIG_DEBUG_FS
 
struct intel_display_error_state {
 
u32 power_well_driver;
16154,7 → 16308,7
for_each_pipe(dev_priv, i) {
err_printf(m, "Pipe [%d]:\n", i);
err_printf(m, " Power: %s\n",
error->pipe[i].power_domain_on ? "on" : "off");
onoff(error->pipe[i].power_domain_on));
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
 
16182,7 → 16336,7
err_printf(m, "CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",
error->transcoder[i].power_domain_on ? "on" : "off");
onoff(error->transcoder[i].power_domain_on));
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
16192,25 → 16346,3
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
#endif
 
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
{
struct intel_crtc *crtc;
 
for_each_intel_crtc(dev, crtc) {
struct intel_unpin_work *work;
 
spin_lock_irq(&dev->event_lock);
 
work = crtc->unpin_work;
 
if (work && work->event &&
work->event->base.file_priv == file) {
kfree(work->event);
work->event = NULL;
}
 
spin_unlock_irq(&dev->event_lock);
}
}
/drivers/video/drm/i915/intel_dp.c
156,14 → 156,9
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
u8 source_max, sink_max;
 
source_max = 4;
if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
(intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
source_max = 2;
 
source_max = intel_dig_port->max_lanes;
sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
 
return min(source_max, sink_max);
207,6 → 202,7
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
if (is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
224,7 → 220,7
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
 
if (mode_rate > max_rate)
if (mode_rate > max_rate || target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
 
if (mode->clock < 10000)
339,9 → 335,13
release_cl_override = IS_CHERRYVIEW(dev) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
 
vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll);
if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
return;
}
}
 
/*
* Similar magic as in intel_dp_enable_port().
979,7 → 979,10
if (WARN_ON(txsize > 20))
return -E2BIG;
 
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
else
WARN_ON(msg->size);
 
ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
if (ret > 0) {
1188,7 → 1191,6
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
int ret;
1199,13 → 1201,9
if (!intel_dp->aux.name)
return -ENOMEM;
 
intel_dp->aux.dev = dev->dev;
intel_dp->aux.dev = connector->base.kdev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
 
DRM_DEBUG_KMS("registering %s bus for %s\n",
intel_dp->aux.name,
connector->base.kdev->kobj.name);
 
ret = drm_dp_aux_register(&intel_dp->aux);
if (ret < 0) {
DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1214,16 → 1212,6
return ret;
}
 
ret = sysfs_create_link(&connector->base.kdev->kobj,
&intel_dp->aux.ddc.dev.kobj,
intel_dp->aux.ddc.dev.kobj.name);
if (ret < 0) {
DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
intel_dp->aux.name, ret);
intel_dp_aux_fini(intel_dp);
return ret;
}
 
return 0;
}
 
1232,9 → 1220,7
{
struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
 
if (!intel_connector->mst_port)
sysfs_remove_link(&intel_connector->base.kdev->kobj,
intel_dp->aux.ddc.dev.kobj.name);
intel_dp_aux_fini(intel_dp);
intel_connector_unregister(intel_connector);
}
 
1811,12 → 1797,21
 
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
ktime_t panel_power_on_time;
s64 panel_power_off_duration;
 
DRM_DEBUG_KMS("Wait for panel power cycle\n");
 
/* take the difference of the current time and the panel power off time
* and then make the panel wait for t11_t12 if needed. */
panel_power_on_time = ktime_get();
panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
 
/* When we disable the VDD override bit last we have to do the manual
* wait. */
wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
intel_dp->panel_power_cycle_delay);
if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
wait_remaining_ms_from_jiffies(jiffies,
intel_dp->panel_power_cycle_delay - panel_power_off_duration);
 
wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
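 
The ktime-based bookkeeping above replaces the old jiffies arithmetic. A
minimal sketch of the same remaining-delay pattern, with a hypothetical
helper name (not driver API), needing <linux/ktime.h> and <linux/delay.h>:
 
static void wait_remaining_delay(ktime_t off_time, s64 required_ms)
{
	s64 elapsed_ms = ktime_ms_delta(ktime_get(), off_time);

	/* Sleep only for the part of the delay that has not yet passed. */
	if (elapsed_ms < required_ms)
		msleep(required_ms - elapsed_ms);
}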
1874,7 → 1869,7
if (!is_edp(intel_dp))
return false;
 
// cancel_delayed_work(&intel_dp->panel_vdd_work);
cancel_delayed_work(&intel_dp->panel_vdd_work);
intel_dp->want_panel_vdd = true;
 
if (edp_have_panel_vdd(intel_dp))
1968,7 → 1963,7
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
 
if ((pp & POWER_TARGET_ON) == 0)
intel_dp->last_power_cycle = jiffies;
intel_dp->panel_power_off_time = ktime_get();
 
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
2117,7 → 2112,7
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
intel_dp->last_power_cycle = jiffies;
intel_dp->panel_power_off_time = ktime_get();
wait_panel_off(intel_dp);
 
/* We got a reference when we enabled the VDD. */
2242,11 → 2237,6
_intel_edp_backlight_off(intel_dp);
}
 
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
}
 
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2256,7 → 2246,7
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
port_name(dig_port->port),
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
 
2266,7 → 2256,7
 
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
4023,7 → 4013,7
} while (--attempts && count);
 
if (attempts == 0) {
DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
ret = -ETIMEDOUT;
}
 
4563,7 → 4553,7
{
if (HAS_PCH_IBX(dev_priv))
return ibx_digital_port_connected(dev_priv, port);
if (HAS_PCH_SPLIT(dev_priv))
else if (HAS_PCH_SPLIT(dev_priv))
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
4883,10 → 4873,9
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
 
intel_dp_aux_fini(intel_dp);
intel_dp_mst_encoder_cleanup(intel_dig_port);
if (is_edp(intel_dp)) {
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
4914,7 → 4903,7
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
4947,13 → 4936,15
 
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
 
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
 
intel_dp = enc_to_intel_dp(encoder);
 
pps_lock(intel_dp);
 
/*
5025,9 → 5016,6
intel_display_power_get(dev_priv, power_domain);
 
if (long_hpd) {
/* indicate that we need to restart link training */
intel_dp->train_set_valid = false;
 
if (!intel_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;
 
5130,7 → 5118,7
 
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
intel_dp->last_power_cycle = jiffies;
intel_dp->panel_power_off_time = ktime_get();
intel_dp->last_power_on = jiffies;
intel_dp->last_backlight_off = jiffies;
}
5513,7 → 5501,7
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);
 
// cancel_delayed_work_sync(&dev_priv->drrs.work);
cancel_delayed_work_sync(&dev_priv->drrs.work);
}
 
static void intel_edp_drrs_downclock_work(struct work_struct *work)
5566,7 → 5554,7
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
 
// cancel_delayed_work(&dev_priv->drrs.work);
cancel_delayed_work(&dev_priv->drrs.work);
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
5611,7 → 5599,7
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
 
// cancel_delayed_work(&dev_priv->drrs.work);
cancel_delayed_work(&dev_priv->drrs.work);
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
5847,6 → 5835,11
enum port port = intel_dig_port->port;
int type, ret;
 
if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
return false;
 
intel_dp->pps_pipe = INVALID_PIPE;
 
/* intel_dp vfuncs */
5979,7 → 5972,7
 
fail:
if (is_edp(intel_dp)) {
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
6044,6 → 6037,7
 
intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
 
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (IS_CHERRYVIEW(dev)) {
/drivers/video/drm/i915/intel_dp_link_training.c
85,7 → 85,6
intel_dp_reset_link_train(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
if (!intel_dp->train_set_valid)
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
return intel_dp_set_link_train(intel_dp, dp_train_pat);
161,22 → 160,6
break;
}
 
/*
* if we used previously trained voltage and pre-emphasis values
* and we don't get clock recovery, reset link training values
*/
if (intel_dp->train_set_valid) {
DRM_DEBUG_KMS("clock recovery not ok, reset");
/* clear the flag as we are not reusing train set */
intel_dp->train_set_valid = false;
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
continue;
}
 
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
284,7 → 267,6
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
301,7 → 283,6
 
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp,
training_pattern |
322,11 → 303,9
 
intel_dp_set_idle_link_train(intel_dp);
 
if (channel_eq) {
intel_dp->train_set_valid = true;
if (channel_eq)
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}
}
 
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
/drivers/video/drm/i915/intel_dp_mst.c
173,6 → 173,8
intel_mst->port = found->port;
 
if (intel_dp->active_mst_links == 0) {
intel_prepare_ddi_buffer(&intel_dig_port->base);
 
intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
 
intel_dp_set_link_params(intel_dp, intel_crtc->config);
347,6 → 349,8
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
354,6 → 358,9
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
 
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
 
return MODE_OK;
}
 
/drivers/video/drm/i915/intel_drv.h
33,6 → 33,7
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
247,7 → 248,18
struct drm_atomic_state base;
 
unsigned int cdclk;
bool dpll_set;
 
/*
* Calculated device cdclk, can be different from cdclk
* only when all crtc's are DPMS off.
*/
unsigned int dev_cdclk;
 
bool dpll_set, modeset;
 
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
 
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
};
369,6 → 381,7
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fb_changed; /* fb on any of the planes is changed */
 
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
482,6 → 495,8
 
bool ips_enabled;
 
bool enable_fbc;
 
bool double_wide;
 
bool dp_encoder_is_mst;
532,16 → 547,13
*/
struct intel_crtc_atomic_commit {
/* Sleepable operations to perform before commit */
bool disable_fbc;
bool disable_ips;
bool pre_disable_primary;
 
/* Sleepable operations to perform after commit */
unsigned fb_bits;
bool wait_vblank;
bool post_enable_primary;
 
/* Sleepable operations to perform before and after commit */
bool update_fbc;
bool post_enable_primary;
unsigned update_sprite_watermarks;
};
 
struct intel_crtc {
565,7 → 577,7
/* Display surface base address adjustment for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
unsigned long dspaddr_offset;
u32 dspaddr_offset;
int adjusted_x;
int adjusted_y;
 
648,23 → 660,17
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
* the intel_plane_state structure and accessed via drm_plane->state.
* the intel_plane_state structure and accessed via plane_state.
*/
 
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
};
 
struct intel_watermark_params {
699,6 → 705,10
struct intel_hdmi {
i915_reg_t hdmi_reg;
int ddc_bus;
struct {
enum drm_dp_dual_mode_type type;
int max_tmds_clock;
} dp_dual_mode;
bool limited_color_range;
bool color_range_auto;
bool has_hdmi_sink;
766,9 → 776,9
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
ktime_t panel_power_off_time;
 
struct notifier_block edp_notifier;
 
802,8 → 812,6
/* This is called before link training is started */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
 
bool train_set_valid;
 
/* Displayport compliance testing */
unsigned long compliance_test_type;
unsigned long compliance_test_data;
818,6 → 826,7
struct intel_hdmi hdmi;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
904,9 → 913,7
};
 
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
bool load_detect_temp;
int dpms_mode;
struct drm_atomic_state *restore_state;
};
 
static inline struct intel_encoder *
989,6 → 996,8
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
 
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
997,7 → 1006,7
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
void intel_prepare_ddi(struct drm_device *dev);
void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
1040,8 → 1049,8
uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
enum fb_op_origin origin);
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format);
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format);
 
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
1125,9 → 1134,8
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
 
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
uint64_t fb_format_modifier, unsigned int plane);
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);
 
static inline bool
intel_rotation_90_or_270(unsigned int rotation)
1148,7 → 1156,7
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
 
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
 
1166,10 → 1174,10
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
1206,7 → 1214,6
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
1221,7 → 1228,7
 
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
bool intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);
 
/* intel_dp.c */
1324,13 → 1331,16
#endif
 
/* intel_fbc.c */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
void intel_fbc_deactivate(struct intel_crtc *crtc);
void intel_fbc_update(struct intel_crtc *crtc);
void intel_fbc_pre_update(struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc);
void intel_fbc_disable(struct drm_i915_private *dev_priv);
void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
1345,6 → 1355,7
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 
 
/* intel_lvds.c */
1559,6 → 1570,7
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
 
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev,
1617,5 → 1629,6
void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
#define synchronize_irq(x)
 
#endif /* __INTEL_DRV_H__ */
/drivers/video/drm/i915/intel_dsi.c
478,8 → 478,8
 
DRM_DEBUG_KMS("\n");
 
intel_enable_dsi_pll(encoder);
intel_dsi_prepare(encoder);
intel_enable_dsi_pll(encoder);
 
/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
634,7 → 634,6
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 val;
 
DRM_DEBUG_KMS("\n");
 
642,9 → 641,13
 
intel_dsi_clear_device_ready(encoder);
 
if (!IS_BROXTON(dev_priv)) {
u32 val;
 
val = I915_READ(DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
}
 
drm_panel_unprepare(intel_dsi->panel);
 
709,7 → 712,7
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
u32 pclk = 0;
u32 pclk;
DRM_DEBUG_KMS("\n");
 
pipe_config->has_dsi_encoder = true;
720,12 → 723,7
*/
pipe_config->dpll_hw_state.dpll_md = 0;
 
if (IS_BROXTON(encoder->base.dev))
pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
else if (IS_VALLEYVIEW(encoder->base.dev) ||
IS_CHERRYVIEW(encoder->base.dev))
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
 
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
if (!pclk)
return;
 
787,10 → 785,9
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = intel_crtc->config->pipe_bpp;
unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
 
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
861,7 → 858,7
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum port port;
unsigned int bpp = intel_crtc->config->pipe_bpp;
unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;
 
/drivers/video/drm/i915/intel_dsi.h
34,6 → 34,8
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2
 
int dsi_pixel_format_bpp(int pixel_format);
 
struct intel_dsi_host;
 
struct intel_dsi {
126,8 → 128,7
 
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
 
/drivers/video/drm/i915/intel_dsi_panel_vbt.c
234,7 → 234,8
if (!gtable[gpio].init) {
/* program the function */
/* FIXME: remove constant below */
vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
0x2000CC00);
gtable[gpio].init = 1;
}
 
241,7 → 242,7
val = 0x4 | action;
 
/* pull up/down */
vlv_gpio_nc_write(dev_priv, pad, val);
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
mutex_unlock(&dev_priv->sb_lock);
 
out:
248,14 → 249,18
return data;
}
 
static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
{
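/* Assumed v2 i2c element layout (not shown in this diff): a 7-byte
 * fixed header (flag, index, bus, 16-bit slave address, register
 * offset, payload length) followed by the payload; byte 6 holds the
 * payload length, hence the 7 + length skip below. */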
return data + *(data + 6) + 7;
}
 
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
NULL, /* reserved */
mipi_exec_send_packet,
mipi_exec_delay,
mipi_exec_gpio,
NULL, /* status read; later */
[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
[MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
};
 
/*
265,108 → 270,115
*/
 
static const char * const seq_name[] = {
"UNDEFINED",
"MIPI_SEQ_ASSERT_RESET",
"MIPI_SEQ_INIT_OTP",
"MIPI_SEQ_DISPLAY_ON",
"MIPI_SEQ_DISPLAY_OFF",
"MIPI_SEQ_DEASSERT_RESET"
[MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
[MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF",
[MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON",
[MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF",
};
 
static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
static const char *sequence_name(enum mipi_seq seq_id)
{
if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
return seq_name[seq_id];
else
return "(unknown)";
}
 
static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
int index;
 
if (!data)
if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
return;
 
DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
data = dev_priv->vbt.dsi.sequence[seq_id];
if (!data) {
DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
seq_id, sequence_name(seq_id));
return;
}
 
/* go to the first element of the sequence */
WARN_ON(*data != seq_id);
 
DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
seq_id, sequence_name(seq_id));
 
/* Skip Sequence Byte. */
data++;
 
/* parse each byte till we reach end of sequence byte - 0x00 */
/* Skip Size of Sequence. */
if (dev_priv->vbt.dsi.seq_version >= 3)
data += 4;
 
while (1) {
index = *data;
mipi_elem_exec = exec_elem[index];
if (!mipi_elem_exec) {
DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
return;
}
u8 operation_byte = *data++;
u8 operation_size = 0;
 
/* goto element payload */
data++;
if (operation_byte == MIPI_SEQ_ELEM_END)
break;
 
/* execute the element-specific routines */
if (operation_byte < ARRAY_SIZE(exec_elem))
mipi_elem_exec = exec_elem[operation_byte];
else
mipi_elem_exec = NULL;
 
/* Size of Operation. */
if (dev_priv->vbt.dsi.seq_version >= 3)
operation_size = *data++;
 
if (mipi_elem_exec) {
data = mipi_elem_exec(intel_dsi, data);
 
/*
* After processing the element, data should point to
* the next element or the end of the sequence;
* check whether we have reached the end of the sequence
*/
if (*data == 0x00)
break;
} else if (operation_size) {
/* We have size, skip. */
DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
DRM_ERROR("Unsupported MIPI operation byte %u\n",
operation_byte);
return;
}
}
}
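For illustration, a v3 sequence as consumed by this loop could look like the following; the byte values are hypothetical, not taken from a real VBT:

/* Hypothetical v3 sequence data (values illustrative only):
 *   byte 0      sequence id
 *   bytes 1..4  32-bit size of the sequence data
 *   then per element: operation byte, size byte, payload
 *   terminated by MIPI_SEQ_ELEM_END (0x00)
 */
static const u8 example_seq[] = {
	MIPI_SEQ_DISPLAY_ON,		/* byte 0: sequence id */
	0x07, 0x00, 0x00, 0x00,	/* bytes 1..4: size, little-endian (illustrative) */
	MIPI_SEQ_ELEM_DELAY, 0x04,	/* element: operation byte, size byte */
	0x10, 0x27, 0x00, 0x00,	/* payload: 10000 us delay, little-endian */
	MIPI_SEQ_ELEM_END,		/* 0x00 terminates the sequence */
};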
 
static int vbt_panel_prepare(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
generic_exec_sequence(intel_dsi, sequence);
 
return 0;
}
 
static int vbt_panel_unprepare(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);
 
return 0;
}
 
static int vbt_panel_enable(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
generic_exec_sequence(intel_dsi, sequence);
 
return 0;
}
 
static int vbt_panel_disable(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
generic_exec_sequence(intel_dsi, sequence);
 
return 0;
}
 
428,10 → 440,7
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
 
if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
bits_per_pixel = 16;
bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);
 
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
685,6 → 694,8
 
/* This is cheating a bit with the cleanup. */
vbt_panel = kzalloc(sizeof(*vbt_panel), GFP_KERNEL);
if (!vbt_panel)
return NULL;
 
vbt_panel->intel_dsi = intel_dsi;
drm_panel_init(&vbt_panel->panel);
/drivers/video/drm/i915/intel_dsi_pll.c
30,15 → 30,7
#include "i915_drv.h"
#include "intel_dsi.h"
 
#define DSI_HSS_PACKET_SIZE 4
#define DSI_HSE_PACKET_SIZE 4
#define DSI_HSA_PACKET_EXTRA_SIZE 6
#define DSI_HBP_PACKET_EXTRA_SIZE 6
#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
#define DSI_HFP_PACKET_EXTRA_SIZE 6
#define DSI_EOTP_PACKET_SIZE 4
 
static int dsi_pixel_format_bpp(int pixel_format)
int dsi_pixel_format_bpp(int pixel_format)
{
int bpp;
 
71,77 → 63,6
71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
};
 
#ifdef DSI_CLK_FROM_RR
 
static u32 dsi_rr_formula(const struct drm_display_mode *mode,
int pixel_format, int video_mode_format,
int lane_count, bool eotp)
{
u32 bpp;
u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
u32 bytes_per_line, bytes_per_frame;
u32 num_frames;
u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
u32 dsi_bit_clock_hz;
u32 dsi_clk;
 
bpp = dsi_pixel_format_bpp(pixel_format);
 
hactive = mode->hdisplay;
vactive = mode->vdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
 
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
 
hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
 
bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
 
/*
* XXX: Need to accurately calculate LP to HS transition timeout and add
* it to bytes_per_line/bytes_per_frame.
*/
 
if (eotp && video_mode_format == VIDEO_MODE_BURST)
bytes_per_line += DSI_EOTP_PACKET_SIZE;
 
bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
vactive * bytes_per_line + vfp * bytes_per_line;
 
if (eotp &&
(video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
bytes_per_frame += DSI_EOTP_PACKET_SIZE;
 
num_frames = drm_mode_vrefresh(mode);
bytes_per_x_frames = num_frames * bytes_per_frame;
 
bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
 
/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
dsi_clk = dsi_bit_clock_hz / 1000;
 
if (eotp && video_mode_format == VIDEO_MODE_BURST)
dsi_clk *= 2;
 
return dsi_clk;
}
 
#else
 
/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
155,8 → 76,6
return dsi_clk_khz;
}
 
#endif
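The body of dsi_clk_from_pclk() falls outside this hunk. As a hedged sketch of the relationship it stands for (assumed from the surrounding bpp and lane_count usage, not from the elided lines): the DSI link must carry pclk * bpp bits per second split across the lanes.

/* Sketch under the stated assumption, not the elided body: */
static u32 dsi_clk_from_pclk_sketch(u32 pclk, int pixel_format, int lane_count)
{
	u32 bpp = dsi_pixel_format_bpp(pixel_format);

	/* pclk in kHz times bits per pixel, spread over the lanes */
	return DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
}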
 
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct dsi_mnp *dsi_mnp, int target_dsi_clk)
{
322,7 → 241,7
bpp, pipe_bpp);
}
 
u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
384,7 → 303,7
return pclk;
}
 
u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
u32 pclk;
u32 dsi_clk;
419,6 → 338,14
return pclk;
}
 
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
if (IS_BROXTON(encoder->base.dev))
return bxt_dsi_get_pclk(encoder, pipe_bpp);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp);
}
 
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
/drivers/video/drm/i915/intel_fbc.c
43,7 → 43,7
 
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
return dev_priv->fbc.activate != NULL;
return HAS_FBC(dev_priv);
}
 
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
56,6 → 56,11
return INTEL_INFO(dev_priv)->gen < 4;
}
 
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen <= 3;
}
 
/*
* On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
74,19 → 79,17
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
*/
static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
int *width, int *height)
{
struct intel_plane_state *plane_state =
to_intel_plane_state(crtc->base.primary->state);
int w, h;
 
if (intel_rotation_90_or_270(plane_state->base.rotation)) {
w = drm_rect_height(&plane_state->src) >> 16;
h = drm_rect_width(&plane_state->src) >> 16;
if (intel_rotation_90_or_270(cache->plane.rotation)) {
w = cache->plane.src_h;
h = cache->plane.src_w;
} else {
w = drm_rect_width(&plane_state->src) >> 16;
h = drm_rect_height(&plane_state->src) >> 16;
w = cache->plane.src_w;
h = cache->plane.src_h;
}
 
if (width)
95,18 → 98,17
*height = h;
}
 
static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
struct drm_framebuffer *fb)
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
struct intel_fbc_state_cache *cache)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
int lines;
 
intel_fbc_get_plane_source_size(crtc, NULL, &lines);
intel_fbc_get_plane_source_size(cache, NULL, &lines);
if (INTEL_INFO(dev_priv)->gen >= 7)
lines = min(lines, 2048);
 
/* Hardware needs the full buffer stride, not just the active area. */
return lines * fb->pitches[0];
return lines * cache->fb.stride;
}
 
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
113,8 → 115,6
{
u32 fbc_ctl;
 
dev_priv->fbc.active = false;
 
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
130,21 → 130,17
}
}
 
static void i8xx_fbc_activate(struct intel_crtc *crtc)
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
int cfb_pitch;
int i;
u32 fbc_ctl;
 
dev_priv->fbc.active = true;
 
/* Note: fbc.threshold == 1 for i8xx */
cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
cfb_pitch = params->cfb_size / FBC_LL_SIZE;
if (params->fb.stride < cfb_pitch)
cfb_pitch = params->fb.stride;
 
/* FBC_CTL wants 32B or 64B units */
if (IS_GEN2(dev_priv))
161,9 → 157,9
 
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
}
 
/* enable it... */
173,7 → 169,7
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
fbc_ctl |= params->fb.fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
 
182,23 → 178,19
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
 
static void g4x_fbc_activate(struct intel_crtc *crtc)
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
 
dev_priv->fbc.active = true;
 
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
 
I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
 
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
208,8 → 200,6
{
u32 dpfc_ctl;
 
dev_priv->fbc.active = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
230,19 → 220,14
POSTING_READ(MSG_FBC_REND_STATE);
}
 
static void ilk_fbc_activate(struct intel_crtc *crtc)
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
unsigned int y_offset;
 
dev_priv->fbc.active = true;
 
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
threshold++;
 
switch (threshold) {
259,18 → 244,17
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
dpfc_ctl |= obj->fence_reg;
dpfc_ctl |= params->fb.fence_reg;
 
y_offset = get_crtc_fence_y_offset(crtc);
I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
}
 
intel_fbc_recompress(dev_priv);
280,8 → 264,6
{
u32 dpfc_ctl;
 
dev_priv->fbc.active = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
295,21 → 277,17
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void gen7_fbc_activate(struct intel_crtc *crtc)
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
 
dev_priv->fbc.active = true;
 
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
 
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
threshold++;
 
switch (threshold) {
337,8 → 315,8
ILK_FBCQ_DIS);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
HSW_FBCQ_DIS);
}
 
345,12 → 323,52
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
 
intel_fbc_recompress(dev_priv);
}
 
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
if (INTEL_INFO(dev_priv)->gen >= 5)
return ilk_fbc_is_active(dev_priv);
else if (IS_GM45(dev_priv))
return g4x_fbc_is_active(dev_priv);
else
return i8xx_fbc_is_active(dev_priv);
}
 
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
 
fbc->active = true;
 
if (INTEL_INFO(dev_priv)->gen >= 7)
gen7_fbc_activate(dev_priv);
else if (INTEL_INFO(dev_priv)->gen >= 5)
ilk_fbc_activate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_activate(dev_priv);
else
i8xx_fbc_activate(dev_priv);
}
 
static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
 
fbc->active = false;
 
if (INTEL_INFO(dev_priv)->gen >= 5)
ilk_fbc_deactivate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_deactivate(dev_priv);
else
i8xx_fbc_deactivate(dev_priv);
}
 
/**
* intel_fbc_is_active - Is FBC active?
* @dev_priv: i915 device instance
364,25 → 382,25
return dev_priv->fbc.active;
}
 
static void intel_fbc_activate(const struct drm_framebuffer *fb)
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct drm_i915_private *dev_priv = fb->dev->dev_private;
struct intel_crtc *crtc = dev_priv->fbc.crtc;
struct drm_i915_private *dev_priv =
container_of(__work, struct drm_i915_private, fbc.work.work);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
struct intel_crtc *crtc = fbc->crtc;
struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
 
dev_priv->fbc.activate(crtc);
if (drm_crtc_vblank_get(&crtc->base)) {
DRM_ERROR("vblank not available for FBC on pipe %c\n",
pipe_name(crtc->pipe));
 
dev_priv->fbc.fb_id = fb->base.id;
dev_priv->fbc.y = crtc->base.y;
mutex_lock(&fbc->lock);
work->scheduled = false;
mutex_unlock(&fbc->lock);
return;
}
 
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct drm_i915_private *dev_priv =
container_of(__work, struct drm_i915_private, fbc.work.work);
struct intel_fbc_work *work = &dev_priv->fbc.work;
struct intel_crtc *crtc = dev_priv->fbc.crtc;
int delay_ms = 50;
 
retry:
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
390,16 → 408,18
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
* It is also worth mentioning that since work->scheduled_vblank can be
* updated multiple times by the other threads, hitting the timeout is
* not an error condition. We'll just end up hitting the "goto retry"
* case below.
*/
wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
wait_event_timeout(vblank->queue,
drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
msecs_to_jiffies(50));
 
mutex_lock(&dev_priv->fbc.lock);
mutex_lock(&fbc->lock);
 
/* Were we cancelled? */
if (!work->scheduled)
406,128 → 426,81
goto out;
 
/* Were we delayed again while this function was sleeping? */
if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
jiffies)) {
mutex_unlock(&dev_priv->fbc.lock);
if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
mutex_unlock(&fbc->lock);
goto retry;
}
 
if (crtc->base.primary->fb == work->fb)
intel_fbc_activate(work->fb);
intel_fbc_hw_activate(dev_priv);
 
work->scheduled = false;
 
out:
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
drm_crtc_vblank_put(&crtc->base);
}
 
static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
dev_priv->fbc.work.scheduled = false;
}
 
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc_work *work = &dev_priv->fbc.work;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
WARN_ON(!mutex_is_locked(&fbc->lock));
 
/* It is useless to call intel_fbc_cancel_work() in this function since
* we're not releasing fbc.lock, so it won't have an opportunity to grab
* it to discover that it was cancelled. So we just update the expected
* jiffy count. */
work->fb = crtc->base.primary->fb;
if (drm_crtc_vblank_get(&crtc->base)) {
DRM_ERROR("vblank not available for FBC on pipe %c\n",
pipe_name(crtc->pipe));
return;
}
 
/* It is useless to call intel_fbc_cancel_work() or cancel_work() in
* this function since we're not releasing fbc.lock, so it won't have an
* opportunity to grab it to discover that it was cancelled. So we just
* update the expected vblank count. */
work->scheduled = true;
work->enable_jiffies = jiffies;
work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
drm_crtc_vblank_put(&crtc->base);
 
schedule_work(&work->work);
}
 
static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
struct intel_fbc *fbc = &dev_priv->fbc;
 
intel_fbc_cancel_work(dev_priv);
WARN_ON(!mutex_is_locked(&fbc->lock));
 
if (dev_priv->fbc.active)
dev_priv->fbc.deactivate(dev_priv);
}
/* Calling cancel_work() here won't help due to the fact that the work
* function grabs fbc->lock. Just set scheduled to false so the work
* function can know it was cancelled. */
fbc->work.scheduled = false;
 
/*
* intel_fbc_deactivate - deactivate FBC if it's associated with crtc
* @crtc: the CRTC
*
* This function deactivates FBC if it's associated with the provided CRTC.
*/
void intel_fbc_deactivate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.crtc == crtc)
__intel_fbc_deactivate(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
}
 
static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
const char *reason)
static bool multiple_pipes_ok(struct intel_crtc *crtc)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return;
 
dev_priv->fbc.no_fbc_reason = reason;
DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
}
 
static bool crtc_can_fbc(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_plane *primary = crtc->base.primary;
struct intel_fbc *fbc = &dev_priv->fbc;
enum pipe pipe = crtc->pipe;
 
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
return false;
 
if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
return false;
 
/* Don't even bother tracking anything we don't need. */
if (!no_fbc_on_multiple_pipes(dev_priv))
return true;
}
 
static bool crtc_is_valid(struct intel_crtc *crtc)
{
if (!intel_crtc_active(&crtc->base))
return false;
WARN_ON(!drm_modeset_is_locked(&primary->mutex));
 
if (!to_intel_plane_state(crtc->base.primary->state)->visible)
return false;
if (to_intel_plane_state(primary->state)->visible)
fbc->visible_pipes_mask |= (1 << pipe);
else
fbc->visible_pipes_mask &= ~(1 << pipe);
 
return true;
return (fbc->visible_pipes_mask & ~(1 << pipe)) == 0;
}
 
static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
int n_pipes = 0;
struct drm_crtc *crtc;
 
if (INTEL_INFO(dev_priv)->gen > 4)
return true;
 
for_each_pipe(dev_priv, pipe) {
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
if (intel_crtc_active(crtc) &&
to_intel_plane_state(crtc->primary->state)->visible)
n_pipes++;
}
 
return (n_pipes < 2);
}
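As a worked example of the mask check above (hypothetical state): with primary planes visible on pipes A and B, visible_pipes_mask is 0x3, so for crtc->pipe == PIPE_A the expression (0x3 & ~(1 << PIPE_A)) evaluates to 0x2. Another pipe is visible, multiple_pipes_ok() returns false, and FBC is refused on the gen <= 3 platforms tracked by no_fbc_on_multiple_pipes().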
 
static int find_compression_threshold(struct drm_i915_private *dev_priv,
struct drm_mm_node *node,
int size,
581,16 → 554,16
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->state->fb;
struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_mm_node *uninitialized_var(compressed_llb);
int size, fb_cpp, ret;
 
WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
 
size = intel_fbc_calculate_cfb_size(crtc, fb);
fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);
 
ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
599,12 → 572,12
 
}
 
dev_priv->fbc.threshold = ret;
fbc->threshold = ret;
 
if (INTEL_INFO(dev_priv)->gen >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
615,23 → 588,22
if (ret)
goto err_fb;
 
dev_priv->fbc.compressed_llb = compressed_llb;
fbc->compressed_llb = compressed_llb;
 
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
dev_priv->mm.stolen_base + fbc->compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
 
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
dev_priv->fbc.compressed_fb.size,
dev_priv->fbc.threshold);
fbc->compressed_fb.size, fbc->threshold);
 
return 0;
 
err_fb:
kfree(compressed_llb);
i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
639,25 → 611,27
 
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
i915_gem_stolen_remove_node(dev_priv,
&dev_priv->fbc.compressed_fb);
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (dev_priv->fbc.compressed_llb) {
i915_gem_stolen_remove_node(dev_priv,
dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
if (drm_mm_node_allocated(&fbc->compressed_fb))
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
 
if (fbc->compressed_llb) {
i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
kfree(fbc->compressed_llb);
}
}
 
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
mutex_lock(&fbc->lock);
__intel_fbc_cleanup_cfb(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
}
 
static bool stride_is_valid(struct drm_i915_private *dev_priv,
681,12 → 655,10
return true;
}
 
static bool pixel_format_is_valid(struct drm_framebuffer *fb)
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
uint32_t pixel_format)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
switch (fb->pixel_format) {
switch (pixel_format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
return true;
693,7 → 665,7
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
if (IS_GEN2(dev))
if (IS_GEN2(dev_priv))
return false;
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
713,6 → 685,7
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
 
if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
726,7 → 699,8
max_h = 1536;
}
 
intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
&effective_h);
effective_w += crtc->adjusted_x;
effective_h += crtc->adjusted_y;
 
733,80 → 707,97
return effective_w <= max_w && effective_h <= max_h;
}
 
/**
* __intel_fbc_update - activate/deactivate FBC as needed, unlocked
* @crtc: the CRTC that triggered the update
*
* This function completely reevaluates the status of FBC, then activates,
* deactivates, or maintains it in the same state.
*/
static void __intel_fbc_update(struct intel_crtc *crtc)
static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
to_intel_plane_state(crtc->base.primary->state);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
 
if (!multiple_pipes_ok(dev_priv)) {
set_no_fbc_reason(dev_priv, "more than one pipe active");
goto out_disable;
}
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cache->crtc.hsw_bdw_pixel_rate =
ilk_pipe_pixel_rate(crtc_state);
 
if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
cache->plane.rotation = plane_state->base.rotation;
cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
cache->plane.visible = plane_state->visible;
 
if (!cache->plane.visible)
return;
 
if (!crtc_is_valid(crtc)) {
set_no_fbc_reason(dev_priv, "no output");
goto out_disable;
obj = intel_fb_obj(fb);
 
/* FIXME: We lack the proper locking here, so only run this on the
* platforms that need it. */
if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
cache->fb.fence_reg = obj->fence_reg;
cache->fb.tiling_mode = obj->tiling_mode;
}
 
fb = crtc->base.primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &crtc->config->base.adjusted_mode;
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
set_no_fbc_reason(dev_priv, "incompatible mode");
goto out_disable;
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
}
 
if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
(cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
fbc->no_fbc_reason = "incompatible mode";
return false;
}
 
if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
set_no_fbc_reason(dev_priv, "mode too large for compression");
goto out_disable;
fbc->no_fbc_reason = "mode too large for compression";
return false;
}
 
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
goto out_disable;
if (cache->fb.tiling_mode != I915_TILING_X ||
cache->fb.fence_reg == I915_FENCE_REG_NONE) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
set_no_fbc_reason(dev_priv, "rotation unsupported");
goto out_disable;
cache->plane.rotation != BIT(DRM_ROTATE_0)) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
}
 
if (!stride_is_valid(dev_priv, fb->pitches[0])) {
set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
goto out_disable;
if (!stride_is_valid(dev_priv, cache->fb.stride)) {
fbc->no_fbc_reason = "framebuffer stride not supported";
return false;
}
 
if (!pixel_format_is_valid(fb)) {
set_no_fbc_reason(dev_priv, "pixel format is invalid");
goto out_disable;
if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
fbc->no_fbc_reason = "pixel format is invalid";
return false;
}
 
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
ilk_pipe_pixel_rate(crtc->config) >=
dev_priv->cdclk_freq * 95 / 100) {
set_no_fbc_reason(dev_priv, "pixel rate is too big");
goto out_disable;
cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
fbc->no_fbc_reason = "pixel rate is too big";
return false;
}
 
/* It is possible for the required CFB size change without a
819,189 → 810,321
* we didn't get any invalidate/deactivate calls, but this would require
* a lot of tracking just for a specific case. If we conclude it's an
* important case, we can implement it later. */
if (intel_fbc_calculate_cfb_size(crtc, fb) >
dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
set_no_fbc_reason(dev_priv, "CFB requirements changed");
goto out_disable;
if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
fbc->compressed_fb.size * fbc->threshold) {
fbc->no_fbc_reason = "CFB requirements changed";
return false;
}
 
return true;
}
 
static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
bool enable_by_default = IS_BROADWELL(dev_priv);
 
if (intel_vgpu_active(dev_priv->dev)) {
fbc->no_fbc_reason = "VGPU is active";
return false;
}
 
if (i915.enable_fbc < 0 && !enable_by_default) {
fbc->no_fbc_reason = "disabled per chip default";
return false;
}
 
if (!i915.enable_fbc) {
fbc->no_fbc_reason = "disabled per module param";
return false;
}
 
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
fbc->no_fbc_reason = "no enabled pipes can have FBC";
return false;
}
 
if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
fbc->no_fbc_reason = "no enabled planes can have FBC";
return false;
}
 
return true;
}
 
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
struct intel_fbc_reg_params *params)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
/* Since all our fields are integer types, use memset here so the
* comparison function can rely on memcmp because the padding will be
* zero. */
memset(params, 0, sizeof(*params));
 
params->crtc.pipe = crtc->pipe;
params->crtc.plane = crtc->plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
 
params->fb.pixel_format = cache->fb.pixel_format;
params->fb.stride = cache->fb.stride;
params->fb.fence_reg = cache->fb.fence_reg;
 
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
 
params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
}
 
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
struct intel_fbc_reg_params *params2)
{
/* We can use this since intel_fbc_get_reg_params() does a memset. */
return memcmp(params1, params2, sizeof(*params1)) == 0;
}
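The memset in intel_fbc_get_reg_params() is what makes the memcmp above sound: compiler-inserted padding would otherwise hold garbage. A generic sketch of the pattern (names hypothetical, not part of the driver):

struct example_params {
	u8 a;	/* typically followed by 3 padding bytes */
	u32 b;
};

static void example_params_init(struct example_params *p, u8 a, u32 b)
{
	memset(p, 0, sizeof(*p));	/* zero the padding first */
	p->a = a;
	p->b = b;
}

static bool example_params_equal(const struct example_params *x,
				 const struct example_params *y)
{
	return memcmp(x, y, sizeof(*x)) == 0;	/* safe: padding is zeroed */
}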
 
void intel_fbc_pre_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&fbc->lock);
 
if (!multiple_pipes_ok(crtc)) {
fbc->no_fbc_reason = "more than one pipe active";
goto deactivate;
}
 
if (!fbc->enabled || fbc->crtc != crtc)
goto unlock;
 
intel_fbc_update_state_cache(crtc);
 
deactivate:
intel_fbc_deactivate(dev_priv);
unlock:
mutex_unlock(&fbc->lock);
}
 
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_reg_params old_params;
 
WARN_ON(!mutex_is_locked(&fbc->lock));
 
if (!fbc->enabled || fbc->crtc != crtc)
return;
 
if (!intel_fbc_can_activate(crtc)) {
WARN_ON(fbc->active);
return;
}
 
old_params = fbc->params;
intel_fbc_get_reg_params(crtc, &fbc->params);
 
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->fbc.crtc == crtc &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->base.y &&
dev_priv->fbc.active)
if (fbc->active &&
intel_fbc_reg_params_equal(&old_params, &fbc->params))
return;
 
if (intel_fbc_is_active(dev_priv)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
* we disable the FBC at the start of the page-flip
* sequence, but also more than one vblank has passed.
*
* For the former case of modeswitching, it is possible
* to switch between two FBC valid configurations
* instantaneously so we do need to disable the FBC
* before we can modify its control registers. We also
* have to wait for the next vblank for that to take
* effect. However, since we delay enabling FBC we can
* assume that a vblank has passed since disabling and
* that we can safely alter the registers in the deferred
* callback.
*
* In the scenario that we go from a valid to invalid
* and then back to valid FBC configuration we have
* no strict enforcement that a vblank occurred since
* disabling the FBC. However, along all current pipe
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("deactivating FBC for update\n");
__intel_fbc_deactivate(dev_priv);
}
 
intel_fbc_deactivate(dev_priv);
intel_fbc_schedule_activation(crtc);
dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
return;
 
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_is_active(dev_priv)) {
DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
__intel_fbc_deactivate(dev_priv);
fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}
}
 
/*
* intel_fbc_update - activate/deactivate FBC as needed
* @crtc: the CRTC that triggered the update
*
* This function reevaluates the overall state and activates or deactivates FBC.
*/
void intel_fbc_update(struct intel_crtc *crtc)
void intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_update(crtc);
mutex_unlock(&dev_priv->fbc.lock);
mutex_lock(&fbc->lock);
__intel_fbc_post_update(crtc);
mutex_unlock(&fbc->lock);
}
 
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
if (fbc->enabled)
return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
else
return fbc->possible_framebuffer_bits;
}
 
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
unsigned int fbc_bits;
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
if (origin == ORIGIN_GTT)
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
return;
 
mutex_lock(&dev_priv->fbc.lock);
mutex_lock(&fbc->lock);
 
if (dev_priv->fbc.enabled)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
else
fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
 
dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
if (fbc->enabled && fbc->busy_bits)
intel_fbc_deactivate(dev_priv);
 
if (dev_priv->fbc.busy_bits)
__intel_fbc_deactivate(dev_priv);
 
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
}
 
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
if (origin == ORIGIN_GTT)
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
return;
 
mutex_lock(&dev_priv->fbc.lock);
mutex_lock(&fbc->lock);
 
dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
fbc->busy_bits &= ~frontbuffer_bits;
 
if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
if (!fbc->busy_bits && fbc->enabled &&
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
intel_fbc_recompress(dev_priv);
} else {
__intel_fbc_deactivate(dev_priv);
__intel_fbc_update(dev_priv->fbc.crtc);
else
__intel_fbc_post_update(fbc->crtc);
}
 
mutex_unlock(&fbc->lock);
}
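
Taken together, invalidate/flush implement a simple dirty-tracking protocol:
a write to the frontbuffer of the FBC plane first hits intel_fbc_invalidate(),
which records the plane's bit in busy_bits and deactivates hardware
compression; the matching intel_fbc_flush() clears the bit and, once nothing
is outstanding, either triggers a recompress (if FBC is still active) or
re-runs __intel_fbc_post_update() to reactivate it. ORIGIN_GTT and
ORIGIN_FLIP return early in both paths; presumably GTT writes can be detected
by the FBC hardware itself via the fence, and flips are covered by the
pre/post-update hooks, though that rationale is not spelled out here.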
 
mutex_unlock(&dev_priv->fbc.lock);
/**
* intel_fbc_choose_crtc - select a CRTC to enable FBC on
* @dev_priv: i915 device instance
* @state: the atomic state structure
*
* This function looks at the proposed state for CRTCs and planes, then chooses
* which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
* true.
*
* Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
* enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
*/
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state)
{
struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
bool fbc_crtc_present = false;
int i, j;
 
mutex_lock(&fbc->lock);
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (fbc->crtc == to_intel_crtc(crtc)) {
fbc_crtc_present = true;
break;
}
}
/* This atomic commit doesn't involve the CRTC currently tied to FBC. */
if (!fbc_crtc_present && fbc->crtc != NULL)
goto out;
 
/* Simply choose the first CRTC that is compatible and has a visible
* plane. We could go for fancier schemes such as checking the plane
* size, but this would just affect the few platforms that don't tie FBC
* to pipe or plane A. */
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
 
if (!intel_plane_state->visible)
continue;
 
for_each_crtc_in_state(state, crtc, crtc_state, j) {
struct intel_crtc_state *intel_crtc_state =
to_intel_crtc_state(crtc_state);
 
if (plane_state->crtc != crtc)
continue;
 
if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
break;
 
intel_crtc_state->enable_fbc = true;
goto out;
}
}
 
out:
mutex_unlock(&fbc->lock);
}
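
A sketch of where this hook is expected to run, assuming the usual atomic
check flow (example_atomic_check and its wiring are hypothetical; only
intel_fbc_choose_crtc() is from this patch):

static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* Runs once per atomic check, before commit, so that
	 * intel_fbc_enable() later finds enable_fbc set on the chosen CRTC. */
	intel_fbc_choose_crtc(dev_priv, state);
	return 0;
}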
 
/**
* intel_fbc_enable: tries to enable FBC on the CRTC
* @crtc: the CRTC
*
* This function checks if it's possible to enable FBC on the following CRTC,
* then enables it. Notice that it doesn't activate FBC.
* This function checks if the given CRTC was chosen for FBC, then enables it if
* possible. Notice that it doesn't activate FBC. It is valid to call
* intel_fbc_enable multiple times for the same pipe without an
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
void intel_fbc_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
mutex_lock(&fbc->lock);
 
if (dev_priv->fbc.enabled) {
WARN_ON(dev_priv->fbc.crtc == crtc);
goto out;
if (fbc->enabled) {
WARN_ON(fbc->crtc == NULL);
if (fbc->crtc == crtc) {
WARN_ON(!crtc->config->enable_fbc);
WARN_ON(fbc->active);
}
 
WARN_ON(dev_priv->fbc.active);
WARN_ON(dev_priv->fbc.crtc != NULL);
 
if (intel_vgpu_active(dev_priv->dev)) {
set_no_fbc_reason(dev_priv, "VGPU is active");
goto out;
}
 
if (i915.enable_fbc < 0) {
set_no_fbc_reason(dev_priv, "disabled per chip default");
if (!crtc->config->enable_fbc)
goto out;
}
 
if (!i915.enable_fbc) {
set_no_fbc_reason(dev_priv, "disabled per module param");
goto out;
}
WARN_ON(fbc->active);
WARN_ON(fbc->crtc != NULL);
 
if (!crtc_can_fbc(crtc)) {
set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
goto out;
}
 
intel_fbc_update_state_cache(crtc);
if (intel_fbc_alloc_cfb(crtc)) {
set_no_fbc_reason(dev_priv, "not enough stolen memory");
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
 
DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet";
fbc->no_fbc_reason = "FBC enabled but not active yet";
 
dev_priv->fbc.enabled = true;
dev_priv->fbc.crtc = crtc;
fbc->enabled = true;
fbc->crtc = crtc;
out:
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
}
 
/**
1013,61 → 1136,91
*/
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc = dev_priv->fbc.crtc;
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_crtc *crtc = fbc->crtc;
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
WARN_ON(!dev_priv->fbc.enabled);
WARN_ON(dev_priv->fbc.active);
assert_pipe_disabled(dev_priv, crtc->pipe);
WARN_ON(!mutex_is_locked(&fbc->lock));
WARN_ON(!fbc->enabled);
WARN_ON(fbc->active);
WARN_ON(crtc->active);
 
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
 
__intel_fbc_cleanup_cfb(dev_priv);
 
dev_priv->fbc.enabled = false;
dev_priv->fbc.crtc = NULL;
fbc->enabled = false;
fbc->crtc = NULL;
}
 
/**
* intel_fbc_disable_crtc - disable FBC if it's associated with crtc
* intel_fbc_disable - disable FBC if it's associated with crtc
* @crtc: the CRTC
*
* This function disables FBC if it's associated with the provided CRTC.
*/
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
void intel_fbc_disable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.crtc == crtc) {
WARN_ON(!dev_priv->fbc.enabled);
WARN_ON(dev_priv->fbc.active);
mutex_lock(&fbc->lock);
if (fbc->crtc == crtc) {
WARN_ON(!fbc->enabled);
WARN_ON(fbc->active);
__intel_fbc_disable(dev_priv);
}
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
 
cancel_work_sync(&fbc->work.work);
}
 
/**
* intel_fbc_disable - globally disable FBC
* intel_fbc_global_disable - globally disable FBC
* @dev_priv: i915 device instance
*
* This function disables FBC regardless of which CRTC is associated with it.
*/
void intel_fbc_disable(struct drm_i915_private *dev_priv)
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.enabled)
mutex_lock(&fbc->lock);
if (fbc->enabled)
__intel_fbc_disable(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
mutex_unlock(&fbc->lock);
 
cancel_work_sync(&fbc->work.work);
}
 
/**
* intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
* @dev_priv: i915 device instance
*
* The FBC code needs to track CRTC visibility since the older platforms can't
* have FBC enabled while multiple pipes are used. This function does the
* initial setup at driver load to make sure FBC is matching the real hardware.
*/
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
 
/* Don't even bother tracking anything if we don't need to. */
if (!no_fbc_on_multiple_pipes(dev_priv))
return;
 
for_each_intel_crtc(dev_priv->dev, crtc)
if (intel_crtc_active(&crtc->base) &&
to_intel_plane_state(crtc->base.primary->state)->visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
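
One plausible consumer of visible_pipes_mask is the multiple_pipes_ok() check
used by intel_fbc_pre_update() above; a hedged sketch of its likely shape
(not verbatim driver code):

static bool example_multiple_pipes_ok(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	enum pipe pipe = crtc->pipe;

	/* No restriction on newer platforms, so don't track anything. */
	if (!no_fbc_on_multiple_pipes(dev_priv))
		return true;

	if (to_intel_plane_state(crtc->base.primary->state)->visible)
		fbc->visible_pipes_mask |= (1 << pipe);
	else
		fbc->visible_pipes_mask &= ~(1 << pipe);

	/* FBC is only OK if no pipe other than ours is visible. */
	return (fbc->visible_pipes_mask & ~(1 << pipe)) == 0;
}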
 
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
*
1075,21 → 1228,22
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
enum pipe pipe;
 
INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
mutex_init(&dev_priv->fbc.lock);
dev_priv->fbc.enabled = false;
dev_priv->fbc.active = false;
dev_priv->fbc.work.scheduled = false;
INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;
fbc->work.scheduled = false;
 
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
fbc->no_fbc_reason = "unsupported by this chipset";
return;
}
 
for_each_pipe(dev_priv, pipe) {
dev_priv->fbc.possible_framebuffer_bits |=
fbc->possible_framebuffer_bits |=
INTEL_FRONTBUFFER_PRIMARY(pipe);
 
if (fbc_on_pipe_a_only(dev_priv))
1096,30 → 1250,13
break;
}
 
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->fbc.is_active = ilk_fbc_is_active;
dev_priv->fbc.activate = gen7_fbc_activate;
dev_priv->fbc.deactivate = ilk_fbc_deactivate;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
dev_priv->fbc.is_active = ilk_fbc_is_active;
dev_priv->fbc.activate = ilk_fbc_activate;
dev_priv->fbc.deactivate = ilk_fbc_deactivate;
} else if (IS_GM45(dev_priv)) {
dev_priv->fbc.is_active = g4x_fbc_is_active;
dev_priv->fbc.activate = g4x_fbc_activate;
dev_priv->fbc.deactivate = g4x_fbc_deactivate;
} else {
dev_priv->fbc.is_active = i8xx_fbc_is_active;
dev_priv->fbc.activate = i8xx_fbc_activate;
dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
 
/* This value was pulled out of someone's hat */
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
 
/* We still don't have any sort of hardware state readout for FBC, so
* deactivate it in case the BIOS activated it to make sure software
* matches the hardware state. */
if (dev_priv->fbc.is_active(dev_priv))
dev_priv->fbc.deactivate(dev_priv);
if (intel_fbc_hw_is_active(dev_priv))
intel_fbc_hw_deactivate(dev_priv);
}
/drivers/video/drm/i915/intel_fbdev.c
35,7 → 35,7
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
//#include <linux/vga_switcheroo.h>
#include <linux/vga_switcheroo.h>
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
170,8 → 170,6
 
out:
mutex_unlock(&dev->struct_mutex);
if (!IS_ERR_OR_NULL(fb))
drm_framebuffer_unreference(fb);
return ret;
}
 
392,8 → 390,8
continue;
}
 
encoder = connector->encoder;
if (!encoder || WARN_ON(!encoder->crtc)) {
encoder = connector->state->best_encoder;
if (!encoder || WARN_ON(!connector->state->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;
 
406,7 → 404,7
 
num_connectors_enabled++;
 
new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
 
/*
* Make sure we're not trying to drive multiple connectors
452,17 → 450,22
* usually contains. But since our current
* code puts a mode derived from the post-pfit timings
* into crtc->mode this works out correctly.
*
* This is crtc->mode and not crtc->state->mode for the
* fastboot check to work correctly. crtc_state->mode has
* I915_MODE_FLAG_INHERITED, which we clear to force check
* state.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
modes[i] = &encoder->crtc->mode;
modes[i] = &connector->state->crtc->mode;
}
crtcs[i] = new_crtc;
 
DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
connector->state->crtc->base.id,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
 
682,6 → 685,7
ifbdev->helper.atomic = true;
 
dev_priv->fbdev = ifbdev;
 
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
 
return 0;
/drivers/video/drm/i915/intel_guc.h
43,9 → 43,10
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
uint32_t wq_head;
 
/* GuC submission statistics & status */
uint64_t submissions[I915_NUM_RINGS];
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
88,6 → 89,8
uint32_t log_flags;
struct drm_i915_gem_object *log_obj;
 
struct drm_i915_gem_object *ads_obj;
 
struct drm_i915_gem_object *ctx_pool_obj;
struct ida ctx_ids;
 
103,8 → 106,8
uint32_t action_fail; /* Total number of failures */
int32_t action_err; /* Last error code */
 
uint64_t submissions[I915_NUM_RINGS];
uint32_t last_seqno[I915_NUM_RINGS];
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
};
 
/* intel_guc_loader.c */
122,5 → 125,6
struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
int i915_guc_wq_check_space(struct i915_guc_client *client);
 
#endif
/drivers/video/drm/i915/intel_guc_fwif.h
39,10 → 39,18
#define GUC_CTX_PRIORITY_HIGH 1
#define GUC_CTX_PRIORITY_KMD_NORMAL 2
#define GUC_CTX_PRIORITY_NORMAL 3
#define GUC_CTX_PRIORITY_NUM 4
 
#define GUC_MAX_GPU_CONTEXTS 1024
#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
 
#define GUC_RENDER_ENGINE 0
#define GUC_VIDEO_ENGINE 1
#define GUC_BLITTER_ENGINE 2
#define GUC_VIDEOENHANCE_ENGINE 3
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
 
/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
81,11 → 89,14
#define GUC_CTL_CTXINFO 0
#define GUC_CTL_CTXNUM_IN16_SHIFT 0
#define GUC_CTL_BASE_ADDR_SHIFT 12
 
#define GUC_CTL_ARAT_HIGH 1
#define GUC_CTL_ARAT_LOW 2
 
#define GUC_CTL_DEVICE_INFO 3
#define GUC_CTL_GTTYPE_SHIFT 0
#define GUC_CTL_COREFAMILY_SHIFT 7
 
#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
97,9 → 108,12
#define GUC_LOG_ISR_PAGES 3
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
 
#define GUC_CTL_PAGE_FAULT_CONTROL 5
 
#define GUC_CTL_WA 6
#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
 
#define GUC_CTL_FEATURE 7
#define GUC_CTL_VCS2_ENABLED (1 << 0)
#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
109,6 → 123,7
#define GUC_CTL_PREEMPTION_LOG (1 << 5)
#define GUC_CTL_ENABLE_SLPC (1 << 7)
#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)
 
#define GUC_CTL_DEBUG 8
#define GUC_LOG_VERBOSITY_SHIFT 0
#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
118,9 → 133,19
/* Verbosity range-check limits, without the shift */
#define GUC_LOG_VERBOSITY_MIN 0
#define GUC_LOG_VERBOSITY_MAX 3
#define GUC_LOG_VERBOSITY_MASK 0x0000000f
#define GUC_LOG_DESTINATION_MASK (3 << 4)
#define GUC_LOG_DISABLED (1 << 6)
#define GUC_PROFILE_ENABLED (1 << 7)
#define GUC_WQ_TRACK_ENABLED (1 << 8)
#define GUC_ADS_ENABLED (1 << 9)
#define GUC_DEBUG_RESERVED (1 << 10)
#define GUC_ADS_ADDR_SHIFT 11
#define GUC_ADS_ADDR_MASK 0xfffff800
 
#define GUC_CTL_RSRVD 9
 
#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
 
/**
* DOC: GuC Firmware Layout
267,7 → 292,7
u64 db_trigger_phy;
u16 db_id;
 
struct guc_execlist_context lrc[I915_NUM_RINGS];
struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
 
u8 attribute;
 
299,6 → 324,99
#define GUC_POWER_D2 3
#define GUC_POWER_D3 4
 
/* Scheduling policy settings */
 
/* Reset engine upon preempt failure */
#define POLICY_RESET_ENGINE (1<<0)
/* Preempt to idle on quantum expiry */
#define POLICY_PREEMPT_TO_IDLE (1<<1)
 
#define POLICY_MAX_NUM_WI 15
 
struct guc_policy {
/* Time for one workload to execute (in microseconds). */
u32 execution_quantum;
u32 reserved1;
 
/* Time to wait for a preemption request to complete before issuing a
* reset (in microseconds). */
u32 preemption_time;
 
/* How much time to allow the workload to run after the first fault is
* observed, before preempting it (in microseconds). */
u32 fault_time;
 
u32 policy_flags;
u32 reserved[2];
} __packed;
 
struct guc_policies {
struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
 
/* How much time to allow, in microseconds, before DPC processing is
* called back via interrupt (to prevent DPC queue drain starvation).
* Typically thousands of microseconds (example only, not granularity). */
u32 dpc_promote_time;
 
/* Must be set to take these new values. */
u32 is_valid;
 
/* Max number of WIs to process per call. A large value may keep CS
* idle. */
u32 max_num_work_items;
 
u32 reserved[19];
} __packed;
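
A hedged sketch of how such a policy table might be initialized before being
handed to the GuC; the helper name and the quantum/preemption values are
illustrative, not mandated by the interface:

static void example_init_guc_policies(struct guc_policies *policies)
{
	struct guc_policy *policy;
	u32 p, i;

	policies->dpc_promote_time = 500000;
	policies->max_num_work_items = POLICY_MAX_NUM_WI;

	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
			policy = &policies->policy[p][i];

			policy->execution_quantum = 1000000;
			policy->preemption_time = 500000;
			policy->fault_time = 250000;
			policy->policy_flags = 0;
		}
	}

	/* Must be set for the GuC to accept the new values. */
	policies->is_valid = 1;
}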
 
/* GuC MMIO reg state struct */
 
#define GUC_REGSET_FLAGS_NONE 0x0
#define GUC_REGSET_POWERCYCLE 0x1
#define GUC_REGSET_MASKED 0x2
#define GUC_REGSET_ENGINERESET 0x4
#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8
#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10
 
#define GUC_REGSET_MAX_REGISTERS 25
#define GUC_MMIO_WHITE_LIST_START 0x24d0
#define GUC_MMIO_WHITE_LIST_MAX 12
#define GUC_S3_SAVE_SPACE_PAGES 10
 
struct guc_mmio_regset {
struct __packed {
u32 offset;
u32 value;
u32 flags;
} registers[GUC_REGSET_MAX_REGISTERS];
 
u32 values_valid;
u32 number_of_registers;
} __packed;
 
struct guc_mmio_reg_state {
struct guc_mmio_regset global_reg;
struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
 
/* MMIO registers that are set as non privileged */
struct __packed {
u32 mmio_start;
u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
u32 count;
} mmio_white_list[GUC_MAX_ENGINES_NUM];
} __packed;
 
/* GuC Additional Data Struct */
 
struct guc_ads {
u32 reg_state_addr;
u32 reg_state_buffer;
u32 golden_context_lrca;
u32 scheduler_policies;
u32 reserved0[3];
u32 eng_state_size[GUC_MAX_ENGINES_NUM];
u32 reserved2[4];
} __packed;
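
The ADS members are GGTT offsets into driver-allocated objects. A minimal
sketch of laying them out in one object, with the policies placed directly
after the ADS header (helper name hypothetical, layout an assumption):

static void example_init_ads(struct guc_ads *ads, u32 ads_ggtt_offset)
{
	/* Policies immediately follow the ADS header in the same object. */
	ads->scheduler_policies = ads_ggtt_offset + sizeof(struct guc_ads);

	/* Register save/restore state follows the policies. */
	ads->reg_state_addr = ads->scheduler_policies +
			      sizeof(struct guc_policies);
	ads->reg_state_buffer = ads->reg_state_addr +
				sizeof(struct guc_mmio_reg_state);
}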
 
/* This Action will be programmed in C180 - SOFT_SCRATCH_0_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,
/drivers/video/drm/i915/intel_guc_loader.c
165,6 → 165,13
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
}
 
if (guc->ads_obj) {
u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
>> PAGE_SHIFT;
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
}
 
/* If GuC submission is enabled, set up additional parameters here */
if (i915.enable_guc_submission) {
u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
192,7 → 199,7
* the value matches either of two values representing completion
* of the GuC boot process.
*
* This is used for polling the GuC status in a wait_for_atomic()
* This is used for polling the GuC status in a wait_for()
* loop below.
*/
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
252,7 → 259,7
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
 
/*
* Spin-wait for the DMA to complete & the GuC to start up.
* Wait for the DMA to complete & the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
259,7 → 266,7
* (Higher levels of the driver will attempt to fall back to
* execlist mode if this happens.)
*/
ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);
ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
 
DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
I915_READ(DMA_CTRL), status);
438,6 → 445,7
 
direct_interrupts_to_host(dev_priv);
i915_guc_submission_disable(dev);
i915_guc_submission_fini(dev);
 
return err;
}
554,10 → 562,12
DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
guc_fw->guc_fw_path, err);
 
mutex_lock(&dev->struct_mutex);
obj = guc_fw->guc_fw_obj;
if (obj)
drm_gem_object_unreference(&obj->base);
guc_fw->guc_fw_obj = NULL;
mutex_unlock(&dev->struct_mutex);
 
release_firmware(fw); /* OK even if fw is NULL */
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
624,10 → 634,11
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
mutex_lock(&dev->struct_mutex);
direct_interrupts_to_host(dev_priv);
i915_guc_submission_disable(dev);
i915_guc_submission_fini(dev);
 
mutex_lock(&dev->struct_mutex);
if (guc_fw->guc_fw_obj)
drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
guc_fw->guc_fw_obj = NULL;
/drivers/video/drm/i915/intel_hdmi.c
836,6 → 836,22
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
 
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
 
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;
 
DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
enable ? "Enabling" : "Disabling");
 
drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
adapter, enable);
}
 
static void intel_hdmi_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
845,6 → 861,8
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 hdmi_val;
 
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
 
hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
1143,6 → 1161,8
}
 
intel_hdmi->set_infoframes(&encoder->base, false, NULL);
 
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
 
static void g4x_disable_hdmi(struct intel_encoder *encoder)
1168,27 → 1188,42
intel_disable_hdmi(encoder);
}
 
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
if (IS_G4X(dev_priv))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
return 300000;
else
return 225000;
}
 
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
 
if (respect_downstream_limits) {
if (hdmi->dp_dual_mode.max_tmds_clock)
max_tmds_clock = min(max_tmds_clock,
hdmi->dp_dual_mode.max_tmds_clock);
if (!hdmi->has_hdmi_sink)
max_tmds_clock = min(max_tmds_clock, 165000);
}
 
return max_tmds_clock;
}
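
As a worked example (illustrative numbers): on a Haswell source the base
limit is 300000 kHz; with a type 2 adaptor reporting max_tmds_clock = 165000
and a sink without HDMI support (has_hdmi_sink == false), the call
hdmi_port_clock_limit(hdmi, true) returns min(300000, 165000, 165000) =
165000 kHz, whereas respect_downstream_limits == false yields the raw
300000 kHz source capability.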
 
static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_dvi_limit)
int clock, bool respect_downstream_limits)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
if (clock < 25000)
return MODE_CLOCK_LOW;
if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits))
return MODE_CLOCK_HIGH;
 
/* BXT DPLL can't generate 223-240 MHz */
1210,11 → 1245,19
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
enum drm_mode_status status;
int clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
 
clock = mode->clock;
 
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
 
if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
 
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
 
1304,7 → 1347,7
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK &&
hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
1344,10 → 1387,35
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
 
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
 
kfree(to_intel_connector(connector)->detect_edid);
to_intel_connector(connector)->detect_edid = NULL;
}
 
static void
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
 
if (type == DRM_DP_DUAL_MODE_NONE ||
type == DRM_DP_DUAL_MODE_UNKNOWN)
return;
 
hdmi->dp_dual_mode.type = type;
hdmi->dp_dual_mode.max_tmds_clock =
drm_dp_dual_mode_max_tmds_clock(type, adapter);
 
DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
drm_dp_get_dual_mode_type_name(type),
hdmi->dp_dual_mode.max_tmds_clock);
}
 
static bool
intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{
1363,6 → 1431,8
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
 
intel_hdmi_dp_dual_mode_detect(connector);
 
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
}
 
2049,6 → 2119,11
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
 
if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
return;
 
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
2232,6 → 2307,7
intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;
 
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
/drivers/video/drm/i915/intel_lrc.c
223,9 → 223,11
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
 
static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine);
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
struct drm_i915_gem_object *default_ctx_obj);
 
263,65 → 265,92
return 0;
}
 
static void
logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
 
ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
(ring->id == VCS || ring->id == VCS2);
 
ring->ctx_desc_template = GEN8_CTX_VALID;
ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(dev))
ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
 
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers */
/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
 
/* WaEnableForceRestoreInCtxtDescForVCS:skl */
/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
if (ring->disable_lite_restore_wa)
ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}
 
/**
* intel_execlists_ctx_id() - get the Execlists Context ID
* @ctx_obj: Logical Ring Context backing object.
* intel_lr_context_descriptor_update() - calculate & cache the descriptor
* descriptor for a pinned context
*
* Do not confuse with ctx->id! Unfortunately we have a name overload
* here: the old context ID we pass to userspace as a handle so that
* they can refer to a context, and the new context ID we pass to the
* ELSP so that the GPU can inform us of the context status via
* interrupts.
* @ctx: Context to work on
* @ring: Engine the descriptor will be used with
*
* Return: 20-bits globally unique context ID.
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
* expensive to calculate, we'll just do it once and cache the result,
* which remains valid until the context is unpinned.
*
* This is what a descriptor looks like, from LSB to MSB:
* bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
* bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
* bits 52-63: reserved, may encode the engine ID (for GuC)
*/
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
static void
intel_lr_context_descriptor_update(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
uint64_t lrca, desc;
 
lrca = ctx->engine[ring->id].lrc_vma->node.start +
LRC_PPHWSP_PN * PAGE_SIZE;
 
/* LRCA is required to be 4K aligned so the more significant 20 bits
* are globally unique */
return lrca >> 12;
desc = ring->ctx_desc_template; /* bits 0-11 */
desc |= lrca; /* bits 12-31 */
desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
 
ctx->engine[ring->id].lrc_desc = desc;
}
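
Given that layout, the LRCA and context ID can be recovered from a cached
descriptor with plain shifts and masks; a minimal sketch (helper names
hypothetical):

static inline u32 example_desc_to_lrca(u64 desc)
{
	return (u32)(desc & 0xfffff000);		/* bits 12-31 */
}

static inline u32 example_desc_to_ctx_id(u64 desc)
{
	return (u32)(desc >> GEN8_CTX_ID_SHIFT);	/* bits 32-51 */
}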
 
static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
 
return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
(ring->id == VCS || ring->id == VCS2);
return ctx->engine[ring->id].lrc_desc;
}
 
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
/**
* intel_execlists_ctx_id() - get the Execlists Context ID
* @ctx: Context to get the ID for
* @ring: Engine to get the ID for
*
* Do not confuse with ctx->id! Unfortunately we have a name overload
* here: the old context ID we pass to userspace as a handle so that
* they can refer to a context, and the new context ID we pass to the
* ELSP so that the GPU can inform us of the context status via
* interrupts.
*
* The context ID is a portion of the context descriptor, so we can
* just extract the required part from the cached descriptor.
*
* Return: 20-bits globally unique context ID.
*/
u32 intel_execlists_ctx_id(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
LRC_PPHWSP_PN * PAGE_SIZE;
 
WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
 
desc = GEN8_CTX_VALID;
desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(ctx_obj->base.dev))
desc |= GEN8_CTX_L3LLC_COHERENT;
desc |= GEN8_CTX_PRIVILEGE;
desc |= lrca;
desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
 
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers */
/* desc |= GEN8_CTX_FORCE_RESTORE; */
 
/* WaEnableForceRestoreInCtxtDescForVCS:skl */
/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
if (disable_lite_restore_wa(ring))
desc |= GEN8_CTX_FORCE_RESTORE;
 
return desc;
return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
}
 
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
363,20 → 392,9
{
struct intel_engine_cs *ring = rq->ring;
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
struct page *page;
uint32_t *reg_state;
uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
 
BUG_ON(!ctx_obj);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
reg_state[CTX_RING_TAIL+1] = rq->tail;
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* True 32b PPGTT with dynamic page allocation: update PDP
390,8 → 408,6
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
 
kunmap_atomic(reg_state);
 
return 0;
}
 
431,8 → 447,7
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_del(&req0->execlist_link);
list_add_tail(&req0->execlist_link,
list_move_tail(&req0->execlist_link,
&ring->execlist_retired_req_list);
req0 = cursor;
} else {
478,15 → 493,12
execlist_link);
 
if (head_req != NULL) {
struct drm_i915_gem_object *ctx_obj =
head_req->ctx->engine[ring->id].state;
if (intel_execlists_ctx_id(ctx_obj) == request_id) {
if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
WARN(head_req->elsp_submitted == 0,
"Never submitted head request\n");
 
if (--head_req->elsp_submitted <= 0) {
list_del(&head_req->execlist_link);
list_add_tail(&head_req->execlist_link,
list_move_tail(&head_req->execlist_link,
&ring->execlist_retired_req_list);
return true;
}
496,6 → 508,19
return false;
}
 
static void get_context_status(struct intel_engine_cs *ring,
u8 read_pointer,
u32 *status, u32 *context_id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
return;
 
*status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
*context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
}
 
/**
* intel_lrc_irq_handler() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
516,7 → 541,7
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
read_pointer = ring->next_context_status_buffer;
write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
 
523,10 → 548,10
spin_lock(&ring->execlist_lock);
 
while (read_pointer < write_pointer) {
read_pointer++;
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
 
get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
&status, &status_id);
 
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue;
 
545,7 → 570,7
}
}
 
if (disable_lite_restore_wa(ring)) {
if (ring->disable_lite_restore_wa) {
/* Prevent a ctx to preempt itself */
if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
(submit_contexts != 0))
556,13 → 581,16
 
spin_unlock(&ring->execlist_lock);
 
WARN(submit_contexts > 2, "More than two context complete events?\n");
if (unlikely(submit_contexts > 2))
DRM_ERROR("More than two context complete events?\n");
 
ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
/* Update the read pointer to the old write pointer. Manual ringbuffer
* management ftw </sarcasm> */
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
_MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
((u32)ring->next_context_status_buffer &
GEN8_CSB_PTR_MASK) << 8));
_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
ring->next_context_status_buffer << 8));
}
 
static int execlists_context_queue(struct drm_i915_gem_request *request)
571,8 → 599,8
struct drm_i915_gem_request *cursor;
int num_elements = 0;
 
if (request->ctx != ring->default_context)
intel_lr_context_pin(request);
if (request->ctx != request->i915->kernel_context)
intel_lr_context_pin(request->ctx, ring);
 
i915_gem_request_reference(request);
 
592,8 → 620,7
if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
list_add_tail(&tail_req->execlist_link,
list_move_tail(&tail_req->execlist_link,
&ring->execlist_retired_req_list);
}
}
660,17 → 687,27
 
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
int ret;
int ret = 0;
 
request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
 
if (request->ctx != request->ring->default_context) {
ret = intel_lr_context_pin(request);
if (i915.enable_guc_submission) {
/*
* Check that the GuC has space for the request before
* going any further, as the i915_add_request() call
* later on mustn't fail ...
*/
struct intel_guc *guc = &request->i915->guc;
 
ret = i915_guc_wq_check_space(guc->execbuf_client);
if (ret)
return ret;
}
 
return 0;
if (request->ctx != request->i915->kernel_context)
ret = intel_lr_context_pin(request->ctx, request->ring);
 
return ret;
}
 
static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
724,23 → 761,46
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
static void
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = request->ring;
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct drm_i915_private *dev_priv = request->i915;
struct intel_engine_cs *engine = request->ring;
 
intel_logical_ring_advance(request->ringbuf);
intel_logical_ring_advance(ringbuf);
request->tail = ringbuf->tail;
 
request->tail = request->ringbuf->tail;
/*
* Here we add two extra NOOPs as padding to avoid
* lite restore of a context with HEAD==TAIL.
*
* Caller must reserve WA_TAIL_DWORDS for us!
*/
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
 
if (intel_ring_stopped(ring))
return;
if (intel_ring_stopped(engine))
return 0;
 
if (engine->last_context != request->ctx) {
if (engine->last_context)
intel_lr_context_unpin(engine->last_context, engine);
if (request->ctx != request->i915->kernel_context) {
intel_lr_context_pin(request->ctx, engine);
engine->last_context = request->ctx;
} else {
engine->last_context = NULL;
}
}
 
if (dev_priv->guc.execbuf_client)
i915_guc_submit(dev_priv->guc.execbuf_client, request);
else
execlists_context_queue(request);
 
return 0;
}
 
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
967,8 → 1027,9
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
 
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(req);
if (ctx_obj && (ctx != req->i915->kernel_context))
intel_lr_context_unpin(ctx, ring);
 
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
1012,24 → 1073,39
return 0;
}
 
static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj,
struct intel_ringbuffer *ringbuf)
static int intel_lr_context_do_pin(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct page *lrc_state_page;
uint32_t *lrc_reg_state;
int ret;
 
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
return ret;
 
lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
if (WARN_ON(!lrc_state_page)) {
ret = -ENODEV;
goto unpin_ctx_obj;
}
 
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
 
ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
intel_lr_context_descriptor_update(ctx, ring);
lrc_reg_state = kmap(lrc_state_page);
lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
ctx_obj->dirty = true;
 
/* Invalidate GuC TLB. */
1044,39 → 1120,42
return ret;
}
 
static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
int ret = 0;
struct intel_engine_cs *ring = rq->ring;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
if (rq->ctx->engine[ring->id].pin_count++ == 0) {
ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
if (ctx->engine[engine->id].pin_count++ == 0) {
ret = intel_lr_context_do_pin(ctx, engine);
if (ret)
goto reset_pin_count;
 
i915_gem_context_reference(ctx);
}
return ret;
 
reset_pin_count:
rq->ctx->engine[ring->id].pin_count = 0;
ctx->engine[engine->id].pin_count = 0;
return ret;
}
 
void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
struct intel_engine_cs *ring = rq->ring;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (--rq->ctx->engine[ring->id].pin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
if (--ctx->engine[engine->id].pin_count == 0) {
// kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
ctx->engine[engine->id].lrc_vma = NULL;
ctx->engine[engine->id].lrc_desc = 0;
ctx->engine[engine->id].lrc_reg_state = NULL;
 
i915_gem_context_unreference(ctx);
}
}
}
 
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
1087,7 → 1166,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
 
if (WARN_ON_ONCE(w->count == 0))
if (w->count == 0)
return 0;
 
ring->gpu_caches_dirty = true;
1474,7 → 1553,7
u8 next_context_status_buffer_hw;
 
lrc_setup_hardware_status_page(ring,
ring->default_context->engine[ring->id].state);
dev_priv->kernel_context->engine[ring->id].state);
 
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
1493,9 → 1572,11
* | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
* BDW | CSB regs not reset | CSB regs reset |
* CHT | CSB regs not reset | CSB regs not reset |
* SKL | ? | ? |
* BXT | ? | ? |
*/
next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
& GEN8_CSB_PTR_MASK);
next_context_status_buffer_hw =
GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
 
/*
* When the CSB registers are reset (also after power-up / gpu reset),
1698,7 → 1779,7
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
bool vf_flush_wa;
bool vf_flush_wa = false;
u32 flags = 0;
int ret;
 
1720,14 → 1801,14
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
 
/*
* On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
* control.
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
if (IS_GEN9(ring->dev))
vf_flush_wa = true;
}
 
ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
if (ret)
1791,44 → 1872,71
intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
}
 
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
#define WA_TAIL_DWORDS 2
 
static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
{
return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
}
 
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 cmd;
int ret;
 
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
ret = intel_logical_ring_begin(request, 8);
ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
 
cmd = MI_STORE_DWORD_IMM_GEN4;
cmd |= MI_GLOBAL_GTT;
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
intel_logical_ring_emit(ringbuf, cmd);
intel_logical_ring_emit(ringbuf,
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
intel_logical_ring_emit(ringbuf,
hws_seqno_address(request->ring) |
MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(request);
return intel_logical_ring_advance_and_submit(request);
}
 
/*
* Here we add two extra NOOPs as padding to avoid
* lite restore of a context with HEAD==TAIL.
*/
static int gen8_emit_request_render(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
 
ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
 
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
 
/* w/a: post-sync ops following a GPGPU operation need a
* prior CS_STALL, which is emitted by the flush
* following the batch.
*/
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf,
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
/* We're thrashing one dword of HWS. */
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
 
return 0;
return intel_logical_ring_advance_and_submit(request);
}
 
static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
1911,12 → 2019,44
ring->status_page.obj = NULL;
}
 
ring->disable_lite_restore_wa = false;
ring->ctx_desc_template = 0;
 
lrc_destroy_wa_ctx_obj(ring);
ring->dev = NULL;
}
 
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
static void
logical_ring_default_vfuncs(struct drm_device *dev,
struct intel_engine_cs *ring)
{
/* Default vfuncs which can be overridden by each engine. */
ring->init_hw = gen8_init_common_ring;
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
}
 
static inline void
logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
{
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
 
static int
logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
struct intel_context *dctx = to_i915(dev)->kernel_context;
int ret;
 
/* Intentionally left blank. */
1933,19 → 2073,18
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
 
logical_ring_init_platform_invariants(ring);
 
ret = i915_cmd_parser_init_ring(ring);
if (ret)
goto error;
 
ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
ret = intel_lr_context_deferred_alloc(dctx, ring);
if (ret)
goto error;
 
/* As this is the default context, always pin it */
ret = intel_lr_context_do_pin(
ring,
ring->default_context->engine[ring->id].state,
ring->default_context->engine[ring->id].ringbuf);
ret = intel_lr_context_do_pin(dctx, ring);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
1968,14 → 2107,17
 
ring->name = "render ring";
ring->id = RCS;
ring->exec_id = I915_EXEC_RENDER;
ring->guc_id = GUC_RENDER_ENGINE;
ring->mmio_base = RENDER_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
 
logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
logical_ring_default_vfuncs(dev, ring);
 
/* Override some for render ring. */
if (INTEL_INFO(dev)->gen >= 9)
ring->init_hw = gen9_init_render_ring;
else
1982,18 → 2124,8
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush_render;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
ring->emit_request = gen8_emit_request_render;
 
ring->dev = dev;
 
2027,25 → 2159,12
 
ring->name = "bsd ring";
ring->id = VCS;
ring->exec_id = I915_EXEC_BSD;
ring->guc_id = GUC_VIDEO_ENGINE;
ring->mmio_base = GEN6_BSD_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
ring->init_hw = gen8_init_common_ring;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
 
return logical_ring_init(dev, ring);
}
2055,22 → 2174,14
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
 
ring->name = "bds2 ring";
ring->name = "bsd2 ring";
ring->id = VCS2;
ring->exec_id = I915_EXEC_BSD;
ring->guc_id = GUC_VIDEO_ENGINE2;
ring->mmio_base = GEN8_BSD2_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
 
ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
 
return logical_ring_init(dev, ring);
}
2082,25 → 2193,12
 
ring->name = "blitter ring";
ring->id = BCS;
ring->exec_id = I915_EXEC_BLT;
ring->guc_id = GUC_BLITTER_ENGINE;
ring->mmio_base = BLT_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
ring->init_hw = gen8_init_common_ring;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
 
return logical_ring_init(dev, ring);
}
2112,25 → 2210,12
 
ring->name = "video enhancement ring";
ring->id = VECS;
ring->exec_id = I915_EXEC_VEBOX;
ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
ring->mmio_base = VEBOX_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
ring->init_hw = gen8_init_common_ring;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
 
return logical_ring_init(dev, ring);
}
2235,6 → 2320,27
return rpcs;
}
 
static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
{
u32 indirect_ctx_offset;
 
switch (INTEL_INFO(ring->dev)->gen) {
default:
MISSING_CASE(INTEL_INFO(ring->dev)->gen);
/* fall through */
case 9:
indirect_ctx_offset =
GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
break;
case 8:
indirect_ctx_offset =
GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
break;
}
 
return indirect_ctx_offset;
}
 
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
2278,7 → 2384,8
ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE));
(HAS_RESOURCE_STREAMER(dev) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
/* Ring buffer start address is not known until the buffer is pinned.
2307,7 → 2414,7
(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
 
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
intel_lr_indirect_ctx_offset(ring) << 6;
 
reg_state[CTX_BB_PER_CTX_PTR+1] =
(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2368,26 → 2475,39
{
int i;
 
for (i = 0; i < I915_NUM_RINGS; i++) {
for (i = I915_NUM_RINGS; --i >= 0; ) {
struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
if (ctx_obj) {
struct intel_ringbuffer *ringbuf =
ctx->engine[i].ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
if (!ctx_obj)
continue;
 
if (ctx == ring->default_context) {
if (ctx == ctx->i915->kernel_context) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
WARN_ON(ctx->engine[ring->id].pin_count);
 
WARN_ON(ctx->engine[i].pin_count);
intel_ringbuffer_free(ringbuf);
drm_gem_object_unreference(&ctx_obj->base);
}
}
}
 
static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
/**
* intel_lr_context_size() - return the size of the context for an engine
* @ring: which engine to find the context size for
*
* Each engine may require a different amount of space for a context image,
* so when allocating (or copying) an image, this function can be used to
* find the right size for the specific engine.
*
* Return: size (in bytes) of an engine-specific context image
*
* Note: this size includes the HWSP, which is part of the context image
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
{
int ret = 0;
 
2455,7 → 2575,7
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
WARN_ON(ctx->engine[ring->id].state);
 
context_size = round_up(get_lr_context_size(ring), 4096);
context_size = round_up(intel_lr_context_size(ring), 4096);
 
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2481,14 → 2601,13
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
 
if (ctx != ring->default_context && ring->init_context) {
if (ctx != ctx->i915->kernel_context && ring->init_context) {
struct drm_i915_gem_request *req;
 
ret = i915_gem_request_alloc(ring,
ctx, &req);
if (ret) {
DRM_ERROR("ring create req: %d\n",
ret);
req = i915_gem_request_alloc(ring, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
DRM_ERROR("ring create req: %d\n", ret);
goto error_ringbuf;
}
 
/drivers/video/drm/i915/intel_lrc.h
25,8 → 25,6
#define _INTEL_LRC_H_
 
#define GEN8_LR_CONTEXT_ALIGN 4096
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x07
 
/* Execlists regs */
#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
40,6 → 38,22
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
 
/* The docs specify that the write pointer wraps around after 5h, "After status
* is written out to the last available status QW at offset 5h, this pointer
* wraps to 0."
*
* Therefore, one must infer that even though there are 3 bits available, 6 and
* 7 appear to be reserved.
*/
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x7
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
#define GEN8_CSB_WRITE_PTR(csb_status) \
(((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
#define GEN8_CSB_READ_PTR(csb_status) \
(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
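These masks can be sanity-checked by decoding a sample status value. The following is a minimal standalone C program mirroring the macros above (the sample value 0x251 is arbitrary, chosen only for illustration); it is not driver code:

#include <stdint.h>
#include <stdio.h>

#define GEN8_CSB_PTR_MASK        0x7
#define GEN8_CSB_READ_PTR_MASK   (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK  (GEN8_CSB_PTR_MASK << 0)
#define GEN8_CSB_WRITE_PTR(s)    (((s) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
#define GEN8_CSB_READ_PTR(s)     (((s) & GEN8_CSB_READ_PTR_MASK) >> 8)

int main(void)
{
    uint32_t csb_status = 0x00000251; /* arbitrary sample value */

    /* read pointer = bits 10:8, write pointer = bits 2:0 */
    printf("read=%u write=%u\n",
           GEN8_CSB_READ_PTR(csb_status),
           GEN8_CSB_WRITE_PTR(csb_status));
    /* Prints "read=2 write=1"; 6 and 7 would be outside the 6-entry
     * buffer even though the 3-bit field can encode them. */
    return 0;
}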
 
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
84,14 → 98,19
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
 
void intel_lr_context_free(struct intel_context *ctx);
uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct drm_i915_gem_request *req);
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine);
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring);
 
u32 intel_execlists_ctx_id(struct intel_context *ctx,
struct intel_engine_cs *ring);
 
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
struct i915_execbuffer_params;
98,7 → 117,6
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
/drivers/video/drm/i915/intel_lvds.c
31,6 → 31,7
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
122,6 → 123,10
 
pipe_config->base.adjusted_mode.flags |= flags;
 
if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
 
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
tmp = I915_READ(PFIT_CONTROL);
478,11 → 483,8
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
if (!HAS_PCH_SPLIT(dev)) {
drm_modeset_lock_all(dev);
if (!HAS_PCH_SPLIT(dev))
intel_display_resume(dev);
drm_modeset_unlock_all(dev);
}
 
dev_priv->modeset_restore = MODESET_DONE;
 
/drivers/video/drm/i915/intel_pm.c
37,6 → 37,8
void getrawmonotonic(struct timespec *ts);
 
/**
* DOC: RC6
*
* RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while at this stage. This
* stage is entered automatically when the GPU is idle when RC6 support is
551,7 → 553,7
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
* @wm: chip FIFO params
* @pixel_size: display pixel size
* @cpp: bytes per pixel
* @latency_ns: memory latency for the platform
*
* Calculate the watermark level (the level at which the display plane will
567,8 → 569,7
*/
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
const struct intel_watermark_params *wm,
int fifo_size,
int pixel_size,
int fifo_size, int cpp,
unsigned long latency_ns)
{
long entries_required, wm_size;
579,7 → 580,7
* clocks go from a few thousand to several hundred thousand.
* latency is usually a few thousand
*/
entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
1000;
entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
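A worked example of the entries computation above (a standalone sketch; the clock, cpp, latency and cacheline numbers are invented for illustration, not taken from any platform table):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* made-up example: 148.5 MHz pixel clock, 4 bytes/pixel,
     * 5000 ns memory latency, 64-byte FIFO cachelines */
    long clock_in_khz = 148500, cpp = 4, latency_ns = 5000;
    long cacheline_size = 64;
    long entries;

    /* bytes drained from the FIFO while waiting out the latency */
    entries = ((clock_in_khz / 1000) * cpp * latency_ns) / 1000;
    entries = DIV_ROUND_UP(entries, cacheline_size);

    printf("%ld cachelines drained during latency\n", entries); /* 47 */
    return 0;
}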
 
643,13 → 644,13
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
int clock = adjusted_mode->crtc_clock;
 
/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->display_sr);
cpp, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
659,7 → 660,7
/* cursor SR */
wm = intel_calculate_wm(clock, &pineview_cursor_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->cursor_sr);
cpp, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= FW_WM(wm, CURSOR_SR);
668,7 → 669,7
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->display_hpll_disable);
cpp, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= FW_WM(wm, HPLL_SR);
677,7 → 678,7
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->cursor_hpll_disable);
cpp, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
701,7 → 702,7
{
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
int htotal, hdisplay, clock, pixel_size;
int htotal, hdisplay, clock, cpp;
int line_time_us, line_count;
int entries, tlb_miss;
 
716,10 → 717,10
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
731,7 → 732,7
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
entries = line_count * crtc->cursor->state->crtc_w * cpp;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
787,7 → 788,7
{
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
int hdisplay, htotal, pixel_size, clock;
int hdisplay, htotal, cpp, clock;
unsigned long line_time_us;
int line_count, line_size;
int small, large;
803,14 → 804,14
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
line_size = hdisplay * cpp;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
small = ((clock * cpp / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
817,7 → 818,7
*display_wm = entries + display->guard_size;
 
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
entries = line_count * cpp * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
 
909,13 → 910,13
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
unsigned int pipe_htotal,
unsigned int horiz_pixels,
unsigned int bytes_per_pixel,
unsigned int cpp,
unsigned int latency)
{
unsigned int ret;
 
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
ret = (ret + 1) * horiz_pixels * cpp;
ret = DIV_ROUND_UP(ret, 64);
 
return ret;
944,7 → 945,7
int level)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int clock, htotal, pixel_size, width, wm;
int clock, htotal, cpp, width, wm;
 
if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
952,7 → 953,7
if (!state->visible)
return 0;
 
pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
clock = crtc->config->base.adjusted_mode.crtc_clock;
htotal = crtc->config->base.adjusted_mode.crtc_htotal;
width = crtc->config->pipe_src_w;
968,7 → 969,7
*/
wm = 63;
} else {
wm = vlv_wm_method2(clock, htotal, width, pixel_size,
wm = vlv_wm_method2(clock, htotal, width, cpp,
dev_priv->wm.pri_latency[level] * 10);
}
 
1442,7 → 1443,7
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
unsigned long line_time_us;
int entries;
 
1450,7 → 1451,7
 
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
cpp * hdisplay;
entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
1460,7 → 1461,7
entries, srwm);
 
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * crtc->cursor->state->crtc_w;
cpp * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
1521,7 → 1522,7
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
if (IS_GEN2(dev))
cpp = 4;
 
1543,7 → 1544,7
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
if (IS_GEN2(dev))
cpp = 4;
 
1589,7 → 1590,7
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
unsigned long line_time_us;
int entries;
 
1597,7 → 1598,7
 
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
cpp * hdisplay;
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
1677,6 → 1678,9
if (pipe_h < pfit_h)
pipe_h = pfit_h;
 
if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate;
 
pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
pfit_w * pfit_h);
}
1685,8 → 1689,7
}
 
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
uint32_t latency)
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
uint64_t ret;
 
1693,7 → 1696,7
if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;
 
ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
ret = (uint64_t) pixel_rate * cpp * latency;
ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
 
return ret;
1701,7 → 1704,7
 
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t horiz_pixels, uint8_t cpp,
uint32_t latency)
{
uint32_t ret;
1708,17 → 1711,30
 
if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;
if (WARN_ON(!pipe_htotal))
return UINT_MAX;
 
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
ret = (ret + 1) * horiz_pixels * cpp;
ret = DIV_ROUND_UP(ret, 64) + 2;
return ret;
}
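The two helpers above bound the FIFO drain in different ways: method 1 charges bytes fetched at the pixel rate for the whole latency, method 2 charges whole scanlines started during the latency; the callers below take min(method1, method2). A standalone worked example with invented numbers (148.5 MHz pixel clock, 4 bytes/pixel, 5 us latency expressed as 50 x 0.1us, htotal 2200, 1920 visible pixels):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    uint64_t pixel_rate = 148500, cpp = 4, latency = 50;
    uint32_t htotal = 2200, width = 1920;
    uint64_t method1, method2;

    /* method 1: bytes fetched at the pixel rate during the latency */
    method1 = DIV_ROUND_UP(pixel_rate * cpp * latency, 64 * 10000) + 2;

    /* method 2: whole scanlines started during the latency */
    method2 = (latency * pixel_rate) / (htotal * 10000);
    method2 = DIV_ROUND_UP((method2 + 1) * width * cpp, 64) + 2;

    /* the driver uses min(method1, method2) for the LP watermarks */
    printf("method1=%llu method2=%llu\n",
           (unsigned long long)method1,
           (unsigned long long)method2); /* method1=49 method2=122 */
    return 0;
}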
 
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
uint8_t bytes_per_pixel)
uint8_t cpp)
{
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
/*
* Neither of these should be possible since this function shouldn't be
* called if the CRTC is off or the plane is invisible. But let's be
* extra paranoid to avoid a potential divide-by-zero if we screw up
* elsewhere in the driver.
*/
if (WARN_ON(!cpp))
return 0;
if (WARN_ON(!horiz_pixels))
return 0;
 
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
 
struct ilk_wm_maximums {
1737,13 → 1753,14
uint32_t mem_value,
bool is_lp)
{
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
int cpp = pstate->base.fb ?
drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
 
if (!cstate->base.active || !pstate->visible)
return 0;
 
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
 
if (!is_lp)
return method1;
1751,8 → 1768,7
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
bpp,
mem_value);
cpp, mem_value);
 
return min(method1, method2);
}
1765,18 → 1781,18
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
int cpp = pstate->base.fb ?
drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
 
if (!cstate->base.active || !pstate->visible)
return 0;
 
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
bpp,
mem_value);
cpp, mem_value);
return min(method1, method2);
}
 
1809,12 → 1825,13
const struct intel_plane_state *pstate,
uint32_t pri_val)
{
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
int cpp = pstate->base.fb ?
drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
 
if (!cstate->base.active || !pstate->visible)
return 0;
 
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
}
 
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2007,15 → 2024,20
}
 
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
hsw_compute_linetime_wm(struct drm_device *dev,
struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode =
&cstate->base.adjusted_mode;
u32 linetime, ips_linetime;
 
if (!intel_crtc->active)
if (!cstate->base.active)
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
if (WARN_ON(dev_priv->cdclk_freq == 0))
return 0;
 
/* The WM are computed based on how long it takes to fill a single
* row at the given clock rate, multiplied by 8.
2323,8 → 2345,7
pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev,
&intel_crtc->base);
pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
 
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2860,25 → 2881,28
const struct drm_plane_state *pstate,
int y)
{
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
uint32_t width = 0, height = 0;
 
width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;
 
if (intel_rotation_90_or_270(pstate->rotation))
swap(width, height);
 
/* for planar format */
if (fb->pixel_format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
return intel_crtc->config->pipe_src_w *
intel_crtc->config->pipe_src_h *
return width * height *
drm_format_plane_cpp(fb->pixel_format, 0);
else /* uv-plane data rate */
return (intel_crtc->config->pipe_src_w/2) *
(intel_crtc->config->pipe_src_h/2) *
return (width / 2) * (height / 2) *
drm_format_plane_cpp(fb->pixel_format, 1);
}
 
/* for packed formats */
return intel_crtc->config->pipe_src_w *
intel_crtc->config->pipe_src_h *
drm_format_plane_cpp(fb->pixel_format, 0);
return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
}
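A worked example of the data-rate helper above with invented numbers: for a 1920x1080 source rectangle, NV12 splits into a 1-byte-per-pixel Y plane and a 2x2-subsampled UV plane at 2 bytes per sample, while a packed 32bpp format charges 4 bytes for every pixel:

#include <stdio.h>

int main(void)
{
    unsigned int width = 1920, height = 1080;

    unsigned int y_rate  = width * height * 1;             /* 2073600 */
    unsigned int uv_rate = (width / 2) * (height / 2) * 2; /* 1036800 */
    unsigned int xrgb    = width * height * 4;             /* 8294400 */

    printf("NV12: y=%u uv=%u, XRGB8888: %u\n", y_rate, uv_rate, xrgb);
    return 0;
}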
 
/*
2957,8 → 2981,9
struct drm_framebuffer *fb = plane->state->fb;
int id = skl_wm_plane_id(intel_plane);
 
if (fb == NULL)
if (!to_intel_plane_state(plane->state)->visible)
continue;
 
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
 
2984,7 → 3009,7
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
 
if (pstate->fb == NULL)
if (!to_intel_plane_state(pstate)->visible)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
3034,12 → 3059,11
 
/*
* The max latency should be 257 (max the punit can code is 255 and we add 2us
* for the read latency) and bytes_per_pixel should always be <= 8, so that
* for the read latency) and cpp should always be <= 8, so that
* should allow pixel_rate up to ~2 GHz which seems sufficient since max
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
uint32_t latency)
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
uint32_t wm_intermediate_val, ret;
 
3046,7 → 3070,7
if (latency == 0)
return UINT_MAX;
 
wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
wm_intermediate_val = latency * pixel_rate * cpp / 512;
ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
 
return ret;
3053,7 → 3077,7
}
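A quick check of the overflow claim in the comment above skl_wm_method1 (assuming, as elsewhere in this file, that pixel_rate is in kHz): with latency = 257, cpp = 8 and a ~2 GHz pixel clock, the 32-bit intermediate product is 257 * 2,000,000 * 8 = 4,112,000,000, which just fits under UINT32_MAX:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* worst case from the comment: latency 257, ~2 GHz pixel rate
     * (2,000,000 kHz), cpp 8 */
    uint64_t worst = 257ULL * 2000000ULL * 8ULL;

    printf("worst=%llu fits=%d\n", (unsigned long long)worst,
           worst <= UINT32_MAX); /* worst=4112000000 fits=1 */
    return 0;
}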
 
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t horiz_pixels, uint8_t cpp,
uint64_t tiling, uint32_t latency)
{
uint32_t ret;
3063,7 → 3087,7
if (latency == 0)
return UINT_MAX;
 
plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
plane_bytes_per_line = horiz_pixels * cpp;
 
if (tiling == I915_FORMAT_MOD_Y_TILED ||
tiling == I915_FORMAT_MOD_Yf_TILED) {
3108,28 → 3132,36
{
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
struct intel_plane_state *intel_pstate =
to_intel_plane_state(plane->state);
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines;
uint32_t selected_result;
uint8_t bytes_per_pixel;
uint8_t cpp;
uint32_t width = 0, height = 0;
 
if (latency == 0 || !cstate->base.active || !fb)
if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
return false;
 
bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;
 
if (intel_rotation_90_or_270(plane->state->rotation))
swap(width, height);
 
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
bytes_per_pixel,
latency);
cpp, latency);
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
cstate->pipe_src_w,
bytes_per_pixel,
width,
cpp,
fb->modifier[0],
latency);
 
plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
plane_bytes_per_line = width * cpp;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3137,11 → 3169,11
uint32_t min_scanlines = 4;
uint32_t y_tile_minimum;
if (intel_rotation_90_or_270(plane->state->rotation)) {
int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
 
switch (bpp) {
switch (cpp) {
case 1:
min_scanlines = 16;
break;
3630,9 → 3662,11
}
}
 
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
static void ilk_program_watermarks(struct intel_crtc_state *cstate)
{
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
3665,7 → 3699,6
 
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 
3685,7 → 3718,7
 
intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
 
ilk_program_watermarks(dev_priv);
ilk_program_watermarks(cstate);
}
 
static void skl_pipe_wm_active_state(uint32_t val,
4073,7 → 4106,7
dev_priv->display.update_wm(crtc);
}
 
/**
/*
* Lock protecting IPS related data structures
*/
DEFINE_SPINLOCK(mchdev_lock);
4109,11 → 4142,13
static void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;
 
spin_lock_irq(&mchdev_lock);
 
rgvmodectl = I915_READ(MEMMODECTL);
 
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4546,21 → 4581,71
}
if (HAS_RC6p(dev))
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
 
else
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
 
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
 
if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
enable_rc6 = false;
}
 
/*
* The exact context size is not known for BXT, so assume a page size
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
(rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
dev_priv->gtt.stolen_reserved_size))) {
DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
 
if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
enable_rc6 = false;
}
 
if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_HW_ENABLE)) &&
((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
!(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
enable_rc6 = false;
}
 
return enable_rc6;
}
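Worth spelling out: because the first operand of the final && requires both enable bits in GEN6_RC_CONTROL to be clear, the (I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) term can never be true when it is evaluated, so the whole test reduces to "no enable bit set in RC_CONTROL and the RC6 state bit clear". A standalone truth-table check of that equivalence (the bit positions below are placeholders, not the real register layout):

#include <stdbool.h>
#include <stdio.h>

/* Hedged restatement of the last check in bxt_check_bios_rc6_setup(). */
static bool equivalent(unsigned ctrl, unsigned state,
                       unsigned rc6_en, unsigned hw_en, unsigned rc6_st)
{
    bool original = !(ctrl & (rc6_en | hw_en)) &&
                    ((ctrl & hw_en) || !(state & rc6_st));
    bool reduced  = !(ctrl & (rc6_en | hw_en)) && !(state & rc6_st);

    return original == reduced; /* holds for every input */
}

int main(void)
{
    unsigned RC6_EN = 1 << 0, HW_EN = 1 << 1, RC6_ST = 1 << 0;
    bool ok = true;

    for (unsigned ctrl = 0; ctrl < 4; ctrl++)
        for (unsigned state = 0; state < 2; state++)
            ok &= equivalent(ctrl, state, RC6_EN, HW_EN, RC6_ST);
    printf("equivalence holds: %d\n", ok); /* 1 */
    return 0;
}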
 
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
/* No RC6 before Ironlake and code is gone for ilk. */
if (INTEL_INFO(dev)->gen < 6)
return 0;
 
if (!enable_rc6)
return 0;
 
if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
DRM_INFO("RC6 disabled by BIOS\n");
return 0;
}
 
/* Respect the kernel parameter if it is set */
if (enable_rc6 >= 0) {
int mask;
4730,8 → 4815,7
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off");
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
4750,8 → 4834,7
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
if (NEEDS_WaRsDisableCoarsePowerGating(dev))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
5169,8 → 5252,6
u32 pcbr;
int pctx_size = 32*1024;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5192,7 → 5273,7
u32 pcbr;
int pctx_size = 24*1024;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
mutex_lock(&dev->struct_mutex);
 
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
5220,7 → 5301,7
pctx = i915_gem_object_create_stolen(dev, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
return;
goto out;
}
 
pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5229,6 → 5310,7
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
mutex_unlock(&dev->struct_mutex);
}
 
static void valleyview_cleanup_pctx(struct drm_device *dev)
5238,7 → 5320,7
if (WARN_ON(!dev_priv->vlv_pctx))
return;
 
drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
dev_priv->vlv_pctx = NULL;
}
 
6047,7 → 6129,6
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
/*
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
6182,8 → 6263,8
return;
 
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
7019,6 → 7100,7
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.program_watermarks = ilk_program_watermarks;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
7184,9 → 7266,10
{
int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
div = vlv_gpu_freq_div(czclk_freq) / 2;
div = vlv_gpu_freq_div(czclk_freq);
if (div < 0)
return div;
div /= 2;
 
return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
7195,9 → 7278,10
{
int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
mul = vlv_gpu_freq_div(czclk_freq) / 2;
mul = vlv_gpu_freq_div(czclk_freq);
if (mul < 0)
return mul;
mul /= 2;
 
/* CHV needs even values */
return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
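The reordering in the two conversion helpers above is not cosmetic: C integer division truncates toward zero, so halving a negative return value before the sign check can turn an error code such as -1 into 0 (and mangles other negative codes), silently defeating the `< 0` test. A two-line demonstration, assuming the divider helper reports failure with a negative value:

#include <stdio.h>

int main(void)
{
    int err = -1; /* e.g. an "unknown frequency divider" result */

    /* C99 division truncates toward zero, so -1 / 2 == 0:
     * testing the sign after the division would miss the error. */
    printf("-1 / 2 = %d\n", err / 2);  /* 0 */
    printf("-22 / 2 = %d\n", -22 / 2); /* -11: a corrupted errno */
    return 0;
}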
/drivers/video/drm/i915/intel_psr.c
225,7 → 225,12
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
 
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
if (dev_priv->psr.link_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE);
}
 
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
283,6 → 288,9
if (IS_HASWELL(dev))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
 
if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
val |= EDP_PSR_TP1_TIME_2500us;
else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
343,8 → 351,15
 
dev_priv->psr.source_ok = false;
 
if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
/*
* HSW spec explicitly says PSR is tied to port A.
* BDW+ platforms with a DDI implementation of PSR have separate
* PSR registers per transcoder, and we only implement the transcoder
* EDP ones. Since by Display design transcoder EDP is tied to port A,
* we can safely bail out based on port A.
*/
if (HAS_DDI(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return false;
}
 
353,6 → 368,12
return false;
}
 
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
!dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
return false;
}
 
if (IS_HASWELL(dev) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
366,12 → 387,6
return false;
}
 
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
return false;
}
 
dev_priv->psr.source_ok = true;
return true;
}
802,6 → 817,36
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
 
/* Per platform default */
if (i915.enable_psr == -1) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
i915.enable_psr = 1;
else
i915.enable_psr = 0;
}
 
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
/* On VLV and CHV only standby mode is supported. */
dev_priv->psr.link_standby = true;
else
/* For newer platforms, respect the VBT again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
 
/* Override link_standby x link_off defaults */
if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
DRM_DEBUG_KMS("PSR: Forcing link standby\n");
dev_priv->psr.link_standby = true;
}
if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
DRM_DEBUG_KMS("PSR: Forcing main link off\n");
dev_priv->psr.link_standby = false;
}
 
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
}
/drivers/video/drm/i915/intel_ringbuffer.c
746,9 → 746,9
 
ret = i915_gem_render_state_init(req);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
return ret;
 
return ret;
return 0;
}
 
static int wa_add(struct drm_i915_private *dev_priv,
789,6 → 789,22
 
#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
 
static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_workarounds *wa = &dev_priv->workarounds;
const uint32_t index = wa->hw_whitelist_count[ring->id];
 
if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
return -EINVAL;
 
WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
i915_mmio_reg_offset(reg));
wa->hw_whitelist_count[ring->id]++;
 
return 0;
}
 
static int gen8_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
894,6 → 910,7
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
int ret;
 
/* WaEnableLbsSlaRetryTimerDecrement:skl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
964,6 → 981,20
/* WaDisableSTUnitPowerOptimization:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
/* WaOCLCoherentLineFlush:skl,bxt */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
 
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
ret = wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
if (ret)
return ret;
 
/* WaAllowUMDToModifyHDCChicken1:skl,bxt */
ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
 
return 0;
}
 
1019,6 → 1050,16
if (ret)
return ret;
 
/*
* The actual WA is to disable per-context preemption granularity control
* until D0, which is the default case, so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
}
 
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
1072,6 → 1113,11
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
if (ret)
return ret;
 
return skl_tune_iz_hashing(ring);
}
 
1107,6 → 1153,20
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
 
/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
 
ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
if (ret)
return ret;
}
 
return 0;
}
 
1118,6 → 1178,7
WARN_ON(ring->id != RCS);
 
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
if (IS_BROADWELL(dev))
return bdw_init_workarounds(ring);
1868,15 → 1929,13
offset = cs_offset;
}
 
ret = intel_ring_begin(req, 4);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_BATCH_BUFFER);
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
return 0;
1997,11 → 2056,36
 
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
vunmap(ringbuf->virtual_start);
else
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
ringbuf->vma = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
 
static u32 *vmap_obj(struct drm_i915_gem_object *obj)
{
struct sg_page_iter sg_iter;
struct page **pages;
void *addr;
int i;
 
pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
return NULL;
 
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
pages[i++] = sg_page_iter_page(&sg_iter);
 
addr = vmap(pages, i, 0, PAGE_KERNEL);
drm_free_large(pages);
 
return addr;
}
 
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
2011,10 → 2095,28
unsigned flags = PIN_OFFSET_BIAS | 4096;
int ret;
 
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (HAS_LLC(dev_priv) && !obj->stolen) {
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
if (ret)
return ret;
 
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret) {
i915_gem_object_ggtt_unpin(obj);
return ret;
}
 
ringbuf->virtual_start = vmap_obj(obj);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);
return -ENOMEM;
}
} else {
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
flags | PIN_MAPPABLE);
if (ret)
return ret;
 
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret) {
i915_gem_object_ggtt_unpin(obj);
2021,6 → 2123,9
return ret;
}
 
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);
 
ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
2027,7 → 2132,10
i915_gem_object_ggtt_unpin(obj);
return -EINVAL;
}
}
 
ringbuf->vma = i915_gem_obj_to_ggtt(obj);
 
return 0;
}
 
2643,6 → 2751,7
 
ring->name = "render ring";
ring->id = RCS;
ring->exec_id = I915_EXEC_RENDER;
ring->mmio_base = RENDER_RING_BASE;
 
if (INTEL_INFO(dev)->gen >= 8) {
2791,6 → 2900,7
 
ring->name = "bsd ring";
ring->id = VCS;
ring->exec_id = I915_EXEC_BSD;
 
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
2867,6 → 2977,7
 
ring->name = "bsd2 ring";
ring->id = VCS2;
ring->exec_id = I915_EXEC_BSD;
 
ring->write_tail = ring_write_tail;
ring->mmio_base = GEN8_BSD2_RING_BASE;
2897,6 → 3008,7
 
ring->name = "blitter ring";
ring->id = BCS;
ring->exec_id = I915_EXEC_BLT;
 
ring->mmio_base = BLT_RING_BASE;
ring->write_tail = ring_write_tail;
2954,6 → 3066,7
 
ring->name = "video enhancement ring";
ring->id = VECS;
ring->exec_id = I915_EXEC_VEBOX;
 
ring->mmio_base = VEBOX_RING_BASE;
ring->write_tail = ring_write_tail;
/drivers/video/drm/i915/intel_ringbuffer.h
93,11 → 93,13
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
u32 instdone[I915_NUM_INSTDONE_REG];
};
 
struct intel_ringbuffer {
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
struct i915_vma *vma;
 
struct intel_engine_cs *ring;
struct list_head link;
147,14 → 149,16
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
RCS = 0x0,
RCS = 0,
BCS,
VCS,
BCS,
VECS,
VCS2
VCS2, /* Keep instances of the same engine type together. */
VECS
} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
unsigned int guc_id;
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
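The reordered enum matters for the new _VCS(n) macro: it indexes BSD engine instances by position, which only works while VCS2 immediately follows VCS. A standalone check (illustrative, not driver code):

#include <assert.h>

enum intel_ring_id {
    RCS = 0,
    BCS,
    VCS,
    VCS2, /* instances of the same engine type stay together */
    VECS
};
#define _VCS(n) (VCS + (n))

int main(void)
{
    /* _VCS(n) only works because VCS2 immediately follows VCS */
    assert(_VCS(0) == VCS);
    assert(_VCS(1) == VCS2);
    return 0;
}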
268,6 → 272,8
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct drm_i915_gem_request *request);
int (*emit_flush)(struct drm_i915_gem_request *request,
305,7 → 311,6
 
wait_queue_head_t irq_queue;
 
struct intel_context *default_context;
struct intel_context *last_context;
 
struct intel_ring_hangcheck hangcheck;
406,7 → 411,7
ring->status_page.page_addr[reg] = value;
}
 
/**
/*
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
423,6 → 428,7
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
/drivers/video/drm/i915/intel_runtime_pm.c
284,6 → 284,13
1 << PIPE_C | 1 << PIPE_B);
}
 
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
gen8_irq_power_well_pre_disable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
 
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
309,6 → 316,14
}
}
 
static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (power_well->data == SKL_DISP_PW_2)
gen8_irq_power_well_pre_disable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
 
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
334,6 → 349,7
 
} else {
if (enable_requested) {
hsw_power_well_pre_disable(dev_priv);
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
456,15 → 472,19
*/
}
 
static void gen9_set_dc_state_debugmask_memory_up(
struct drm_i915_private *dev_priv)
static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
uint32_t val;
uint32_t val, mask;
 
mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
 
if (IS_BROXTON(dev_priv))
mask |= DC_STATE_DEBUG_MASK_CORES;
 
/* The below bit doesn't need to be cleared ever afterwards */
val = I915_READ(DC_STATE_DEBUG);
if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
if ((val & mask) != mask) {
val |= mask;
I915_WRITE(DC_STATE_DEBUG, val);
POSTING_READ(DC_STATE_DEBUG);
}
525,9 → 545,6
else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
state = DC_STATE_EN_UPTO_DC5;
 
if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
gen9_set_dc_state_debugmask_memory_up(dev_priv);
 
val = I915_READ(DC_STATE_EN);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
577,7 → 594,8
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
 
WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
"Platform doesn't support DC5.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
 
613,7 → 631,8
{
struct drm_device *dev = dev_priv->dev;
 
WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
"Platform doesn't support DC6.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
640,7 → 659,8
{
assert_can_disable_dc5(dev_priv);
 
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 && i915.enable_dc != 1)
assert_can_disable_dc6(dev_priv);
 
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
668,7 → 688,6
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
struct drm_device *dev = dev_priv->dev;
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
bool is_enabled, enable_requested, check_fuse_status = false;
706,6 → 725,9
state_mask = SKL_POWER_WELL_STATE(power_well->data);
is_enabled = tmp & state_mask;
 
if (!enable && enable_requested)
skl_power_well_pre_disable(dev_priv, power_well);
 
if (enable) {
if (!enable_requested) {
WARN((tmp & state_mask) &&
712,17 → 734,6
!I915_READ(HSW_PWR_WELL_BIOS),
"Invalid for power well status to be enabled, unless done by the BIOS, \
when request is to disable!\n");
if (power_well->data == SKL_DISP_PW_2) {
/*
* DDI buffer programming unnecessary during
* driver-load/resume as it's already done
* during modeset initialization then. It's
* also invalid here as encoder list is still
* uninitialized.
*/
if (!dev_priv->power_domains.initializing)
intel_prepare_ddi(dev);
}
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}
 
828,7 → 839,8
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 && i915.enable_dc != 1)
skl_enable_dc6(dev_priv);
else
gen9_enable_dc5(dev_priv);
840,7 → 852,8
if (power_well->count > 0) {
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
} else {
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 &&
i915.enable_dc != 1)
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
else
993,6 → 1006,9
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
/* make sure we're done processing display irqs */
synchronize_irq(dev_priv->dev->irq);
 
vlv_power_sequencer_reset(dev_priv);
}
 
1941,7 → 1957,7
{
struct i915_power_well *well;
 
if (!IS_SKYLAKE(dev_priv))
if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
 
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1955,7 → 1971,7
{
struct i915_power_well *well;
 
if (!IS_SKYLAKE(dev_priv))
if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
 
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2125,8 → 2141,8
 
skl_init_cdclk(dev_priv);
 
if (dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
gen9_set_dc_state_debugmask(dev_priv);
}
 
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
/drivers/video/drm/i915/intel_sdvo.c
1527,6 → 1527,7
struct drm_display_mode *mode)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
1537,6 → 1538,9
if (intel_sdvo->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;
 
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
 
if (intel_sdvo->is_lvds) {
if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
return MODE_PANEL;
/drivers/video/drm/i915/intel_sdvo_regs.h
24,8 → 24,8
* Eric Anholt <eric@anholt.net>
*/
 
/**
* @file SDVO command definitions and structures.
/*
* SDVO command definitions and structures.
*/
 
#define SDVO_OUTPUT_FIRST (0)
66,24 → 66,24
#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
#define DTD_FLAG_INTERLACE (1 << 7)
 
/** This matches the EDID DTD structure, more or less */
/* This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
u16 clock; /**< pixel clock, in 10kHz units */
u8 h_active; /**< lower 8 bits (pixels) */
u8 h_blank; /**< lower 8 bits (pixels) */
u8 h_high; /**< upper 4 bits each h_active, h_blank */
u8 v_active; /**< lower 8 bits (lines) */
u8 v_blank; /**< lower 8 bits (lines) */
u8 v_high; /**< upper 4 bits each v_active, v_blank */
u16 clock; /* pixel clock, in 10kHz units */
u8 h_active; /* lower 8 bits (pixels) */
u8 h_blank; /* lower 8 bits (pixels) */
u8 h_high; /* upper 4 bits each h_active, h_blank */
u8 v_active; /* lower 8 bits (lines) */
u8 v_blank; /* lower 8 bits (lines) */
u8 v_high; /* upper 4 bits each v_active, v_blank */
} part1;
 
struct {
u8 h_sync_off; /**< lower 8 bits, from hblank start */
u8 h_sync_width; /**< lower 8 bits (pixels) */
/** lower 4 bits each vsync offset, vsync width */
u8 h_sync_off; /* lower 8 bits, from hblank start */
u8 h_sync_width; /* lower 8 bits (pixels) */
/* lower 4 bits each vsync offset, vsync width */
u8 v_sync_off_width;
/**
/*
* 2 high bits of hsync offset, 2 high bits of hsync width,
* bits 4-5 of vsync offset, and 2 high bits of vsync width.
*/
90,7 → 90,7
u8 sync_off_width_high;
u8 dtd_flags;
u8 sdvo_flags;
/** bits 6-7 of vsync offset at bits 6-7 */
/* bits 6-7 of vsync offset at bits 6-7 */
u8 v_sync_off_high;
u8 reserved;
} part2;
97,8 → 97,8
} __packed;
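For readers decoding these split fields, here is a hedged standalone sketch, assuming the usual packing in this driver (the high nibble of h_high extends h_active, the low nibble extends h_blank); the sample values, a 1920-pixel-wide mode with 280 pixels of blanking, are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* encode: low 8 bits in h_active/h_blank, bits 11:8 in h_high */
    uint8_t h_active = 1920 & 0xff;                      /* 0x80 */
    uint8_t h_blank  = 280 & 0xff;                       /* 0x18 */
    uint8_t h_high   = ((1920 >> 8) << 4) | (280 >> 8);  /* 0x71 */

    /* decode: reassemble the 12-bit values */
    unsigned hactive = h_active | (((h_high >> 4) & 0xf) << 8);
    unsigned hblank  = h_blank | ((h_high & 0xf) << 8);

    printf("hactive=%u hblank=%u\n", hactive, hblank); /* 1920 280 */
    return 0;
}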
 
struct intel_sdvo_pixel_clock_range {
u16 min; /**< pixel clock, in 10kHz units */
u16 max; /**< pixel clock, in 10kHz units */
u16 min; /* pixel clock, in 10kHz units */
u16 max; /* pixel clock, in 10kHz units */
} __packed;
 
struct intel_sdvo_preferred_input_timing_args {
144,7 → 144,7
 
#define SDVO_CMD_RESET 0x01
 
/** Returns a struct intel_sdvo_caps */
/* Returns a struct intel_sdvo_caps */
#define SDVO_CMD_GET_DEVICE_CAPS 0x02
 
#define SDVO_CMD_GET_FIRMWARE_REV 0x86
152,7 → 152,7
# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
 
/**
/*
* Reports which inputs are trained (managed to sync).
*
* Devices must have trained within 2 vsyncs of a mode change.
164,10 → 164,10
unsigned int pad:6;
} __packed;
 
/** Returns a struct intel_sdvo_output_flags of active outputs. */
/* Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
 
/**
/*
* Sets the current set of active outputs.
*
* Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
175,7 → 175,7
*/
#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
 
/**
/*
* Returns the current mapping of SDVO inputs to outputs on the device.
*
* Returns two struct intel_sdvo_output_flags structures.
185,7 → 185,7
u16 in0, in1;
};
 
/**
/*
* Sets the current mapping of SDVO inputs to outputs on the device.
*
* Takes two struct i380_sdvo_output_flags structures.
192,22 → 192,22
*/
#define SDVO_CMD_SET_IN_OUT_MAP 0x07
 
/**
/*
* Returns a struct intel_sdvo_output_flags of attached displays.
*/
#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
 
/**
/*
* Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
*/
#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
 
/**
/*
* Takes a struct intel_sdvo_output_flags.
*/
#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
 
/**
/*
* Returns a struct intel_sdvo_output_flags of displays with hot plug
* interrupts enabled.
*/
221,7 → 221,7
unsigned int pad:6;
} __packed;
 
/**
/*
* Selects which input is affected by future input commands.
*
* Commands affected include SET_INPUT_TIMINGS_PART[12],
234,7 → 234,7
unsigned int pad:7;
} __packed;
 
/**
/*
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
280,7 → 280,7
# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
 
/**
/*
* Generates a DTD based on the given width, height, and flags.
*
* This will be supported by any device supporting scaling or interlaced
300,17 → 300,17
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
 
/** Returns a struct intel_sdvo_pixel_clock_range */
/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
/** Returns a struct intel_sdvo_pixel_clock_range */
/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
 
/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
 
/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
317,7 → 317,7
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
 
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
/** 6 bytes of bit flags for TV formats shared by all TV format functions */
/* 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
376,7 → 376,7
 
#define SDVO_CMD_SET_TV_FORMAT 0x29
 
/** Returns the resolutiosn that can be used with the given TV format */
/* Returns the resolutions that can be used with the given TV format */
#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
struct intel_sdvo_sdtv_resolution_request {
unsigned int ntsc_m:1;
539,7 → 539,7
#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
/**
/*
* The panel power sequencing parameters are in units of milliseconds.
* The high fields are bits 8:9 of the 10-bit values.
*/
/drivers/video/drm/i915/intel_sideband.c
129,17 → 129,18
return val;
}
 
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
SB_CRRDDA_NP, reg, &val);
return val;
}
 
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv,
u8 port, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
SB_CRWRDA_NP, reg, &val);
}
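The net effect of this hunk is one generic accessor pair taking the IOSF port as a parameter instead of a dedicated wrapper per sideband port (the GPS-core pair below is deleted for the same reason). A standalone sketch of the call-site change, with stub types standing in for the driver's and a placeholder port value:

#include <stdint.h>
#include <stdio.h>

struct drm_i915_private { int dummy; };
#define IOSF_PORT_GPIO_NC 0x13 /* placeholder; see the real header */

static uint32_t regs[256]; /* fake sideband register file */

static uint32_t vlv_iosf_sb_read(struct drm_i915_private *p, uint8_t port,
                                 uint32_t reg)
{
    (void)p; (void)port;
    return regs[reg & 0xff];
}

static void vlv_iosf_sb_write(struct drm_i915_private *p, uint8_t port,
                              uint32_t reg, uint32_t val)
{
    (void)p; (void)port;
    regs[reg & 0xff] = val;
}

int main(void)
{
    struct drm_i915_private priv;

    /* what used to be vlv_gpio_nc_write()/vlv_gpio_nc_read() */
    vlv_iosf_sb_write(&priv, IOSF_PORT_GPIO_NC, 0x10, 0xdead);
    printf("0x%x\n", vlv_iosf_sb_read(&priv, IOSF_PORT_GPIO_NC, 0x10));
    return 0;
}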
 
171,20 → 172,6
SB_CRWRDA_NP, reg, &val);
}
 
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRRDDA_NP, reg, &val);
return val;
}
 
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRWRDA_NP, reg, &val);
}
 
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
{
u32 val = 0;
/drivers/video/drm/i915/intel_sprite.c
183,28 → 183,33
}
 
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
skl_update_plane(struct drm_plane *drm_plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(drm_plane->state)->ckey;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
int scaler_id;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
const struct intel_scaler *scaler =
&crtc_state->scaler_state.scalers[plane_state->scaler_id];
 
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
213,14 → 218,12
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
 
rotation = drm_plane->state->rotation;
rotation = plane_state->base.rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
 
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
 
scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
 
/* Sizes are 0 based */
src_w--;
src_h--;
241,9 → 244,10
surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
 
if (intel_rotation_90_or_270(rotation)) {
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
/* stride: Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0], 0);
tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
stride = DIV_ROUND_UP(fb->height, tile_height);
plane_size = (src_w << 16) | src_h;
x_offset = stride * tile_height - y - (src_h + 1);
261,13 → 265,13
I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
 
/* program plane scaler */
if (scaler_id >= 0) {
if (plane_state->scaler_id >= 0) {
uint32_t ps_ctrl = 0;
int scaler_id = plane_state->scaler_id;
 
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
crtc_state->scaler_state.scalers[scaler_id].mode;
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
339,24 → 343,29
}
 
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
vlv_update_plane(struct drm_plane *dplane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(dplane->state)->ckey;
u32 sprsurf_offset, linear_offset;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
 
sprctl = SP_ENABLE;
 
418,20 → 427,18
crtc_w--;
crtc_h--;
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
&x, &y,
obj->tiling_mode,
pixel_size,
linear_offset = y * fb->pitches[0] + x * cpp;
sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
 
x += src_w;
y += src_h;
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
 
if (key->flags) {
479,23 → 486,28
}
 
static void
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
ivb_update_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(plane->state)->ckey;
u32 sprsurf_offset, linear_offset;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
 
sprctl = SPRITE_ENABLE;
 
548,14 → 560,13
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset = y * fb->pitches[0] + x * cpp;
sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
 
/* HSW and BDW do this automagically in hardware */
562,8 → 573,7
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
x += src_w;
y += src_h;
linear_offset += src_h * fb->pitches[0] +
src_w * pixel_size;
linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
}
 
617,23 → 627,28
}
 
static void
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
ilk_update_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(plane->state)->ckey;
u32 dvssurf_offset, linear_offset;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
 
dvscntr = DVS_ENABLE;
 
682,19 → 697,18
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset = y * fb->pitches[0] + x * cpp;
dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
linear_offset -= dvssurf_offset;
 
if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
 
x += src_w;
y += src_h;
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
 
if (key->flags) {
759,7 → 773,6
int hscale, vscale;
int max_scale, min_scale;
bool can_scale;
int pixel_size;
 
if (!fb) {
state->visible = false;
881,6 → 894,7
/* Check size restrictions when scaling */
if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
WARN_ON(!can_scale);
 
892,9 → 906,7
if (src_w < 3 || src_h < 3)
state->visible = false;
 
pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
 
if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
918,30 → 930,6
return 0;
}
 
static void
intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->base.crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
 
crtc = crtc ? crtc : plane->crtc;
 
if (state->visible) {
intel_plane->update_plane(plane, crtc, fb,
state->dst.x1, state->dst.y1,
drm_rect_width(&state->dst),
drm_rect_height(&state->dst),
state->src.x1 >> 16,
state->src.y1 >> 16,
drm_rect_width(&state->src) >> 16,
drm_rect_height(&state->src) >> 16);
} else {
intel_plane->disable_plane(plane, crtc);
}
}
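With this commit helper removed, geometry now travels inside the state objects and the caller shrinks accordingly; a hedged sketch of the new shape (the actual call site lives in the atomic plane helpers, outside this diff):

if (plane_state->visible)
	intel_plane->update_plane(plane, crtc_state, plane_state);
else
	intel_plane->disable_plane(plane, crtc);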
 
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
1123,7 → 1111,6
intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
/drivers/video/drm/i915/intel_uncore.c
329,13 → 329,54
}
}
 
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
u32 dbg;
 
dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
return false;
 
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
return true;
}
 
static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
u32 cer;
 
cer = __raw_i915_read32(dev_priv, CLAIM_ER);
if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
return false;
 
__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
 
return true;
}
 
static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
return fpga_check_for_unclaimed_mmio(dev_priv);
 
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_check_for_unclaimed_mmio(dev_priv);
 
return false;
}
 
static void __intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
361,6 → 402,8
 
void intel_uncore_sanitize(struct drm_device *dev)
{
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
 
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
}
587,39 → 630,39
}
 
static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
i915_reg_t reg, bool read, bool before)
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const i915_reg_t reg,
const bool read,
const bool before)
{
const char *op = read ? "reading" : "writing to";
const char *when = before ? "before" : "after";
 
if (!i915.mmio_debug)
/* XXX. We limit the auto-arming traces for mmio
* debug on these platforms. They reveal just too many
* issues, and CI/BAT suffers from the noise.
* Please fix and then re-enable the automatic traces.
*/
if (i915.mmio_debug < 2 &&
(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;
 
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, i915_mmio_reg_offset(reg));
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
if (WARN(check_for_unclaimed_mmio(dev_priv),
"Unclaimed register detected %s %s register 0x%x\n",
before ? "before" : "after",
read ? "reading" : "writing to",
i915_mmio_reg_offset(reg)))
i915.mmio_debug--; /* Only report the first N failures */
}
}
 
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const i915_reg_t reg,
const bool read,
const bool before)
{
static bool mmio_debug_once = true;
 
if (i915.mmio_debug || !mmio_debug_once)
if (likely(!i915.mmio_debug))
return;
 
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
i915.mmio_debug = mmio_debug_once--;
__unclaimed_reg_debug(dev_priv, reg, read, before);
}
}
 
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
666,9 → 709,11
unsigned long irqflags; \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
unclaimed_reg_debug(dev_priv, reg, true, true)
 
#define GEN6_READ_FOOTER \
unclaimed_reg_debug(dev_priv, reg, true, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
701,11 → 746,9
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (NEEDS_FORCE_WAKE(offset)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
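Folding the checks into GEN6_READ_HEADER/FOOTER means every accessor now brackets its raw access symmetrically; simplified, gen6_read32 expands roughly to the following (the offset declaration is assumed to come from the header, which this diff does not show in full):

static u32 gen6_read32(struct drm_i915_private *dev_priv,
		       i915_reg_t reg, bool trace)
{
	u32 offset = i915_mmio_reg_offset(reg); /* assumed from the header */
	unsigned long irqflags;
	u32 val = 0;

	assert_rpm_wakelock_held(dev_priv);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	unclaimed_reg_debug(dev_priv, reg, true, true);   /* before */

	if (NEEDS_FORCE_WAKE(offset))
		__force_wake_get(dev_priv, FORCEWAKE_RENDER);
	val = __raw_i915_read32(dev_priv, reg);

	unclaimed_reg_debug(dev_priv, reg, true, false);  /* after */
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
	return val;
}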
 
753,7 → 796,6
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
767,7 → 809,6
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
 
866,9 → 907,11
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_wakelock_held(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
unclaimed_reg_debug(dev_priv, reg, false, true)
 
#define GEN6_WRITE_FOOTER \
unclaimed_reg_debug(dev_priv, reg, false, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
 
#define __gen6_write(x) \
894,13 → 937,10
if (NEEDS_FORCE_WAKE(offset)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
 
930,12 → 970,9
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
__raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
 
989,7 → 1026,6
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset) || \
is_gen9_shadowed(dev_priv, reg)) \
fw_engine = 0; \
1004,8 → 1040,6
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
 
1229,6 → 1263,8
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
 
dev_priv->uncore.unclaimed_mmio_check = 1;
 
switch (INTEL_INFO(dev)->gen) {
default:
case 9:
1586,13 → 1622,26
return intel_get_gpu_reset(dev) != NULL;
}
 
void intel_uncore_check_errors(struct drm_device *dev)
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return check_for_unclaimed_mmio(dev_priv);
}
 
if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed register before interrupt\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
if (unlikely(i915.mmio_debug ||
dev_priv->uncore.unclaimed_mmio_check <= 0))
return false;
 
if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
i915.mmio_debug++;
dev_priv->uncore.unclaimed_mmio_check--;
return true;
}
 
return false;
}
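A hedged sketch of the intended caller shape; this revision only adds the uncore side, so the interrupt-path hookup below is an assumption:

/* e.g. from an error/interrupt path: poll once, and let the
 * function bump i915.mmio_debug for oneshot verbose reporting. */
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
	DRM_DEBUG("unclaimed access flagged before this interrupt\n");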
/drivers/video/drm/i915/kms_display.c
542,7 → 542,6
struct drm_plane *plane;
 
int ret;
ENTER();
 
drm_for_each_plane(plane, dev)
{
594,8 → 593,6
set_mode(dev, os_display->connector, os_display->crtc, usermode, false);
};
 
LEAVE();
 
return ret;
};
 
/drivers/video/drm/i915/kos_cursor.c
20,7 → 20,7
cursor_state->crtc_x = x;
cursor_state->crtc_y = y;
 
intel_crtc_update_cursor(crtc, 1);
intel_crtc_update_cursor(crtc, cursor_state);
};
 
static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
28,6 → 28,8
struct drm_i915_private *dev_priv = os_display->ddev->dev_private;
struct drm_crtc *crtc = os_display->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *cursor_plane = crtc->cursor;
struct intel_plane_state *cursor_state = to_intel_plane_state(cursor_plane->state);
 
cursor_t *old;
 
42,9 → 44,12
else
intel_crtc->cursor_addr = (addr_t)cursor->cobj;
 
intel_crtc->base.cursor->state->crtc_w = 64;
intel_crtc->base.cursor->state->crtc_h = 64;
intel_crtc->base.cursor->state->rotation = 0;
cursor_state->visible = 1;
 
cursor_plane->state->crtc_w = 64;
cursor_plane->state->crtc_h = 64;
cursor_plane->state->rotation = 0;
 
mutex_unlock(&cursor_lock);
 
move_cursor_kms(cursor, crtc->cursor_x, crtc->cursor_y);
/drivers/video/drm/i915/main.c
16,7 → 16,7
#include "bitmap.h"
#include "i915_kos32.h"
 
#define DRV_NAME "i915 v4.5.7"
#define DRV_NAME "i915 v4.6.7"
 
#define I915_DEV_CLOSE 0
#define I915_DEV_INIT 1
/drivers/video/drm/i915/pci.c
17,6 → 17,9
 
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
 
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
 
/*
* Translate the low bits of the PCI base
* to the resource type
/drivers/video/drm/i915/utils.c
624,3 → 624,53
 
return retval;
}
 
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
	void *vaddr;
	char *tmp;
	int i;

	/* NOTE: flags and prot are accepted for API compatibility but
	 * ignored; every page is mapped PG_SW. 4 KiB pages are assumed
	 * (count << 12). */
	vaddr = AllocKernelSpace(count << 12);
	if (vaddr == NULL)
		return NULL;

	for (i = 0, tmp = vaddr; i < count; i++)
	{
		MapPage(tmp, page_to_phys(pages[i]), PG_SW);
		tmp += 4096;
	};

	return vaddr;
};
 
void vunmap(const void *addr)
{
FreeKernelSpace((void*)addr);
}
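A minimal usage sketch for this pair, assuming 4 KiB pages and a scalar pgprot_t (both extra arguments are ignored by this port, so zeros are passed for brevity):

void *vaddr = vmap(pages, npages, 0, 0);	/* flags/prot unused in this port */
if (vaddr != NULL)
{
	memcpy(vaddr, src, len);	/* pages are now linearly addressable */
	vunmap(vaddr);
};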
 
void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
 
void __iomem *ioremap_wc(resource_size_t offset, unsigned long size)
{
	/* Write-combining is currently disabled in this port; the
	 * mapping falls back to the same attributes as a plain map. */
// return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_WRITEC|0x100);
	return (void __iomem*) MapIoMem(offset, size, PG_SW|0x100);
}
 
void iounmap(volatile void __iomem *addr)
{
FreeKernelSpace((void*)addr);
}
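Typical use in the driver, sketched with illustrative BAR indices and sizes:

void __iomem *regs = ioremap_nocache(pci_resource_start(pdev, 0), 2*1024*1024);
void __iomem *fb   = ioremap_wc(pci_resource_start(pdev, 2), 16*1024*1024);
/* ... MMIO and framebuffer access ... */
iounmap(fb);
iounmap(regs);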
 
unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
{
	/* The access_ok() validation is stubbed out in this port, so the
	 * copy always runs; the fallback memset is kept for reference. */
// if (access_ok(VERIFY_READ, from, n))
	n = __copy_from_user(to, from, n);
// else
// memset(to, 0, n);
	return n;
}