Subversion Repositories Kolibri OS

Compare Revisions

Rev 4559 → Rev 4560

/drivers/video/drm/drm_crtc.c
198,6 → 198,7
{ DRM_MODE_CONNECTOR_TV, "TV" },
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
};
 
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
207,6 → 208,7
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
{ DRM_MODE_ENCODER_DSI, "DSI" },
};
 
void drm_connector_ida_init(void)
669,6 → 671,29
EXPORT_SYMBOL(drm_crtc_cleanup);
 
/**
* drm_crtc_index - find the index of a registered CRTC
* @crtc: CRTC to find index for
*
* Given a registered CRTC, return the index of that CRTC within a DRM
* device's list of CRTCs.
*/
unsigned int drm_crtc_index(struct drm_crtc *crtc)
{
unsigned int index = 0;
struct drm_crtc *tmp;
 
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
if (tmp == crtc)
return index;
 
index++;
}
 
BUG();
}
EXPORT_SYMBOL(drm_crtc_index);
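For context, a minimal sketch of how a driver might use the new helper (the function name example_crtc_bit is hypothetical, not part of this revision):

/* Hypothetical helper: compute the possible_crtcs bit for one CRTC. */
static uint32_t example_crtc_bit(struct drm_crtc *crtc)
{
    /* drm_crtc_index() is the CRTC's position in the device's
     * mode_config.crtc_list, so shifting 1 by it yields the bit
     * drivers store in encoder->possible_crtcs. */
    return 1 << drm_crtc_index(crtc);
}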
 
/**
* drm_mode_probed_add - add a mode to a connector's probed mode list
* @connector: connector the new mode
* @mode: mode data
1297,7 → 1322,7
}
 
/**
* drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
* drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
1313,6 → 1338,9
if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
return -ERANGE;
 
if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
return -EINVAL;
 
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
1550,7 → 1578,7
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
1577,6 → 1605,19
return ret;
}
 
static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
const struct drm_file *file_priv)
{
/*
* If user-space hasn't configured the driver to expose the stereo 3D
* modes, don't expose them.
*/
if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
return false;
 
return true;
}
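The stereo_allowed flag tested above is opted into by userspace; a hedged sketch of that sequence, assuming the DRM_CLIENT_CAP_STEREO_3D client capability from the upstream uapi of this era:

/* Userspace side: ask the kernel to expose stereo 3D modes. */
struct drm_set_client_cap cap = {
    .capability = DRM_CLIENT_CAP_STEREO_3D,
    .value = 1,
};
ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap); /* fd: an open DRM device node */

After this call, drm_mode_getconnector() also lists the modes carrying DRM_MODE_FLAG_3D_MASK flags.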
 
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
1621,7 → 1662,7
obj = drm_mode_object_find(dev, out_resp->connector_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
1642,6 → 1683,7
 
/* delayed so we get modes regardless of pre-fill_modes state */
list_for_each_entry(mode, &connector->modes, head)
if (drm_mode_expose_to_userspace(mode, file_priv))
mode_count++;
 
out_resp->connector_id = connector->base.id;
1664,6 → 1706,9
copied = 0;
mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
if (!drm_mode_expose_to_userspace(mode, file_priv))
continue;
 
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
1733,7 → 1778,7
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
encoder = obj_to_encoder(obj);
2040,6 → 2085,45
EXPORT_SYMBOL(drm_mode_set_config_internal);
 
#if 0
/*
* Checks that the framebuffer is big enough for the CRTC viewport
* (x, y, hdisplay, vdisplay)
*/
static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb)
 
{
int hdisplay, vdisplay;
 
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
 
if (drm_mode_is_stereo(mode)) {
struct drm_display_mode adjusted = *mode;
 
drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
hdisplay = adjusted.crtc_hdisplay;
vdisplay = adjusted.crtc_vdisplay;
}
 
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
 
if (hdisplay > fb->width ||
vdisplay > fb->height ||
x > fb->width - hdisplay ||
y > fb->height - vdisplay) {
DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
fb->width, fb->height, hdisplay, vdisplay, x, y,
crtc->invert_dimensions ? " (inverted)" : "");
return -ENOSPC;
}
 
return 0;
}
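A worked example of the stereo adjustment above: for the CEA 1920x1080@24 timing (vtotal 1125, hence a 45-line vblank), CRTC_STEREO_DOUBLE doubles the vertical timings and frame packing yields crtc_vdisplay = 1080 * 2 + 45 = 2205, so the framebuffer must be at least 1920x2205 for the viewport check to pass.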
 
/**
* drm_mode_setcrtc - set CRTC configuration
* @dev: drm device for the ioctl
2080,7 → 2164,7
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
2087,7 → 2171,6
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
if (crtc_req->mode_valid) {
int hdisplay, vdisplay;
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
2104,7 → 2187,7
if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
}
2123,24 → 2206,12
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
mode, fb);
if (ret)
goto out;
 
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
 
if (hdisplay > fb->width ||
vdisplay > fb->height ||
crtc_req->x > fb->width - hdisplay ||
crtc_req->y > fb->height - vdisplay) {
DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
fb->width, fb->height,
hdisplay, vdisplay, crtc_req->x, crtc_req->y,
crtc->invert_dimensions ? " (inverted)" : "");
ret = -ENOSPC;
goto out;
}
}
 
if (crtc_req->count_connectors == 0 && mode) {
DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
2184,7 → 2255,7
if (!obj) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
2232,7 → 2303,7
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -EINVAL;
return -ENOENT;
}
crtc = obj_to_crtc(obj);
 
2284,6 → 2355,7
return drm_mode_cursor_common(dev, req, file_priv);
}
#endif
 
/* Original addfb only supported RGB formats, so figure out which one */
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
2441,6 → 2513,8
case DRM_FORMAT_YVU444:
return 0;
default:
DRM_DEBUG_KMS("invalid pixel format %s\n",
drm_get_format_name(r->pixel_format));
return -EINVAL;
}
}
2606,7 → 2680,7
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);
 
return -EINVAL;
return -ENOENT;
}
 
/**
2634,7 → 2708,7
 
fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
return -EINVAL;
return -ENOENT;
 
r->height = fb->height;
r->width = fb->width;
2679,7 → 2753,7
 
fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
return -EINVAL;
return -ENOENT;
 
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
2717,10 → 2791,8
}
 
if (fb->funcs->dirty) {
drm_modeset_lock_all(dev);
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
drm_modeset_unlock_all(dev);
} else {
ret = -ENOSYS;
}
3014,7 → 3086,7
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto done;
}
property = obj_to_property(obj);
3145,7 → 3217,7
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto done;
}
blob = obj_to_blob(obj);
3267,7 → 3339,7
 
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
if (!obj->properties) {
3320,8 → 3392,10
drm_modeset_lock_all(dev);
 
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
if (!arg_obj) {
ret = -ENOENT;
goto out;
}
if (!arg_obj->properties)
goto out;
 
3334,8 → 3408,10
 
prop_obj = drm_mode_object_find(dev, arg->prop_id,
DRM_MODE_OBJECT_PROPERTY);
if (!prop_obj)
if (!prop_obj) {
ret = -ENOENT;
goto out;
}
property = obj_to_property(prop_obj);
 
if (!drm_property_change_is_valid(property, arg->value))
3422,7 → 3498,7
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
3481,7 → 3557,7
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
3601,7 → 3677,8
*bpp = 32;
break;
default:
DRM_DEBUG_KMS("unsupported pixel format\n");
DRM_DEBUG_KMS("unsupported pixel format %s\n",
drm_get_format_name(format));
*depth = 0;
*bpp = 0;
break;
/drivers/video/drm/drm_crtc_helper.c
39,6 → 39,10
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
 
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
 
/**
* drm_helper_move_panel_connectors_to_head() - move panels to the front in the
* connector list
76,7 → 80,8
{
struct drm_display_mode *mode;
 
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
DRM_MODE_FLAG_3D_MASK))
return;
 
list_for_each_entry(mode, &connector->modes, head) {
86,6 → 91,9
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
mode->status = MODE_NO_DBLESCAN;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
!(flags & DRM_MODE_FLAG_3D_MASK))
mode->status = MODE_NO_STEREO;
}
 
return;
105,9 → 113,9
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
* Intended to be used as a generic implementation of the ->probe() @connector
* callback for drivers that use the crtc helpers for output mode filtering and
* detection.
* Intended to be used as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
* RETURNS:
* Number of modes found on @connector.
175,6 → 183,8
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
if (connector->stereo_allowed)
mode_flags |= DRM_MODE_FLAG_3D_MASK;
drm_mode_validate_flag(connector, mode_flags);
 
list_for_each_entry(mode, &connector->modes, head) {
314,35 → 324,6
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
* @crtc: crtc to test
*
* Return false if @encoder can't be driven by @crtc, true otherwise.
*/
static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
struct drm_crtc *crtc)
{
struct drm_device *dev;
struct drm_crtc *tmp;
int crtc_mask = 1;
 
WARN(!crtc, "checking null crtc?\n");
 
dev = crtc->dev;
 
list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
if (tmp == crtc)
break;
crtc_mask <<= 1;
}
 
if (encoder->possible_crtcs & crtc_mask)
return true;
return false;
}
 
/*
* Check the CRTC we're going to map each output to vs. its current
* CRTC. If they don't match, we have to disable the output and the CRTC
395,22 → 376,25
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_display_mode *adjusted_mode, saved_mode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
bool saved_enabled;
struct drm_encoder *encoder;
bool ret = true;
 
saved_enabled = crtc->enabled;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
return true;
 
adjusted_mode = drm_mode_duplicate(dev, mode);
if (!adjusted_mode)
if (!adjusted_mode) {
crtc->enabled = saved_enabled;
return false;
}
 
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
523,13 → 507,13
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc);
drm_calc_timestamping_constants(crtc, &crtc->hwmode);
 
/* FIXME: add subpixel order */
done:
drm_mode_destroy(dev, adjusted_mode);
if (!ret) {
crtc->hwmode = saved_hwmode;
crtc->enabled = saved_enabled;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
557,6 → 541,14
continue;
 
connector->encoder = NULL;
 
/*
* drm_helper_disable_unused_functions() ought to be
* doing this, but since we've decoupled the encoder
* from the connector above, the required connection
* between them is henceforth no longer available.
*/
connector->dpms = DRM_MODE_DPMS_OFF;
}
}
 
583,9 → 575,8
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_crtc *save_crtcs, *new_crtc, *crtc;
struct drm_crtc *new_crtc;
struct drm_encoder *save_encoders, *new_encoder, *encoder;
struct drm_framebuffer *old_fb = NULL;
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *save_connectors, *connector;
621,38 → 612,28
 
dev = set->crtc->dev;
 
/* Allocate space for the backup of all (non-pointer) crtc, encoder and
* connector data. */
save_crtcs = kzalloc(dev->mode_config.num_crtc *
sizeof(struct drm_crtc), GFP_KERNEL);
if (!save_crtcs)
return -ENOMEM;
 
/*
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
*/
save_encoders = kzalloc(dev->mode_config.num_encoder *
sizeof(struct drm_encoder), GFP_KERNEL);
if (!save_encoders) {
kfree(save_crtcs);
if (!save_encoders)
return -ENOMEM;
}
 
save_connectors = kzalloc(dev->mode_config.num_connector *
sizeof(struct drm_connector), GFP_KERNEL);
if (!save_connectors) {
kfree(save_crtcs);
kfree(save_encoders);
return -ENOMEM;
}
 
/* Copy data. Note that driver private data is not affected.
/*
* Copy data. Note that driver private data is not affected.
* Should anything bad happen only the expected state is
* restored, not the driver's personal bookkeeping.
*/
count = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
save_crtcs[count++] = *crtc;
}
 
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
save_encoders[count++] = *encoder;
}
775,19 → 756,17
mode_changed = true;
 
if (mode_changed) {
set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
if (set->crtc->enabled) {
if (drm_helper_crtc_in_use(set->crtc)) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
old_fb = set->crtc->fb;
set->crtc->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
old_fb)) {
save_set.fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
set->crtc->fb = old_fb;
set->crtc->fb = save_set.fb;
ret = -EINVAL;
goto fail;
}
802,14 → 781,13
} else if (fb_changed) {
set->crtc->x = set->x;
set->crtc->y = set->y;
 
old_fb = set->crtc->fb;
if (set->crtc->fb != set->fb)
set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, old_fb);
set->x, set->y, save_set.fb);
if (ret != 0) {
set->crtc->fb = old_fb;
set->crtc->x = save_set.x;
set->crtc->y = save_set.y;
set->crtc->fb = save_set.fb;
goto fail;
}
}
816,17 → 794,11
 
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
return 0;
 
fail:
/* Restore all previous data. */
count = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
*crtc = save_crtcs[count++];
}
 
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
*encoder = save_encoders[count++];
}
844,7 → 816,6
 
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
1124,7 → 1095,7
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
void drm_helper_hpd_irq_event(struct drm_device *dev)
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
enum drm_connector_status old_status;
1131,7 → 1102,7
bool changed = false;
 
if (!dev->mode_config.poll_enabled)
return;
return false;
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1156,5 → 1127,7
 
if (changed)
drm_kms_helper_hotplug_event(dev);
 
return changed;
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
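With the new bool return, a driver's threaded hotplug handler can gate follow-up work; a minimal sketch, assuming a hypothetical handler name and that the device pointer is the IRQ cookie (needs linux/interrupt.h):

static irqreturn_t example_hpd_irq_thread(int irq, void *arg)
{
    struct drm_device *dev = arg;

    /* true only when some connector actually changed status */
    if (drm_helper_hpd_irq_event(dev))
        DRM_DEBUG_KMS("hotplug: display configuration changed\n");

    return IRQ_HANDLED;
}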
/drivers/video/drm/drm_dp_helper.c
228,12 → 228,12
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
 
/* Helpers for DP link training */
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
 
static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
242,7 → 242,7
return (l >> s) & 0xf;
}
 
bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
262,7 → 262,7
}
EXPORT_SYMBOL(drm_dp_channel_eq_ok);
 
bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
277,7 → 277,7
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
 
u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
290,7 → 290,7
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
 
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
303,7 → 303,7
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(100);
else
311,7 → 311,7
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(400);
else
/drivers/video/drm/drm_edid.c
463,6 → 463,15
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
};
 
/*
* These more or less come from the DMT spec. The 720x400 modes are
* inferred from historical 80x25 practice. The 640x480@67 and 832x624@75
* modes are old-school Mac modes. The EDID spec says the 1152x864@75 mode
* should be 1152x870, again for the Mac, but instead we use the x864 DMT
* mode.
*
* The DMT modes have been fact-checked; the rest are mild guesses.
*/
static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
565,7 → 574,7
{ 1600, 1200, 75, 0 },
{ 1600, 1200, 85, 0 },
{ 1792, 1344, 60, 0 },
{ 1792, 1344, 85, 0 },
{ 1792, 1344, 75, 0 },
{ 1856, 1392, 60, 0 },
{ 1856, 1392, 75, 0 },
{ 1920, 1200, 60, 1 },
596,347 → 605,347
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, },
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, },
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, },
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, },
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, },
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 100, },
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, },
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, },
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, },
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, },
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, },
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 120, },
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, },
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, },
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, },
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, },
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, },
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, },
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, },
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, },
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, },
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, },
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, },
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 58 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, },
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 59 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, },
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, },
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, },
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, },
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, },
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, },
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
};
 
/*
1269,6 → 1278,18
}
EXPORT_SYMBOL(drm_get_edid);
 
/**
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
*
* Return duplicate edid or NULL on allocation failure.
*/
struct edid *drm_edid_duplicate(const struct edid *edid)
{
return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
}
EXPORT_SYMBOL(drm_edid_duplicate);
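A hedged usage sketch (caller name hypothetical): a driver that caches the EDID can hand out independent copies without touching the DDC bus again.

/* Hypothetical caller: return a private copy of a cached EDID. */
static struct edid *example_get_cached_edid(const struct edid *cached)
{
    if (!cached)
        return NULL;
    /* kmemdup copies the base block plus all extension blocks */
    return drm_edid_duplicate(cached);
}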
 
/*** EDID parsing ***/
 
/**
1313,7 → 1334,7
}
 
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
 
/**
* edid_fixup_preferred - set preferred modes based on quirk list
1328,6 → 1349,7
{
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
int cur_vrefresh, preferred_vrefresh;
 
if (list_empty(&connector->probed_modes))
return;
1350,10 → 1372,14
if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
preferred_mode = cur_mode;
 
cur_vrefresh = cur_mode->vrefresh ?
cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
preferred_vrefresh = preferred_mode->vrefresh ?
preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
/* At a given size, try to get closest to target refresh */
if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
MODE_REFRESH_DIFF(cur_mode, target_refresh) <
MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
preferred_mode = cur_mode;
}
}
2073,7 → 2099,7
u8 *est = ((u8 *)timing) + 5;
 
for (i = 0; i < 6; i++) {
for (j = 7; j > 0; j--) {
for (j = 7; j >= 0; j--) {
m = (i * 8) + (7 - j);
if (m >= ARRAY_SIZE(est3_modes))
break;
2409,7 → 2435,7
 
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks(to_match, cea_mode))
drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
return mode + 1;
}
return 0;
2458,7 → 2484,7
 
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
drm_mode_equal_no_clocks(to_match, hdmi_mode))
drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
return mode + 1;
}
return 0;
2512,6 → 2538,9
if (!newmode)
continue;
 
/* Carry over the stereo flags */
newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
 
/*
* The current mode could be either variant. Make
* sure to pick the "other" clock for the new mode.
2533,27 → 2562,166
return modes;
}
 
static struct drm_display_mode *
drm_display_mode_from_vic_index(struct drm_connector *connector,
const u8 *video_db, u8 video_len,
u8 video_index)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode;
u8 cea_mode;
 
if (video_db == NULL || video_index >= video_len)
return NULL;
 
/* CEA modes are numbered 1..127 */
cea_mode = (video_db[video_index] & 127) - 1;
if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
return NULL;
 
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (!newmode)
return NULL;
newmode->vrefresh = 0;
 
return newmode;
}
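To make the VIC arithmetic above concrete (a worked example, not code from this revision): a Video Data Block byte of 0x90 has the native-mode bit 7 set, so 0x90 & 127 = 16, i.e. VIC 16, which selects entry 15 of edid_cea_modes, the 1920x1080@60Hz mode listed earlier.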
 
static int
do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
int i, modes = 0;
 
for (i = 0; i < len; i++) {
struct drm_display_mode *mode;
mode = drm_display_mode_from_vic_index(connector, db, len, i);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
}
}
 
return modes;
}
 
struct stereo_mandatory_mode {
int width, height, vrefresh;
unsigned int flags;
};
 
static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
{ 1920, 1080, 50,
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
{ 1920, 1080, 60,
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
{ 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
{ 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
{ 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
{ 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
};
 
static bool
stereo_match_mandatory(const struct drm_display_mode *mode,
const struct stereo_mandatory_mode *stereo_mode)
{
unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
 
return mode->hdisplay == stereo_mode->width &&
mode->vdisplay == stereo_mode->height &&
interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
}
 
static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
const u8 *mode;
u8 cea_mode;
const struct drm_display_mode *mode;
struct list_head stereo_modes;
int modes = 0, i;
 
INIT_LIST_HEAD(&stereo_modes);
 
list_for_each_entry(mode, &connector->probed_modes, head) {
for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
const struct stereo_mandatory_mode *mandatory;
struct drm_display_mode *new_mode;
 
if (!stereo_match_mandatory(mode,
&stereo_mandatory_modes[i]))
continue;
 
mandatory = &stereo_mandatory_modes[i];
new_mode = drm_mode_duplicate(dev, mode);
if (!new_mode)
continue;
 
new_mode->flags |= mandatory->flags;
list_add_tail(&new_mode->head, &stereo_modes);
modes++;
}
}
 
list_splice_tail(&stereo_modes, &connector->probed_modes);
 
return modes;
}
 
static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *newmode;
 
vic--; /* VICs start at 1 */
if (vic >= ARRAY_SIZE(edid_4k_modes)) {
DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
return 0;
}
 
newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
if (!newmode)
return 0;
 
drm_mode_probed_add(connector, newmode);
 
return 1;
}
 
static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
const u8 *video_db, u8 video_len, u8 video_index)
{
struct drm_display_mode *newmode;
int modes = 0;
 
for (mode = db; mode < db + len; mode++) {
cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev,
&edid_cea_modes[cea_mode]);
if (structure & (1 << 0)) {
newmode = drm_display_mode_from_vic_index(connector, video_db,
video_len,
video_index);
if (newmode) {
newmode->vrefresh = 0;
newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
if (structure & (1 << 6)) {
newmode = drm_display_mode_from_vic_index(connector, video_db,
video_len,
video_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
if (structure & (1 << 8)) {
newmode = drm_display_mode_from_vic_index(connector, video_db,
video_len,
video_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
 
return modes;
}
2564,14 → 2732,17
* @db: start of the CEA vendor specific block
* @len: length of the CEA block payload, ie. one can access up to db[len]
*
* Parses the HDMI VSDB looking for modes to add to @connector.
* Parses the HDMI VSDB looking for modes to add to @connector. This function
* also adds the stereo 3d modes when applicable.
*/
static int
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
const u8 *video_db, u8 video_len)
{
struct drm_device *dev = connector->dev;
int modes = 0, offset = 0, i;
u8 vic_len;
int modes = 0, offset = 0, i, multi_present = 0, multi_len;
u8 vic_len, hdmi_3d_len = 0;
u16 mask;
u16 structure_all;
 
if (len < 8)
goto out;
2590,32 → 2761,110
 
/* the declared length is not long enough for the 2 first bytes
* of additional video format capabilities */
offset += 2;
if (len < (8 + offset))
if (len < (8 + offset + 2))
goto out;
 
/* 3D_Present */
offset++;
if (db[8 + offset] & (1 << 7)) {
modes += add_hdmi_mandatory_stereo_modes(connector);
 
/* 3D_Multi_present */
multi_present = (db[8 + offset] & 0x60) >> 5;
}
 
offset++;
vic_len = db[8 + offset] >> 5;
hdmi_3d_len = db[8 + offset] & 0x1f;
 
for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
struct drm_display_mode *newmode;
u8 vic;
 
vic = db[9 + offset + i];
modes += add_hdmi_mode(connector, vic);
}
offset += 1 + vic_len;
 
vic--; /* VICs start at 1 */
if (vic >= ARRAY_SIZE(edid_4k_modes)) {
DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
continue;
if (multi_present == 1)
multi_len = 2;
else if (multi_present == 2)
multi_len = 4;
else
multi_len = 0;
 
if (len < (8 + offset + hdmi_3d_len - 1))
goto out;
 
if (hdmi_3d_len < multi_len)
goto out;
 
if (multi_present == 1 || multi_present == 2) {
/* 3D_Structure_ALL */
structure_all = (db[8 + offset] << 8) | db[9 + offset];
 
/* check if 3D_MASK is present */
if (multi_present == 2)
mask = (db[10 + offset] << 8) | db[11 + offset];
else
mask = 0xffff;
 
for (i = 0; i < 16; i++) {
if (mask & (1 << i))
modes += add_3d_struct_modes(connector,
structure_all,
video_db,
video_len, i);
}
}
 
newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
if (!newmode)
continue;
offset += multi_len;
 
for (i = 0; i < (hdmi_3d_len - multi_len); i++) {
int vic_index;
struct drm_display_mode *newmode = NULL;
unsigned int newflag = 0;
bool detail_present;
 
detail_present = ((db[8 + offset + i] & 0x0f) > 7);
 
if (detail_present && (i + 1 == hdmi_3d_len - multi_len))
break;
 
/* 2D_VIC_order_X */
vic_index = db[8 + offset + i] >> 4;
 
/* 3D_Structure_X */
switch (db[8 + offset + i] & 0x0f) {
case 0:
newflag = DRM_MODE_FLAG_3D_FRAME_PACKING;
break;
case 6:
newflag = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
break;
case 8:
/* 3D_Detail_X */
if ((db[9 + offset + i] >> 4) == 1)
newflag = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
break;
}
 
if (newflag != 0) {
newmode = drm_display_mode_from_vic_index(connector,
video_db,
video_len,
vic_index);
 
if (newmode) {
newmode->flags |= newflag;
drm_mode_probed_add(connector, newmode);
modes++;
}
}
 
if (detail_present)
i++;
}
 
out:
return modes;
}
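A worked example of the 3D_Structure_ALL handling above: with 3D_Multi_present = 1 the block carries a two-byte 3D_Structure_ALL field and multi_len = 2; a value of 0x0141 has bits 0, 6 and 8 set, so add_3d_struct_modes() offers frame packing, top-and-bottom and side-by-side (half) variants of every VIC allowed by the mask (0xffff when no 3D_MASK is present).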
2673,8 → 2922,8
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
const u8 *cea = drm_find_cea_extension(edid);
const u8 *db;
u8 dbl;
const u8 *db, *hdmi = NULL, *video = NULL;
u8 dbl, hdmi_len, video_len = 0;
int modes = 0;
 
if (cea && cea_revision(cea) >= 3) {
2687,13 → 2936,26
db = &cea[i];
dbl = cea_db_payload_len(db);
 
if (cea_db_tag(db) == VIDEO_BLOCK)
modes += do_cea_modes(connector, db + 1, dbl);
else if (cea_db_is_hdmi_vsdb(db))
modes += do_hdmi_vsdb_modes(connector, db, dbl);
if (cea_db_tag(db) == VIDEO_BLOCK) {
video = db + 1;
video_len = dbl;
modes += do_cea_modes(connector, video, dbl);
}
else if (cea_db_is_hdmi_vsdb(db)) {
hdmi = db;
hdmi_len = dbl;
}
}
}
 
/*
* We parse the HDMI VSDB after having added the cea modes as we will
* be patching their flags when the sink supports stereo 3D.
*/
if (hdmi)
modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
video_len);
 
return modes;
}
 
3296,6 → 3558,19
}
EXPORT_SYMBOL(drm_add_modes_noedid);
 
void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref)
{
struct drm_display_mode *mode;
 
list_for_each_entry(mode, &connector->probed_modes, head) {
if (drm_mode_width(mode) == hpref &&
drm_mode_height(mode) == vpref)
mode->type |= DRM_MODE_TYPE_PREFERRED;
}
}
EXPORT_SYMBOL(drm_set_preferred_mode);
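A minimal usage sketch of the new helper, assuming a hypothetical fixed 1366x768 panel and the existing drm_add_modes_noedid() helper:

static int example_fill_modes(struct drm_connector *connector)
{
    /* expose the standard mode pool up to the panel size ... */
    int count = drm_add_modes_noedid(connector, 1366, 768);

    /* ... then flag the native resolution as preferred */
    drm_set_preferred_mode(connector, 1366, 768);
    return count;
}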
 
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
3329,6 → 3604,33
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
 
static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
 
switch (layout) {
case DRM_MODE_FLAG_3D_FRAME_PACKING:
return HDMI_3D_STRUCTURE_FRAME_PACKING;
case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
case DRM_MODE_FLAG_3D_L_DEPTH:
return HDMI_3D_STRUCTURE_L_DEPTH;
case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
default:
return HDMI_3D_STRUCTURE_INVALID;
}
}
 
/**
* drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
* data from a DRM display mode
3346,6 → 3648,7
const struct drm_display_mode *mode)
{
int err;
u32 s3d_flags;
u8 vic;
 
if (!frame || !mode)
3352,14 → 3655,22
return -EINVAL;
 
vic = drm_match_hdmi_mode(mode);
if (!vic)
s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
 
if (!vic && !s3d_flags)
return -EINVAL;
 
if (vic && s3d_flags)
return -EINVAL;
 
err = hdmi_vendor_infoframe_init(frame);
if (err < 0)
return err;
 
if (vic)
frame->vic = vic;
else
frame->s3d_struct = s3d_structure_from_display_mode(mode);
 
return 0;
}
/drivers/video/drm/drm_fb_helper.c
39,10 → 39,6
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
 
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
 
static LIST_HEAD(kernel_fb_helper_list);
 
/**
155,9 → 151,34
 
if (bound < crtcs_bound)
return false;
 
return true;
}
 
#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
bool ret;
ret = drm_fb_helper_force_kernel_mode();
if (ret == true)
DRM_ERROR("Failed to restore crtc configuration\n");
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
 
static void drm_fb_helper_sysrq(int dummy1)
{
schedule_work(&drm_fb_helper_restore_work);
}
 
static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.handler = drm_fb_helper_sysrq,
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
#else
 
#endif
 
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
584,7 → 605,6
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
int ret = 0;
int i;
 
595,8 → 615,6
}
 
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
 
modeset = &fb_helper->crtc_info[i].mode_set;
 
modeset->x = var->xoffset;
1080,7 → 1098,6
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_fb_helper_crtc *best_crtc;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
1092,7 → 1109,6
connector = fb_helper_conn->connector;
 
best_crtcs[n] = NULL;
best_crtc = NULL;
best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
if (modes[n] == NULL)
return best_score;
1141,7 → 1157,6
score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
width, height);
if (score > best_score) {
best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
1308,8 → 1323,7
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
u32 max_width, max_height;
 
if (!fb_helper->fb)
return 0;
1324,10 → 1338,8
 
max_width = fb_helper->fb->width;
max_height = fb_helper->fb->height;
bpp_sel = fb_helper->fb->bits_per_pixel;
 
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
mutex_unlock(&fb_helper->dev->mode_config.mutex);
 
drm_modeset_lock_all(dev);
1339,5 → 1351,24
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 
/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
*/
#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
static int __init drm_fb_helper_modinit(void)
{
const char *name = "fbcon";
struct module *fbcon;
 
mutex_lock(&module_mutex);
fbcon = find_module(name);
mutex_unlock(&module_mutex);
 
if (!fbcon)
request_module_nowait(name);
return 0;
}
 
module_init(drm_fb_helper_modinit);
#endif
/drivers/video/drm/drm_gem.c
28,6 → 28,8
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
86,19 → 88,19
int
drm_gem_init(struct drm_device *dev)
{
struct drm_gem_mm *mm;
struct drm_vma_offset_manager *vma_offset_manager;
 
mutex_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
 
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
if (!mm) {
vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
}
 
dev->mm_private = mm;
drm_vma_offset_manager_init(&mm->vma_manager,
dev->vma_offset_manager = vma_offset_manager;
drm_vma_offset_manager_init(vma_offset_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
 
108,11 → 110,10
void
drm_gem_destroy(struct drm_device *dev)
{
struct drm_gem_mm *mm = dev->mm_private;
 
drm_vma_offset_manager_destroy(&mm->vma_manager);
kfree(mm);
dev->mm_private = NULL;
drm_vma_offset_manager_destroy(dev->vma_offset_manager);
kfree(dev->vma_offset_manager);
dev->vma_offset_manager = NULL;
}
 
/**
124,11 → 125,12
{
struct file *filp;
 
drm_gem_private_object_init(dev, obj, size);
 
filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(filp))
return PTR_ERR(filp);
 
drm_gem_private_object_init(dev, obj, size);
obj->filp = filp;
 
return 0;
156,40 → 158,6
EXPORT_SYMBOL(drm_gem_private_object_init);
 
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
 
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
goto free;
 
if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
 
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
goto fput;
}
return obj;
fput:
/* Object_init mangles the global counters - readjust them. */
free(obj->filp);
free:
kfree(obj);
return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
 
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
BUG();
}
 
/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
204,13 → 172,6
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
obj->name = 0;
/*
* The object name held a reference to this object, drop
* that now.
*
* This cannot be the last reference, since the handle holds one too.
*/
kref_put(&obj->refcount, drm_gem_object_ref_bug);
}
}
 
268,6 → 229,7
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
 
// drm_vma_node_revoke(&obj->vma_node, filp->filp);
 
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
278,9 → 240,19
EXPORT_SYMBOL(drm_gem_handle_delete);
 
/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
* drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
*
* This implements the ->dumb_destroy kms driver callback for drivers which use
* gem to manage their backing storage.
*/
int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle)
{
return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
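Typical wiring, sketched with a hypothetical driver (example_dumb_create stands in for the driver's own allocator):

static int example_dumb_create(struct drm_file *file, struct drm_device *dev,
                               struct drm_mode_create_dumb *args); /* hypothetical */

static struct drm_driver example_driver = {
    .driver_features = DRIVER_GEM | DRIVER_MODESET,
    .dumb_create = example_dumb_create,
    /* GEM-backed dumb buffers can reuse the helper directly */
    .dumb_destroy = drm_gem_dumb_destroy,
};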
 
/**
* drm_gem_handle_create_tail - internal functions to create a handle
*
317,6 → 289,12
}
*handlep = ret;
 
// ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
// if (ret) {
// drm_gem_handle_delete(file_priv, *handlep);
// return ret;
// }
 
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
if (ret) {
344,7 → 322,7
}
EXPORT_SYMBOL(drm_gem_handle_create);
 
 
#if 0
/**
* drm_gem_free_mmap_offset - release a fake mmap offset for an object
* @obj: obj in question
351,14 → 329,12
*
* This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
*/
#if 0
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
 
drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
380,54 → 356,131
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
 
/* Set the object up for mmap'ing */
list = &obj->map_list;
list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
if (!list->map)
return -ENOMEM;
return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
 
map = list->map;
map->type = _DRM_GEM;
map->size = obj->size;
map->handle = obj;
/**
* drm_gem_create_mmap_offset - create a fake mmap offset for an object
* @obj: obj in question
*
* GEM memory mapping works by handing back to userspace a fake mmap offset
* it can use in a subsequent mmap(2) call. The DRM core code then looks
* up the object based on the offset and sets up the various memory mapping
* structures.
*
* This routine allocates and attaches a fake offset for @obj.
*/
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
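 
The fake offset attached above is what a driver hands back from its
dumb_map_offset hook; userspace then passes it to mmap(2) on the DRM fd. A
minimal sketch, assuming the object already has shmem backing
(my_dumb_map_offset and the surrounding boilerplate are illustrative):
 
static int my_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			      uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;
 
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL)
		return -ENOENT;
 
	/* allocate (or reuse) the fake offset in the vma offset manager */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->vma_node);
 
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}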
 
/* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
obj->size / PAGE_SIZE, 0, false);
/**
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
* @obj: obj in question
* @gfpmask: gfp mask of requested pages
*/
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
struct inode *inode;
struct address_space *mapping;
struct page *p, **pages;
int i, npages;
 
if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
ret = -ENOSPC;
goto out_free_list;
}
/* This is the shared memory object that backs the GEM resource */
inode = file_inode(obj->filp);
mapping = inode->i_mapping;
 
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
obj->size / PAGE_SIZE, 0);
if (!list->file_offset_node) {
ret = -ENOMEM;
goto out_free_list;
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
* driver author is doing something really wrong:
*/
WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
 
npages = obj->size >> PAGE_SHIFT;
 
pages = drm_malloc_ab(npages, sizeof(struct page *));
if (pages == NULL)
return ERR_PTR(-ENOMEM);
 
gfpmask |= mapping_gfp_mask(mapping);
 
for (i = 0; i < npages; i++) {
p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
if (IS_ERR(p))
goto fail;
pages[i] = p;
 
/* There is a hypothetical issue w/ drivers that require
* buffer memory in the low 4GB.. if the pages are un-
* pinned, and swapped out, they can end up swapped back
* in above 4GB. If pages are already in memory, then
* shmem_read_mapping_page_gfp will ignore the gfpmask,
* even if the already in-memory page disobeys the mask.
*
* It is only a theoretical issue today, because none of
* the devices with this limitation can be populated with
* enough memory to trigger the issue. But this BUG_ON()
* is here as a reminder in case the problem with
* shmem_read_mapping_page_gfp() isn't solved by the time
* it does become a real issue.
*
* See this thread: http://lkml.org/lkml/2011/7/11/238
*/
BUG_ON((gfpmask & __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));
}
 
list->hash.key = list->file_offset_node->start;
ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
if (ret) {
DRM_ERROR("failed to add to map hash\n");
goto out_free_mm;
return pages;
 
fail:
while (i--)
page_cache_release(pages[i]);
 
drm_free_large(pages);
return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
 
return 0;
/**
* drm_gem_put_pages - helper to free backing pages for a GEM object
* @obj: obj in question
* @pages: pages to free
* @dirty: if true, pages will be marked as dirty
* @accessed: if true, the pages will be marked as accessed
*/
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed)
{
int i, npages;
 
out_free_mm:
drm_mm_put_block(list->file_offset_node);
out_free_list:
kfree(list->map);
list->map = NULL;
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
* driver author is doing something really wrong:
*/
WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
 
return ret;
npages = obj->size >> PAGE_SHIFT;
 
for (i = 0; i < npages; i++) {
if (dirty)
set_page_dirty(pages[i]);
 
if (accessed)
mark_page_accessed(pages[i]);
 
/* Undo the reference we took when populating the table */
page_cache_release(pages[i]);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
#endif
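 
Although this tree keeps the page helpers above inside "#if 0", their intended
calling pattern is symmetric: drm_gem_get_pages() pins the shmem pages for
device access and drm_gem_put_pages() releases them, optionally dirtying them
so writeback preserves device writes. A sketch (my_bind_object is
hypothetical):
 
static int my_bind_object(struct drm_gem_object *obj)
{
	struct page **pages;
 
	pages = drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
 
	/* ... map the pages into the device's address space here ... */
 
	/* done: mark dirty and accessed so shmem writeback sees our data */
	drm_gem_put_pages(obj, pages, true, true);
	return 0;
}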
 
/** Returns a reference to the object named by the handle. */
504,9 → 557,6
goto err;
 
obj->name = ret;
 
/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
}
 
args->name = (uint64_t) obj->name;
581,7 → 631,6
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
 
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 
if (dev->driver->gem_close_object)
/drivers/video/drm/drm_global.c
68,7 → 68,6
{
int ret;
struct drm_global_item *item = &glob[ref->global_type];
void *object;
 
mutex_lock(&item->mutex);
if (item->refcount == 0) {
86,7 → 85,6
}
++item->refcount;
ref->object = item->object;
object = item->object;
mutex_unlock(&item->mutex);
return 0;
out_err:
/drivers/video/drm/drm_irq.c
44,9 → 44,8
#include <linux/export.h>
 
/* Access macro for slots in vblank timestamp ringbuffer. */
#define vblanktimestamp(dev, crtc, count) ( \
(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
((count) % DRM_VBLANKTIME_RBSIZE)])
#define vblanktimestamp(dev, crtc, count) \
((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
 
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
102,7 → 101,7
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
dev->irq_enabled = 1;
dev->irq_enabled = true;
mutex_unlock(&dev->struct_mutex);
 
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
150,45 → 149,41
}
 
/**
* drm_calc_timestamping_constants - Calculate and
* store various constants which are later needed by
* vblank and swap-completion timestamping, e.g, by
* drm_calc_vbltimestamp_from_scanoutpos().
* They are derived from crtc's true scanout timing,
* so they take things like panel scaling or other
* adjustments into account.
* drm_calc_timestamping_constants - Calculate vblank timestamp constants
*
* @crtc: drm_crtc whose timestamp constants should be updated.
* @mode: display mode containing the scanout timings
*
* Calculate and store various constants which are later
* needed by vblank and swap-completion timestamping, e.g,
* by drm_calc_vbltimestamp_from_scanoutpos(). They are
* derived from crtc's true scanout timing, so they take
* things like panel scaling or other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc)
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
u64 dotclock;
int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
int dotclock = mode->crtc_clock;
 
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
/* Valid dotclock? */
if (dotclock > 0) {
int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
 
/* Fields of interlaced scanout modes are only halve a frame duration.
* Double the dotclock to get halve the frame-/line-/pixelduration.
/*
* Convert scanline length in pixels and video
* dot clock to line duration, frame duration
* and pixel duration in nanoseconds:
*/
if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
dotclock *= 2;
pixeldur_ns = 1000000 / dotclock;
linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
 
/* Valid dotclock? */
if (dotclock > 0) {
int frame_size;
/* Convert scanline length in pixels and video dot clock to
* line duration, frame duration and pixel duration in
* nanoseconds:
/*
* Fields of interlaced scanout modes are only half a frame duration.
*/
pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
1000000000), dotclock);
frame_size = crtc->hwmode.crtc_htotal *
crtc->hwmode.crtc_vtotal;
framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
dotclock);
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
framedur_ns /= 2;
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
198,11 → 193,11
crtc->framedur_ns = framedur_ns;
 
DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
crtc->base.id, crtc->hwmode.crtc_htotal,
crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
crtc->base.id, mode->crtc_htotal,
mode->crtc_vtotal, mode->crtc_vdisplay);
DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
(int) linedur_ns, (int) pixeldur_ns);
crtc->base.id, dotclock, framedur_ns,
linedur_ns, pixeldur_ns);
}
EXPORT_SYMBOL(drm_calc_timestamping_constants);
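 
A quick sanity check of the new integer arithmetic, using the common CEA
1920x1080@60 timing (crtc_clock = 148500 kHz, crtc_htotal = 2200,
crtc_vtotal = 1125):
 
	pixeldur_ns = 1000000 / 148500                =        6 ns
	linedur_ns  = 2200 * 1000000 / 148500         =    14814 ns
	framedur_ns = 2200 * 1125 * 1000000 / 148500  = 16666666 ns  (~60 Hz)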
 
235,6 → 230,7
* 0 = Default.
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
* @refcrtc: drm_crtc* of crtc which defines scanout timing.
* @mode: mode which defines the scanout timings
*
* Returns negative value on error, failure or if not supported in current
* video mode:
254,14 → 250,13
int *max_error,
struct timeval *vblank_time,
unsigned flags,
struct drm_crtc *refcrtc)
const struct drm_crtc *refcrtc,
const struct drm_display_mode *mode)
{
// ktime_t stime, etime, mono_time_offset;
struct timeval tv_etime;
struct drm_display_mode *mode;
int vbl_status, vtotal, vdisplay;
int vbl_status;
int vpos, hpos, i;
s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
bool invbl;
 
if (crtc < 0 || crtc >= dev->num_crtcs) {
275,10 → 270,6
return -EIO;
}
 
mode = &refcrtc->hwmode;
vtotal = mode->crtc_vtotal;
vdisplay = mode->crtc_vdisplay;
 
/* Durations of frames, lines, pixels in nanoseconds. */
framedur_ns = refcrtc->framedur_ns;
linedur_ns = refcrtc->linedur_ns;
287,7 → 278,7
/* If mode timing undefined, just return as no-op:
* Happens during initial modesetting of a crtc.
*/
if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
if (framedur_ns == 0) {
DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
return -EAGAIN;
}
318,10 → 309,10
* to avoid corrupting the count if multiple, mismatch calls occur),
* so that interrupts remain enabled in the interim.
*/
if (!dev->vblank_inmodeset[crtc]) {
dev->vblank_inmodeset[crtc] = 0x1;
if (!dev->vblank[crtc].inmodeset) {
dev->vblank[crtc].inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
dev->vblank_inmodeset[crtc] |= 0x2;
dev->vblank[crtc].inmodeset |= 0x2;
}
#endif
}
336,15 → 327,15
if (!dev->num_crtcs)
return;
 
if (dev->vblank_inmodeset[crtc]) {
if (dev->vblank[crtc].inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
if (dev->vblank_inmodeset[crtc] & 0x2)
if (dev->vblank[crtc].inmodeset & 0x2)
drm_vblank_put(dev, crtc);
 
dev->vblank_inmodeset[crtc] = 0;
dev->vblank[crtc].inmodeset = 0;
}
#endif
}
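 
The inmodeset field updated above is a two-bit flag rather than a counter; a
short summary of the encoding as used by the pre/post-modeset hooks:
 
/* dev->vblank[crtc].inmodeset:
 *   0x1 - a modeset is in progress on this crtc
 *   0x2 - drm_vblank_get() succeeded in _pre_modeset, so _post_modeset
 *         must balance it with drm_vblank_put()
 */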
/drivers/video/drm/drm_modes.c
705,12 → 705,18
/**
* drm_mode_set_crtcinfo - set CRTC modesetting parameters
* @p: mode
* @adjust_flags: unused? (FIXME)
* @adjust_flags: a combination of adjustment flags
*
* LOCKING:
* None.
*
* Setup the CRTC modesetting parameters for @p, adjusting if necessary.
*
* - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
* interlaced modes.
* - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
* buffers containing two eyes (only adjust the timings when needed, e.g. for
* "frame packing" or "side by side full").
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
717,6 → 723,7
if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
return;
 
p->crtc_clock = p->clock;
p->crtc_hdisplay = p->hdisplay;
p->crtc_hsync_start = p->hsync_start;
p->crtc_hsync_end = p->hsync_end;
750,6 → 757,20
p->crtc_vtotal *= p->vscan;
}
 
if (adjust_flags & CRTC_STEREO_DOUBLE) {
unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
 
switch (layout) {
case DRM_MODE_FLAG_3D_FRAME_PACKING:
p->crtc_clock *= 2;
p->crtc_vdisplay += p->crtc_vtotal;
p->crtc_vsync_start += p->crtc_vtotal;
p->crtc_vsync_end += p->crtc_vtotal;
p->crtc_vtotal += p->crtc_vtotal;
break;
}
}
 
p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
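 
As a worked example of the frame-packing branch above, take HDMI 1080p/24
frame packing (vdisplay 1080, vtotal 1125, clock 74250 kHz):
 
	crtc_clock    = 74250 * 2   = 148500 kHz
	crtc_vdisplay = 1080 + 1125 = 2205 active lines
	crtc_vtotal   = 1125 + 1125 = 2250 total lines
 
so the second eye starts one full (original) frame below the first, and the
original 45-line vblank is left as the gap between the two eyes.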
828,12 → 849,16
} else if (mode1->clock != mode2->clock)
return false;
 
return drm_mode_equal_no_clocks(mode1, mode2);
if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
(mode2->flags & DRM_MODE_FLAG_3D_MASK))
return false;
 
return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);
 
/**
* drm_mode_equal_no_clocks - test modes for equality
* drm_mode_equal_no_clocks_no_stereo - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
841,12 → 866,13
* None.
*
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks.
* don't check the pixel clocks nor the stereo layout.
*
* RETURNS:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2)
{
if (mode1->hdisplay == mode2->hdisplay &&
mode1->hsync_start == mode2->hsync_start &&
858,12 → 884,13
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
mode1->flags == mode2->flags)
(mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
(mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
return true;
 
return false;
}
EXPORT_SYMBOL(drm_mode_equal_no_clocks);
EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
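 
With this split, two modes that differ only in their stereo layout now compare
unequal in drm_mode_equal(), while the renamed helper ignores both the clocks
and the layout. Roughly, given two otherwise identical modes a and b:
 
	a.flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
 
	drm_mode_equal(&a, &b);                     /* false: 3D masks differ */
	drm_mode_equal_no_clocks_no_stereo(&a, &b); /* true: layout ignored   */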
 
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
/drivers/video/drm/drm_pci.c
102,7 → 102,7
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
ClearPageReserved(virt_to_page((void *)addr));
}
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
260,16 → 260,11
return 0;
}
 
static int drm_pci_agp_init(struct drm_device *dev)
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_has_AGP(dev)) {
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
if (drm_pci_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
&& (dev->agp == NULL)) {
DRM_ERROR("Cannot initialize the agpgart module.\n");
return -EINVAL;
}
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
dev->agp->agp_info.aper_base,
277,15 → 272,14
1024 * 1024);
}
}
return 0;
}
 
static void drm_pci_agp_destroy(struct drm_device *dev)
void drm_pci_agp_destroy(struct drm_device *dev)
{
if (drm_core_has_AGP(dev) && dev->agp) {
if (dev->agp) {
arch_phys_wc_del(dev->agp->agp_mtrr);
drm_agp_clear(dev);
drm_agp_destroy(dev->agp);
kfree(dev->agp);
dev->agp = NULL;
}
}
297,8 → 291,6
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
.agp_destroy = drm_pci_agp_destroy,
};
#endif
 
/drivers/video/drm/drm_stub.c
32,7 → 32,7
*/
 
#include <linux/module.h>
 
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
79,6 → 79,7
const char *function_name,
const char *format, ...)
{
struct va_format vaf;
va_list args;
 
// if (drm_debug & request_level) {
/drivers/video/drm/drm_vma_manager.c
25,7 → 25,7
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
//#include <linux/fs.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
/drivers/video/drm/i915/hmm.c
File deleted
/drivers/video/drm/i915/intel_fb.c
File deleted
/drivers/video/drm/i915/execbuffer.c
File deleted
/drivers/video/drm/i915/Makefile
3,7 → 3,7
CC = gcc.exe
FASM = e:/fasm/fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DRM_I915_FBDEV
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
50,8 → 50,8
i915_drv.c \
i915_gem.c \
i915_gem_context.c \
i915_gem_execbuffer.c \
i915_gem_gtt.c \
i915_gem_execbuffer.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_gpu_error.c \
61,13 → 61,15
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dsi.c \
intel_dsi_cmd.c \
intel_dsi_pll.c \
intel_dvo.c \
intel_fb.c \
intel_fbdev.c \
intel_hdmi.c \
intel_i2c.c \
intel_lvds.c \
intel_modes.c \
intel_opregion.c \
intel_panel.c \
intel_pm.c \
intel_ringbuffer.c \
/drivers/video/drm/i915/Makefile.lto
2,7 → 2,7
CC = gcc
FASM = e:/fasm/fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DRM_I915_FBDEV
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
49,8 → 49,8
i915_drv.c \
i915_gem.c \
i915_gem_context.c \
i915_gem_execbuffer.c \
i915_gem_gtt.c \
i915_gem_execbuffer.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_gpu_error.c \
60,13 → 60,15
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dsi.c \
intel_dsi_cmd.c \
intel_dsi_pll.c \
intel_dvo.c \
intel_fb.c \
intel_fbdev.c \
intel_hdmi.c \
intel_i2c.c \
intel_lvds.c \
intel_modes.c \
intel_opregion.c \
intel_panel.c \
intel_pm.c \
intel_ringbuffer.c \
/drivers/video/drm/i915/dvo.h
77,17 → 77,6
struct drm_display_mode *mode);
 
/*
* Callback to adjust the mode to be set in the CRTC.
*
* This allows an output to adjust the clock or even the entire set of
* timings, which is used for panels with fixed timings or for
* buses with clock limitations.
*/
bool (*mode_fixup)(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
/*
* Callback for preparing mode changes on an output
*/
void (*prepare)(struct intel_dvo_device *dvo);
/drivers/video/drm/i915/dvo_ns2501.c
87,50 → 87,7
* when switching the resolution.
*/
 
static void enable_dvo(struct intel_dvo_device *dvo)
{
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
struct i2c_adapter *adapter = dvo->i2c_bus;
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
 
DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
 
ns->dvoc = I915_READ(DVO_C);
ns->pll_a = I915_READ(_DPLL_A);
ns->srcdim = I915_READ(DVOC_SRCDIM);
ns->fw_blc = I915_READ(FW_BLC);
 
I915_WRITE(DVOC, 0x10004084);
I915_WRITE(_DPLL_A, 0xd0820000);
I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
I915_WRITE(FW_BLC, 0x1080304);
 
I915_WRITE(DVOC, 0x90004084);
}
 
/*
* Restore the I915 registers modified by the above
* trigger function.
*/
static void restore_dvo(struct intel_dvo_device *dvo)
{
struct i2c_adapter *adapter = dvo->i2c_bus;
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
I915_WRITE(DVOC, ns->dvoc);
I915_WRITE(_DPLL_A, ns->pll_a);
I915_WRITE(DVOC_SRCDIM, ns->srcdim);
I915_WRITE(FW_BLC, ns->fw_blc);
}
 
/*
** Read a register from the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to enable the
300,7 → 257,7
struct drm_display_mode *adjusted_mode)
{
bool ok;
bool restore = false;
int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
DRM_DEBUG_KMS
476,21 → 433,8
ns->reg_8_shadow |= NS2501_8_BPAS;
}
ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
 
if (!ok) {
if (restore)
restore_dvo(dvo);
enable_dvo(dvo);
restore = true;
} while (!ok && retries--);
}
} while (!ok);
/*
* Restore the old i915 registers before
* forcing the ns2501 on.
*/
if (restore)
restore_dvo(dvo);
}
 
/* set the NS2501 power state */
static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
510,7 → 454,7
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
bool ok;
bool restore = false;
int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
unsigned char ch;
 
537,18 → 481,9
ok &=
ns2501_writeb(dvo, 0x35,
enable ? 0xff : 0x00);
if (!ok) {
if (restore)
restore_dvo(dvo);
enable_dvo(dvo);
restore = true;
} while (!ok && retries--);
}
} while (!ok);
 
if (restore)
restore_dvo(dvo);
}
}
 
static void ns2501_dump_regs(struct intel_dvo_device *dvo)
{
/drivers/video/drm/i915/i915_dma.c
54,7 → 54,7
intel_ring_emit(LP_RING(dev_priv), x)
 
#define ADVANCE_LP_RING() \
intel_ring_advance(LP_RING(dev_priv))
__intel_ring_advance(LP_RING(dev_priv))
 
/**
* Lock test for when it's just for synchronization of ring access.
85,6 → 85,14
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
 
/*
* The dri breadcrumb update races against the drm master disappearing.
* Instead of trying to fix this (this is by far not the only ums issue)
* just don't do the update in kms mode.
*/
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
 
if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
647,7 → 655,7
 
if (batch->num_cliprects) {
cliprects = kcalloc(batch->num_cliprects,
sizeof(struct drm_clip_rect),
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL)
return -ENOMEM;
709,7 → 717,7
 
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect), GFP_KERNEL);
sizeof(*cliprects), GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto fail_batch_free;
789,7 → 797,7
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
if (ring->irq_get(ring)) {
DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
ring->irq_put(ring);
} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
826,7 → 834,7
result = i915_emit_irq(dev);
mutex_unlock(&dev->struct_mutex);
 
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
1151,7 → 1159,7
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
 
fb_obj = kos_gem_fb_object_create(dev,0,12*1024*1024);
main_fb_obj = kos_gem_fb_object_create(dev,0,16*1024*1024);
 
/* Initialise stolen first so that we may reserve preallocated
* objects for the BIOS to KMS transition.
1164,6 → 1172,8
if (ret)
goto cleanup_gem_stolen;
 
intel_power_domains_init_hw(dev);
 
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
1170,7 → 1180,7
 
ret = i915_gem_init(dev);
if (ret)
goto cleanup_irq;
goto cleanup_power;
 
 
intel_modeset_gem_init(dev);
1177,9 → 1187,11
 
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
if (INTEL_INFO(dev)->num_pipes == 0)
dev->vblank_disable_allowed = true;
if (INTEL_INFO(dev)->num_pipes == 0) {
intel_display_power_put(dev, POWER_DOMAIN_VGA);
return 0;
}
 
ret = intel_fbdev_init(dev);
if (ret)
1213,7 → 1225,9
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_irq:
drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
intel_display_power_put(dev, POWER_DOMAIN_VGA);
// drm_irq_uninstall(dev);
cleanup_gem_stolen:
// i915_gem_cleanup_stolen(dev);
1227,7 → 1241,33
 
 
 
#if IS_ENABLED(CONFIG_FB)
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
bool primary;
 
ap = alloc_apertures(1);
if (!ap)
return;
 
ap->ranges[0].base = dev_priv->gtt.mappable_base;
ap->ranges[0].size = dev_priv->gtt.mappable_end;
 
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 
remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
 
kfree(ap);
}
#else
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
}
#endif
 
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = dev_priv->info;
1268,7 → 1308,7
info = (struct intel_device_info *) flags;
 
 
dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
 
1278,21 → 1318,16
 
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->backlight.lock);
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
 
mutex_init(&dev_priv->pc8.lock);
dev_priv->pc8.requirements_met = false;
dev_priv->pc8.gpu_idle = false;
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
intel_pm_setup(dev);
 
intel_display_crc_init(dev);
 
i915_dump_device_info(dev_priv);
 
/* Not all pre-production machines fall into this category, only the
1330,20 → 1365,17
 
intel_uncore_early_sanitize(dev);
 
if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
* NB: We can't write IDICR yet because we do not have gt funcs
* set up */
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
 
intel_uncore_init(dev);
 
ret = i915_gem_gtt_init(dev);
if (ret)
goto put_bridge;
goto out_regs;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
 
pci_set_master(dev->pdev);
 
1363,7 → 1395,7
dev_priv->gtt.mappable = AllocKernelSpace(8192);
if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
goto out_rmmap;
goto out_gtt;
}
 
/* The i915 workqueue is primarily used for batched retirement of
1387,13 → 1419,9
}
system_wq = dev_priv->wq;
 
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
 
intel_irq_init(dev);
intel_pm_init(dev);
intel_uncore_sanitize(dev);
intel_uncore_init(dev);
 
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
1420,14 → 1448,19
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
 
if (HAS_POWER_WELL(dev))
i915_init_power_well(dev);
// if (INTEL_INFO(dev)->num_pipes) {
// ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
// if (ret)
// goto out_gem_unload;
// }
 
intel_power_domains_init(dev);
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_gem_unload;
goto out_power_well;
}
} else {
/* Start out suspended in ums mode. */
1444,28 → 1477,19
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
 
intel_init_runtime_pm(dev_priv);
 
main_device = dev;
 
return 0;
 
out_power_well:
out_gem_unload:
// if (dev_priv->mm.inactive_shrinker.shrink)
// unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
// if (dev->pdev->msi_enabled)
// pci_disable_msi(dev->pdev);
 
// intel_teardown_gmbus(dev);
// intel_teardown_mchbar(dev);
// destroy_workqueue(dev_priv->wq);
out_mtrrfree:
// arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
// io_mapping_free(dev_priv->gtt.mappable);
// dev_priv->gtt.gtt_remove(dev);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
out_gtt:
out_regs:
put_bridge:
// pci_dev_put(dev_priv->bridge_dev);
free_priv:
kfree(dev_priv);
return ret;
1478,15 → 1502,21
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ret = i915_gem_suspend(dev);
if (ret) {
DRM_ERROR("failed to idle hardware: %d\n", ret);
return ret;
}
 
intel_fini_runtime_pm(dev_priv);
 
intel_gpu_ips_teardown();
 
if (HAS_POWER_WELL(dev)) {
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_set_power_well(dev, true);
i915_remove_power_well(dev);
}
intel_display_set_init_power(dev, true);
intel_power_domains_remove(dev);
 
i915_teardown_sysfs(dev);
 
1493,16 → 1523,6
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
mutex_lock(&dev->struct_mutex);
ret = i915_gpu_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
i915_gem_retire_requests(dev);
mutex_unlock(&dev->struct_mutex);
 
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
 
1557,10 → 1577,9
 
list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
drm_mm_takedown(&dev_priv->gtt.base.mm);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
 
drm_vblank_cleanup(dev);
 
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
 
1569,6 → 1588,10
 
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 
intel_uncore_fini(dev);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
 
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
 
1622,7 → 1645,7
return;
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fb_restore_mode(dev);
intel_fbdev_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
}
1634,8 → 1657,10
 
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
}
 
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1694,6 → 1719,7
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
 
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1709,4 → 1735,3
}
#endif
 
 
/drivers/video/drm/i915/i915_drv.c
62,17 → 62,17
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
 
unsigned int i915_powersave __read_mostly = 0;
unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
module_param_named(semaphores, i915_semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
int i915_enable_rc6 __read_mostly = 0;
int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6. "
81,7 → 81,7
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
 
int i915_enable_fbc __read_mostly = 0;
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
122,8 → 122,8
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
 
int i915_enable_ppgtt __read_mostly = 0;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
int i915_enable_ppgtt __read_mostly = -1;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
 
172,6 → 172,7
static const struct intel_device_info intel_i915g_info = {
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
178,10 → 179,13
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
188,6 → 192,8
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
};
 
static const struct intel_device_info intel_i965g_info = {
194,6 → 200,7
.gen = 4, .is_broadwater = 1, .num_pipes = 2,
.has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
};
 
static const struct intel_device_info intel_i965gm_info = {
201,6 → 208,7
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING,
};
 
static const struct intel_device_info intel_g33_info = {
207,12 → 215,13
.gen = 3, .is_g33 = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
};
 
static const struct intel_device_info intel_g45_info = {
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
 
static const struct intel_device_info intel_gm45_info = {
220,7 → 229,7
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.has_bsd_ring = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
 
static const struct intel_device_info intel_pineview_info = {
232,7 → 241,7
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
 
static const struct intel_device_info intel_ironlake_m_info = {
239,16 → 248,15
.gen = 5, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
 
static const struct intel_device_info intel_sandybridge_d_info = {
.gen = 6, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
.has_force_wake = 1,
};
 
static const struct intel_device_info intel_sandybridge_m_info = {
255,19 → 263,16
.gen = 6, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
.has_force_wake = 1,
};
 
#define GEN7_FEATURES \
.gen = 7, .num_pipes = 3, \
.need_gfx_hws = 1, .has_hotplug = 1, \
.has_bsd_ring = 1, \
.has_blt_ring = 1, \
.has_llc = 1, \
.has_force_wake = 1
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_llc = 1
 
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
278,7 → 283,6
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
.has_fbc = 1,
};
 
static const struct intel_device_info intel_ivybridge_q_info = {
293,6 → 297,7
.num_pipes = 2,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
};
 
301,6 → 306,7
.num_pipes = 2,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
};
 
309,7 → 315,7
.is_haswell = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_vebox_ring = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};
 
static const struct intel_device_info intel_haswell_m_info = {
318,10 → 324,25
.is_mobile = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
.has_vebox_ring = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};
 
static const struct intel_device_info intel_broadwell_d_info = {
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
};
 
static const struct intel_device_info intel_broadwell_m_info = {
.gen = 8, .is_mobile = 1, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
};
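 
With the per-ring feature bits folded into ring_mask, a ring-presence check
reduces to a mask test. A sketch of the kind of macros this enables (the exact
names used elsewhere in this tree may differ):
 
#define HAS_BSD(dev)   (INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BLT(dev)   (INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)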
 
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
349,7 → 370,9
INTEL_HSW_D_IDS(&intel_haswell_d_info), \
INTEL_HSW_M_IDS(&intel_haswell_m_info), \
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
INTEL_VLV_D_IDS(&intel_valleyview_d_info)
INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
INTEL_BDW_D_IDS(&intel_broadwell_d_info)
 
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
405,7 → 428,7
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
412,6 → 435,12
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
DRM_DEBUG_KMS("This is Broadwell, assuming "
"LynxPoint LP PCH\n");
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
433,8 → 462,14
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
return 0;
return false;
 
/* Until we get further testing... */
if (IS_GEN8(dev)) {
WARN_ON(!i915_preliminary_hw_support);
return false;
}
 
if (i915_semaphores >= 0)
return i915_semaphores;
 
444,7 → 479,7
return false;
#endif
 
return 1;
return true;
}
 
#if 0
453,6 → 488,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
intel_runtime_pm_get(dev_priv);
 
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_SUSPENDED;
461,7 → 498,7
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
hsw_disable_package_c8(dev_priv);
intel_set_power_well(dev, true);
intel_display_set_init_power(dev, true);
 
drm_kms_helper_poll_disable(dev);
 
471,9 → 508,7
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int error;
 
mutex_lock(&dev->struct_mutex);
error = i915_gem_idle(dev);
mutex_unlock(&dev->struct_mutex);
error = i915_gem_suspend(dev);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
488,12 → 523,16
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
*/
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc);
mutex_unlock(&dev->mode_config.mutex);
 
intel_modeset_suspend_hw(dev);
}
 
i915_gem_suspend_gtt_mappings(dev);
 
i915_save_state(dev);
 
intel_opregion_fini(dev);
565,11 → 604,24
drm_helper_hpd_irq_event(dev);
}
 
static int __i915_drm_thaw(struct drm_device *dev)
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
 
intel_uncore_early_sanitize(dev);
 
intel_uncore_sanitize(dev);
 
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
restore_gtt_mappings) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
 
intel_power_domains_init_hw(dev);
 
i915_restore_state(dev);
intel_opregion_setup(dev);
 
588,6 → 640,7
intel_modeset_init_hw(dev);
 
drm_modeset_lock_all(dev);
drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
 
624,25 → 677,17
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
 
intel_runtime_pm_put(dev_priv);
return error;
}
 
static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
 
intel_uncore_sanitize(dev);
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
} else if (drm_core_check_feature(dev, DRIVER_MODESET))
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_check_and_clear_faults(dev);
 
__i915_drm_thaw(dev);
 
return error;
return __i915_drm_thaw(dev, true);
}
 
int i915_resume(struct drm_device *dev)
658,20 → 703,12
 
pci_set_master(dev->pdev);
 
intel_uncore_sanitize(dev);
 
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need this since the BIOS might clear all our scratch PTEs.
* earlier) need to restore the GTT mappings since the BIOS might clear
* all our scratch PTEs.
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
!dev_priv->opregion.header) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
 
ret = __i915_drm_thaw(dev);
ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
if (ret)
return ret;
 
709,10 → 746,6
 
simulated = dev_priv->gpu_error.stop_rings != 0;
 
if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
ret = -ENODEV;
} else {
ret = intel_gpu_reset(dev);
 
/* Also reset the gpu hangman. */
720,15 → 753,14
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
DRM_INFO("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
ret = 0;
}
} else
dev_priv->gpu_error.last_reset = get_seconds();
}
 
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
return ret;
}
749,31 → 781,15
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
struct intel_ring_buffer *ring;
int i;
 
dev_priv->ums.mm_suspended = 0;
 
i915_gem_init_swizzling(dev);
 
for_each_ring(ring, dev_priv, i)
ring->init(ring);
 
i915_gem_context_init(dev);
if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret)
i915_gem_cleanup_aliasing_ppgtt(dev);
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
return ret;
}
 
/*
* It would make sense to re-init all the other hw state, at
* least the rps/rc6/emon init done within modeset_init_hw. For
* some unknown reason, this blows up my ilk, so don't.
*/
 
mutex_unlock(&dev->struct_mutex);
 
drm_irq_uninstall(dev);
drm_irq_install(dev);
intel_hpd_init(dev);
789,6 → 805,12
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
 
if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
DRM_INFO("This hardware requires preliminary hardware support.\n"
"See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
return -ENODEV;
}
 
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
881,6 → 903,7
return i915_drm_freeze(drm_dev);
}
 
 
#endif
 
static struct drm_driver driver = {
888,7 → 911,7
* deal with them for Intel hardware.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER,
.load = i915_driver_load,
909,7 → 932,6
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
#endif
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
 
// .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
/drivers/video/drm/i915/i915_drv.h
66,6 → 66,7
#define DRIVER_DATE "20080730"
 
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
PIPE_B,
PIPE_C,
100,6 → 101,18
};
#define port_name(p) ((p) + 'A')
 
#define I915_NUM_PHYS_VLV 1
 
enum dpio_channel {
DPIO_CH0,
DPIO_CH1
};
 
enum dpio_phy {
DPIO_PHY0,
DPIO_PHY1
};
 
enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
110,14 → 123,31
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
POWER_DOMAIN_INIT,
 
POWER_DOMAIN_NUM,
};
 
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
#define POWER_DOMAIN_TRANSCODER(tran) \
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
 
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP))
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
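 
The conditional in POWER_DOMAIN_TRANSCODER() is needed because TRANSCODER_EDP
is not contiguous with transcoders A-C in the enum. For example:
 
	POWER_DOMAIN_TRANSCODER(TRANSCODER_B)   /* -> POWER_DOMAIN_TRANSCODER_B */
	POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) /* -> POWER_DOMAIN_TRANSCODER_EDP,
	                                         *    no longer the A + 0xF hack */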
 
enum hpd_pin {
HPD_NONE = 0,
HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
237,9 → 267,12
struct opregion_header __iomem *header;
struct opregion_acpi __iomem *acpi;
struct opregion_swsci __iomem *swsci;
u32 swsci_gbda_sub_functions;
u32 swsci_sbcb_sub_functions;
struct opregion_asle __iomem *asle;
void __iomem *vbt;
u32 __iomem *lid_state;
struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)
 
297,11 → 330,12
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
u32 bbstate[I915_NUM_RINGS];
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
u32 extra_instdone[I915_NUM_INSTDONE_REG];
u32 seqno[I915_NUM_RINGS];
u64 bbaddr;
u64 bbaddr[I915_NUM_RINGS];
u32 fault_reg[I915_NUM_RINGS];
u32 done_reg;
u32 faddr[I915_NUM_RINGS];
308,6 → 342,7
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_ring {
bool valid;
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
333,13 → 368,16
u32 dirty:1;
u32 purgeable:1;
s32 ring:4;
u32 cache_level:2;
u32 cache_level:3;
} **active_bo, **pinned_bo;
u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
int hangcheck_score[I915_NUM_RINGS];
enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
 
struct intel_connector;
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
347,7 → 385,7
 
struct drm_i915_display_funcs {
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*enable_fbc)(struct drm_crtc *crtc);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
369,7 → 407,7
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_device *dev);
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
379,7 → 417,6
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
387,7 → 424,8
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
struct drm_crtc *crtc,
struct drm_display_mode *mode);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
402,11 → 440,34
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
 
int (*setup_backlight)(struct intel_connector *connector);
uint32_t (*get_backlight)(struct intel_connector *connector);
void (*set_backlight)(struct intel_connector *connector,
uint32_t level);
void (*disable_backlight)(struct intel_connector *connector);
void (*enable_backlight)(struct intel_connector *connector);
};
 
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
void (*force_wake_get)(struct drm_i915_private *dev_priv,
int fw_engine);
void (*force_wake_put)(struct drm_i915_private *dev_priv,
int fw_engine);
 
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
 
void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
uint8_t val, bool trace);
void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
uint16_t val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
uint32_t val, bool trace);
void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
uint64_t val, bool trace);
};
 
struct intel_uncore {
416,6 → 477,11
 
unsigned fifo_count;
unsigned forcewake_count;
 
unsigned fw_rendercount;
unsigned fw_mediacount;
 
struct delayed_work force_wake_work;
};
 
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
432,7 → 498,7
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
func(has_force_wake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
func(has_hotplug) sep \
440,9 → 506,6
func(has_overlay) sep \
func(overlay_needs_physical) sep \
func(supports_tv) sep \
func(has_bsd_ring) sep \
func(has_blt_ring) sep \
func(has_vebox_ring) sep \
func(has_llc) sep \
func(has_ddi) sep \
func(has_fpga_dbg)
454,6 → 517,7
u32 display_mmio_offset;
u8 num_pipes:3;
u8 gen;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
 
554,10 → 618,21
struct i915_hw_ppgtt {
struct i915_address_space base;
unsigned num_pd_entries;
union {
struct page **pt_pages;
struct page *gen8_pt_pages;
};
struct page **pd_pages;
int num_pd_pages;
int num_pt_pages;
union {
uint32_t pd_offset;
dma_addr_t pd_dma_addr[4];
};
union {
dma_addr_t *pt_dma_addr;
 
dma_addr_t *gen8_pt_dma_addr[4];
};
int (*enable)(struct drm_device *dev);
};
 
582,6 → 657,13
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
 
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
 
};
 
struct i915_ctx_hang_stats {
590,6 → 672,12
 
/* This context had batch active when hang was declared */
unsigned batch_active;
 
/* Time when this context was last blamed for a GPU reset */
unsigned long guilty_ts;
 
/* This context is banned to submit more work */
bool banned;
};
 
/* This must match up with the value previously used for execbuf2.rsvd1. */
598,10 → 686,13
struct kref ref;
int id;
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
 
struct list_head link;
};
 
struct i915_fbc {
617,7 → 708,6
struct delayed_work work;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int interval;
} *fbc_work;
 
enum no_fbc_reason {
635,17 → 725,9
} no_fbc_reason;
};
 
enum no_psr_reason {
PSR_NO_SOURCE, /* Not supported on platform */
PSR_NO_SINK, /* Not supported by panel */
PSR_MODULE_PARAM,
PSR_CRTC_NOT_ACTIVE,
PSR_PWR_WELL_ENABLED,
PSR_NOT_TILED,
PSR_SPRITE_ENABLED,
PSR_S3D_ENABLED,
PSR_INTERLACED_ENABLED,
PSR_HSW_NOT_DDIA,
struct i915_psr {
bool sink_support;
bool source_ok;
};
 
enum intel_pch {
664,7 → 746,6
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
 
struct intel_fbdev;
struct intel_fbc_work;
716,6 → 797,7
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
u32 saveBLC_HIST_CTL_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
835,9 → 917,6
struct work_struct work;
u32 pm_iir;
 
/* On vlv we need to manually drop to Vmin with a delayed work. */
struct delayed_work vlv_work;
 
/* The below variables an all the rps hw state are protected by
* dev->struct mutext. */
u8 cur_delay;
844,8 → 923,14
u8 min_delay;
u8 max_delay;
u8 rpe_delay;
u8 rp1_delay;
u8 rp0_delay;
u8 hw_max;
 
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
bool enabled;
struct delayed_work delayed_resume_work;
 
/*
882,13 → 967,31
 
/* Power well structure for haswell */
struct i915_power_well {
struct drm_device *device;
spinlock_t lock;
const char *name;
bool always_on;
/* power well enable/disable usage count */
int count;
int i915_request;
unsigned long domains;
void *data;
void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
bool enable);
bool (*is_enabled)(struct drm_device *dev,
struct i915_power_well *power_well);
};
 
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
* time are on. They are kept on until after the first modeset.
*/
bool init_power_on;
int power_well_count;
 
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
struct i915_power_well *power_wells;
};
 
struct i915_dri1_state {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
914,9 → 1017,11
int mm_suspended;
};
 
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info;
u32 *remap_info[MAX_L3_SLICES];
struct work_struct error_work;
int which_slice;
};
 
struct i915_gem_mm {
938,8 → 1043,6
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
 
bool shrinker_no_lock_stealing;
 
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
 
953,6 → 1056,15
struct delayed_work retire_work;
 
/**
* When we detect an idle GPU, we want to turn on
* powersaving features. So once we see that there
* are no more requests outstanding and no more
* arrive within a small period of time, we fire
* off the idle_work.
*/
struct delayed_work idle_work;
 
/**
* Are we in a non-interruptible section of code like
* modesetting?
*/
990,6 → 1102,9
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
 
struct timer_list hangcheck_timer;
 
/* For reset and error_state handling. */
998,37 → 1113,34
struct drm_i915_error_state *first_error;
struct work_struct work;
 
unsigned long last_reset;
 
unsigned long missed_irq_rings;
 
/**
* State variable and reset counter controlling the reset flow
* State variable controlling the reset flow and count
*
* Upper bits are for the reset counter. This counter is used by the
* wait_seqno code to race-free noticed that a reset event happened and
* that it needs to restart the entire ioctl (since most likely the
* seqno it waited for won't ever signal anytime soon).
* This is a counter which gets incremented when reset is triggered,
* and again when reset has been handled. So odd values (lowest bit set)
* mean that a reset is in progress, and even values mean that the
* (reset_counter >> 1):th reset was successfully completed.
*
* If reset is not completed successfully, the I915_WEDGED bit is
* set meaning that hardware is terminally sour and there is no
* recovery. All waiters on the reset_queue will be woken when
* that happens.
*
* This counter is used by the wait_seqno code to notice that a reset
* event happened and that it needs to restart the entire ioctl (since most
* likely the seqno it waited for won't ever signal anytime soon).
*
* This is important for lock-free wait paths, where no contended lock
* naturally enforces the correct ordering between the bail-out of the
* waiter and the gpu reset work code.
*
* Lowest bit controls the reset state machine: Set means a reset is in
* progress. This state will (presuming we don't have any bugs) decay
* into either unset (successful reset) or the special WEDGED value (hw
* terminally sour). All waiters on the reset_queue will be woken when
* that happens.
*/
atomic_t reset_counter;
 
/**
* Special values/flags for reset_counter
*
* Note that the code relies on
* I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
* being true.
*/
#define I915_RESET_IN_PROGRESS_FLAG 1
#define I915_WEDGED 0xffffffff
#define I915_WEDGED (1 << 31)
 
/**
* Waitqueue to signal when the reset has completed. Used by clients
1038,6 → 1150,9
 
/* For gpu hang simulation. */
unsigned int stop_rings;
 
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
};
 
enum modeset_restore {
1046,6 → 1161,14
MODESET_SUSPENDED,
};
 
struct ddi_vbt_port_info {
uint8_t hdmi_level_shift;
 
uint8_t supports_dvi:1;
uint8_t supports_hdmi:1;
uint8_t supports_dp:1;
};
 
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1071,10 → 1194,22
int edp_bpp;
struct edp_power_seq edp_pps;
 
struct {
u16 pwm_freq_hz;
bool active_low_pwm;
} backlight;
 
/* MIPI DSI */
struct {
u16 panel_id;
} dsi;
 
int crt_ddc_pin;
 
int child_dev_num;
struct child_device_config *child_dev;
union child_device_config *child_dev;
 
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};
 
enum intel_ddb_partitioning {
1090,6 → 1225,15
uint32_t fbc_val;
};
 
struct ilk_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
uint32_t wm_lp_spr[3];
uint32_t wm_linetime[3];
bool enable_fbc_wm;
enum intel_ddb_partitioning partitioning;
};
 
/*
* This struct tracks the state needed for the Package C8+ feature.
*
1159,6 → 1303,40
} regsave;
};
 
struct i915_runtime_pm {
bool suspended;
};
 
enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
INTEL_PIPE_CRC_SOURCE_PLANE2,
INTEL_PIPE_CRC_SOURCE_PF,
INTEL_PIPE_CRC_SOURCE_PIPE,
/* TV/DP on pre-gen5/vlv can't use the pipe source. */
INTEL_PIPE_CRC_SOURCE_TV,
INTEL_PIPE_CRC_SOURCE_DP_B,
INTEL_PIPE_CRC_SOURCE_DP_C,
INTEL_PIPE_CRC_SOURCE_DP_D,
INTEL_PIPE_CRC_SOURCE_AUTO,
INTEL_PIPE_CRC_SOURCE_MAX,
};
 
struct intel_pipe_crc_entry {
uint32_t frame;
uint32_t crc[5];
};
 
#define INTEL_PIPE_CRC_ENTRIES_NR 128
struct intel_pipe_crc {
spinlock_t lock;
bool opened; /* exclusive access to the result file */
struct intel_pipe_crc_entry *entries;
enum intel_pipe_crc_source source;
int head, tail;
wait_queue_head_t wq;
};
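/*
 * Illustrative sketch (not part of this patch): entries[] behaves as a
 * ring buffer of INTEL_PIPE_CRC_ENTRIES_NR slots indexed by head/tail.
 * A consumer holding pipe_crc->lock might advance roughly like this
 * (hypothetical helper; modulo arithmetic shown for clarity):
 */
static inline struct intel_pipe_crc_entry *
example_pipe_crc_pop(struct intel_pipe_crc *pipe_crc)
{
	struct intel_pipe_crc_entry *entry;

	if (pipe_crc->head == pipe_crc->tail)
		return NULL;	/* ring buffer empty */

	entry = &pipe_crc->entries[pipe_crc->tail];
	pipe_crc->tail = (pipe_crc->tail + 1) % INTEL_PIPE_CRC_ENTRIES_NR;
	return entry;
}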
 
typedef struct drm_i915_private {
struct drm_device *dev;
 
1203,7 → 1381,10
struct mutex dpio_lock;
 
/** Cached value of IMR to avoid reads in updating the bitfield */
union {
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 gt_irq_mask;
u32 pm_irq_mask;
 
1229,15 → 1410,9
 
/* overlay */
struct intel_overlay *overlay;
unsigned int sprite_scaling_enabled;
 
/* backlight */
struct {
int level;
bool enabled;
spinlock_t lock; /* bl registers and the above bl fields */
struct backlight_device *device;
} backlight;
/* backlight registers and fields in struct intel_panel */
spinlock_t backlight_lock;
 
/* LVDS info */
bool no_aux_handshake;
1282,9 → 1457,14
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
 
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
 
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
/* Reclocking support */
bool render_reclock_avail;
1307,17 → 1487,18
* mchdev_lock in intel_pm.c */
struct intel_ilk_power_mgmt ips;
 
/* Haswell power well */
struct i915_power_well power_well;
struct i915_power_domains power_domains;
 
enum no_psr_reason no_psr_reason;
struct i915_psr psr;
 
struct i915_gpu_error gpu_error;
 
struct drm_i915_gem_object *vlv_pctx;
 
#ifdef CONFIG_DRM_I915_FBDEV
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
#endif
 
/*
* The console may be contended at resume, but we don't
1328,8 → 1509,8
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
 
bool hw_contexts_disabled;
uint32_t hw_context_size;
struct list_head context_list;
 
u32 fdi_rx_config;
 
1347,10 → 1528,15
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
 
/* current hardware state */
struct ilk_wm_values hw;
} wm;
 
struct i915_package_c8 pc8;
 
struct i915_runtime_pm pm;
 
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
1410,8 → 1596,6
struct list_head ring_list;
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
 
/**
* This is set if the object is on the active lists (has pending
1497,13 → 1681,6
void *dma_buf_vmapping;
int vmapping_count;
 
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
 
struct intel_ring_buffer *ring;
 
/** Breadcrumb of last rendering to the buffer. */
1515,11 → 1692,14
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
 
/** References from framebuffers, locks out tiling changes. */
unsigned long framebuffer_references;
 
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
 
/** User space pin count and filp owning the pin */
uint32_t user_pin_count;
unsigned long user_pin_count;
struct drm_file *pin_filp;
 
/** for phy allocated objects */
1570,48 → 1750,61
};
 
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
 
struct {
spinlock_t lock;
struct list_head request_list;
struct delayed_work idle_work;
} mm;
struct idr context_idr;
 
struct i915_ctx_hang_stats hang_stats;
atomic_t rps_wait_boost;
};
 
#define INTEL_INFO(dev) (to_i915(dev)->info)
 
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
(dev)->pci_device == 0x0152 || \
(dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
(dev)->pci_device == 0x0106 || \
(dev)->pci_device == 0x010A)
#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
(dev)->pdev->device == 0x0152 || \
(dev)->pdev->device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
(dev)->pdev->device == 0x0106 || \
(dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0A00)
((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
(((dev)->pdev->device & 0xf) == 0x2 || \
((dev)->pdev->device & 0xf) == 0x6 || \
((dev)->pdev->device & 0xf) == 0xe))
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
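/* Illustrative: a Haswell ULT device id such as 0x0a16 has
 * (id & 0xFF00) == 0x0A00, so IS_HSW_ULT() and therefore IS_ULT()
 * evaluate true for it. */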
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
/*
* The genX designation typically refers to the render engine, so render
1625,10 → 1818,15
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
 
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
#define BLT_RING (1<<BCS)
#define VEBOX_RING (1<<VECS)
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
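/* Illustrative (not from the patch): a part with render, bsd and blt
 * engines would declare .ring_mask = RENDER_RING | BSD_RING | BLT_RING
 * in its intel_info, making HAS_BSD()/HAS_BLT() true and HAS_VEBOX()
 * false. */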
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1650,19 → 1848,20
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
 
#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
#define HAS_IPS(dev) (IS_ULT(dev))
#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
 
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
#define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
 
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1678,35 → 1877,14
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 
#define GT_FREQUENCY_MULTIPLIER 50
 
#include "i915_trace.h"
 
/**
* RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while at this stage. This
* stage is entered automatically when the GPU is idle and RC6 support is
* enabled, and as soon as a new workload arises the GPU wakes up
* automatically as well.
*
* There are different RC6 modes available in Intel GPUs, which differ in
* the latency required to enter and leave RC6 and in the voltage consumed
* by the GPU in different states.
*
* The combination of the following flags defines which states the GPU is
* allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
* RC6pp is the deepest RC6. Their support by hardware varies according to
* the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
* the one which brings the most power savings; deeper states save more
* power, but require higher latency to switch to and wake up.
*/
#define INTEL_RC6_ENABLE (1<<0)
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
 
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
1764,21 → 1942,19
void i915_handle_error(struct drm_device *dev, bool wedged);
 
extern void intel_irq_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
 
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
 
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1830,14 → 2006,11
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
void i915_gem_vma_destroy(struct i915_vma *vma);
 
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1849,6 → 2022,7
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
 
1876,9 → 2050,8
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
void i915_vma_move_to_active(struct i915_vma *vma,
struct intel_ring_buffer *ring);
 
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
1919,7 → 2092,7
}
}
 
void i915_gem_retire_requests(struct drm_device *dev);
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
1926,24 → 2099,29
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
& I915_RESET_IN_PROGRESS_FLAG);
& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}
 
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
return atomic_read(&error->reset_counter) == I915_WEDGED;
return atomic_read(&error->reset_counter) & I915_WEDGED;
}
 
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}
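/*
 * Illustrative sketch (not part of this patch): how a lock-free waiter
 * might combine the helpers above, mirroring the reset_counter scheme
 * documented in struct i915_gpu_error. Hypothetical function name.
 */
static inline bool example_reset_happened(struct i915_gpu_error *error,
					  unsigned int pre_wait_counter)
{
	/* Any change to the counter since the wait began (including the
	 * wedged bit becoming set) means the ioctl must be restarted. */
	return pre_wait_counter != atomic_read(&error->reset_counter);
}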
 
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
1970,6 → 2148,7
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
uint32_t
2001,6 → 2180,9
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
 
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
 
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
2037,10 → 2219,9
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
map_and_fenceable, nonblocking);
}
#undef obj_to_ggtt
 
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
2100,6 → 2281,7
unsigned cache_level,
bool mappable,
bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
 
/* i915_gem_stolen.c */
2139,6 → 2321,11
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void intel_display_crc_init(struct drm_device *dev);
#else
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
 
/* i915_gpu_error.c */
__printf(2, 3)
2192,15 → 2379,31
extern void intel_i2c_reset(struct drm_device *dev);
 
/* intel_opregion.c */
struct intel_encoder;
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
return 0;
}
#endif
 
/* intel_acpi.c */
2237,6 → 2440,8
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
 
/* overlay */
#ifdef CONFIG_DEBUG_FS
2254,8 → 2459,8
* must be set to prevent the GT core from powering down and stale values
* being returned.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
 
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
2264,48 → 2469,63
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read
void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
 
#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
(((reg) >= 0x2000 && (reg) < 0x4000) ||\
((reg) >= 0x5000 && (reg) < 0x8000) ||\
((reg) >= 0xB000 && (reg) < 0x12000) ||\
((reg) >= 0x2E000 && (reg) < 0x30000))
 
#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
(((reg) >= 0x12000 && (reg) < 0x14000) ||\
((reg) >= 0x22000 && (reg) < 0x24000) ||\
((reg) >= 0x30000 && (reg) < 0x40000))
 
#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA (1 << 1)
#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
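/*
 * Illustrative sketch (not part of this patch): picking a forcewake
 * engine from the VLV range macros above. Hypothetical helper; the
 * real decision logic lives in intel_uncore.c.
 */
static inline int example_vlv_fw_engine(u32 reg)
{
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER;
	if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg))
		return FORCEWAKE_MEDIA;
	return FORCEWAKE_ALL;
}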
 
#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
 
#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
 
#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
 
#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
 
#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
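/*
 * Illustrative usage (not from the patch): the common write-then-post
 * pattern with the accessors above, forcing a register write out to
 * hardware before continuing. Register and helper are hypothetical.
 */
static inline void example_write_and_flush(struct drm_i915_private *dev_priv,
					   u32 reg, u32 val)
{
	I915_WRITE(reg, val);
	POSTING_READ(reg);	/* read back and discard to flush the write */
}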
 
2372,13 → 2592,13
struct drm_i915_gem_object
*kos_gem_fb_object_create(struct drm_device *dev, u32 gtt_offset, u32 size);
 
extern struct drm_i915_gem_object *fb_obj;
extern struct drm_i915_gem_object *main_fb_obj;
 
static struct drm_i915_gem_object *get_fb_obj(void)
{
return fb_obj;
return main_fb_obj;
}
 
 
#define ioread32(addr) readl(addr)
 
 
/drivers/video/drm/i915/i915_gem.c
65,6 → 65,9
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
81,8 → 84,8
struct drm_i915_fence_reg *fence,
bool enable);
 
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
static bool cpu_cache_is_coherent(struct drm_device *dev,
283,7 → 286,7
struct drm_mode_create_dumb *args)
{
/* have to work out size/pitch and return them */
args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
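	/* Worked example (illustrative): width = 1920, bpp = 32 gives
	 * 1920 * DIV_ROUND_UP(32, 8) = 7680 bytes, already a multiple of
	 * 64, so pitch = 7680 and size = 7680 * height. */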
return i915_gem_create(file, dev,
args->size, &args->handle);
460,12 → 463,10
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
}
}
 
ret = i915_gem_object_get_pages(obj);
if (ret)
775,12 → 776,10
* optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway. */
needs_clflush_after = cpu_write_needs_clflush(obj);
if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
}
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
982,12 → 981,31
BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
ret = 0;
if (seqno == ring->outstanding_lazy_request)
if (seqno == ring->outstanding_lazy_seqno)
ret = i915_add_request(ring, NULL);
 
return ret;
}
 
static void fake_irq(unsigned long data)
{
// wake_up_process((struct task_struct *)data);
}
 
static bool missed_irq(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
 
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
if (file_priv == NULL)
return true;
 
/* atomic_xchg() returns the previous value: only the first caller per
 * file sees false here, so each file gets at most one RPS boost. */
return !atomic_xchg(&file_priv->rps_wait_boost, true);
}
 
/**
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
1008,13 → 1026,16
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
unsigned reset_counter,
bool interruptible, struct timespec *timeout)
bool interruptible,
struct timespec *timeout,
struct drm_i915_file_private *file_priv)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct timespec before, now, wait_time={1,0};
unsigned long timeout_jiffies;
long end;
bool wait_forever = true;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
struct timespec before, now;
unsigned long timeout_expire, wait_time;
wait_queue_t __wait;
int ret;
 
WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1022,69 → 1043,72
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
 
trace_i915_gem_request_wait_begin(ring, seqno);
timeout_expire = timeout ? GetTimerTicks() + timespec_to_jiffies_timeout(timeout) : 0;
wait_time = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
 
if (timeout != NULL) {
wait_time = *timeout;
wait_forever = false;
if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
if (file_priv)
mod_delayed_work(dev_priv->wq,
&file_priv->mm.idle_work,
msecs_to_jiffies(100));
}
 
timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
 
if (WARN_ON(!ring->irq_get(ring)))
if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
 
/* Record current time in case interrupted by signal, or wedged * */
getrawmonotonic(&before);
INIT_LIST_HEAD(&__wait.task_list);
__wait.evnt = CreateEvent(NULL, MANUAL_DESTROY); /* KolibriOS event backing this hand-rolled wait-queue entry */
 
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
i915_reset_in_progress(&dev_priv->gpu_error) || \
reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
EXIT_COND,
timeout_jiffies);
else
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(ring, seqno);
 
for (;;) {
unsigned long flags;
 
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
end = -EAGAIN;
 
/* ... but upgrade the -EGAIN to an -EIO if the gpu is truly
* gone. */
if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
/* ... but upgrade the -EAGAIN to an -EIO if the gpu
* is truly gone. */
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
end = ret;
} while (end == 0 && wait_forever);
if (ret == 0)
ret = -EAGAIN;
break;
}
 
getrawmonotonic(&now);
if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
ret = 0;
break;
}
 
ring->irq_put(ring);
trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND
if (timeout && time_after_eq(GetTimerTicks(), timeout_expire)) {
ret = -ETIME;
break;
}
 
if (timeout) {
// struct timespec sleep_time = timespec_sub(now, before);
// *timeout = timespec_sub(*timeout, sleep_time);
spin_lock_irqsave(&ring->irq_queue.lock, flags);
if (list_empty(&__wait.task_list))
__add_wait_queue(&ring->irq_queue, &__wait);
spin_unlock_irqrestore(&ring->irq_queue.lock, flags);
 
WaitEventTimeout(__wait.evnt, 1);
 
if (!list_empty(&__wait.task_list)) {
spin_lock_irqsave(&ring->irq_queue.lock, flags);
list_del_init(&__wait.task_list);
spin_unlock_irqrestore(&ring->irq_queue.lock, flags);
}
};
trace_i915_gem_request_wait_end(ring, seqno);
 
switch (end) {
case -EIO:
case -EAGAIN: /* Wedged */
case -ERESTARTSYS: /* Signal */
return (int)end;
case 0: /* Timeout */
return -ETIME;
default: /* Completed */
WARN_ON(end < 0); /* We're not aware of other errors */
return 0;
DestroyEvent(__wait.evnt);
 
if (!irq_test_in_progress)
ring->irq_put(ring);
 
return ret;
}
}
 
/**
* Waits for a sequence number to be signaled, and cleans up the
1111,7 → 1135,7
 
return __wait_seqno(ring, seqno,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL);
interruptible, NULL, NULL);
}
 
static int
1161,6 → 1185,7
*/
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_file *file,
bool readonly)
{
struct drm_device *dev = obj->base.dev;
1187,7 → 1212,7
 
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
1236,7 → 1261,7
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
if (ret)
goto unref;
 
1751,6 → 1776,13
}
}
 
void i915_vma_move_to_active(struct i915_vma *vma,
struct intel_ring_buffer *ring)
{
list_move_tail(&vma->mm_list, &vma->vm->active_list);
return i915_gem_object_move_to_active(vma->obj, ring);
}
 
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
1872,11 → 1904,10
if (ret)
return ret;
 
request = kmalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
request = ring->preallocated_lazy_request;
if (WARN_ON(request == NULL))
return -ENOMEM;
 
 
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
1885,17 → 1916,13
request_ring_position = intel_ring_get_tail(ring);
 
ret = ring->add_request(ring);
if (ret) {
kfree(request);
if (ret)
return ret;
}
 
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->head = request_start;
request->tail = request_ring_position;
request->ctx = ring->last_context;
request->batch_obj = obj;
 
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
1903,7 → 1930,12
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
request->batch_obj = obj;
 
/* Hold a reference to the current context so that we can inspect
* it later in case a hangcheck error event fires.
*/
request->ctx = ring->last_context;
if (request->ctx)
i915_gem_context_reference(request->ctx);
 
1923,7 → 1955,8
}
 
trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
ring->outstanding_lazy_seqno = 0;
ring->preallocated_lazy_request = NULL;
 
if (!dev_priv->ums.mm_suspended) {
// i915_queue_hangcheck(ring->dev);
1950,10 → 1983,8
return;
 
spin_lock(&file_priv->mm.lock);
if (request->file_priv) {
list_del(&request->client_list);
request->file_priv = NULL;
}
spin_unlock(&file_priv->mm.lock);
}
 
2018,6 → 2049,21
return false;
}
 
static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
{
const unsigned long elapsed = GetTimerTicks()/100 - hs->guilty_ts;
 
if (hs->banned)
return true;
 
if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
DRM_ERROR("context hanging too fast, declaring banned!\n");
return true;
}
 
return false;
}
 
static void i915_set_reset_status(struct intel_ring_buffer *ring,
struct drm_i915_gem_request *request,
u32 acthd)
2035,7 → 2081,7
 
if (ring->hangcheck.action != HANGCHECK_WAIT &&
i915_request_guilty(request, acthd, &inside)) {
DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
offset,
2054,12 → 2100,15
hs = &request->file_priv->hang_stats;
 
if (hs) {
if (guilty)
if (guilty) {
hs->banned = i915_context_is_banned(hs);
hs->batch_active++;
else
hs->guilty_ts = GetTimerTicks()/100;
} else {
hs->batch_pending++;
}
}
}
 
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
2090,6 → 2139,23
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
 
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
 
i915_gem_object_move_to_inactive(obj);
}
 
/*
* We must free the requests after all the corresponding objects have
* been moved off the active lists, which is the same order the normal
* retire_requests function uses. This is important because objects can
* hold implicit references on things like e.g. ppgtt address spaces
* through the request.
*/
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
 
2099,17 → 2165,7
 
i915_gem_free_request(request);
}
 
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
 
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
 
i915_gem_object_move_to_inactive(obj);
}
}
 
void i915_gem_restore_fences(struct drm_device *dev)
{
2149,6 → 2205,8
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_cleanup(dev_priv, ring);
 
i915_gem_cleanup_ringbuffer(dev);
 
i915_gem_restore_fences(dev);
}
 
2213,59 → 2271,55
WARN_ON(i915_verify_lists(ring->dev));
}
 
void
bool
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
bool idle = true;
int i;
 
for_each_ring(ring, dev_priv, i)
for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
}
 
if (idle)
mod_delayed_work(dev_priv->wq,
&dev_priv->mm.idle_work,
msecs_to_jiffies(100));
 
return idle;
}
 
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
struct intel_ring_buffer *ring;
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), mm.retire_work.work);
struct drm_device *dev = dev_priv->dev;
bool idle;
int i;
 
dev_priv = container_of(work, drm_i915_private_t,
mm.retire_work.work);
dev = dev_priv->dev;
 
/* Come back later if the device is busy... */
if (!mutex_trylock(&dev->struct_mutex)) {
idle = false;
if (mutex_trylock(&dev->struct_mutex)) {
idle = i915_gem_retire_requests(dev);
mutex_unlock(&dev->struct_mutex);
}
if (!idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
return;
}
 
i915_gem_retire_requests(dev);
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), mm.idle_work.work);
 
/* Send a periodic flush down the ring so we don't hold onto GEM
* objects indefinitely.
*/
idle = true;
for_each_ring(ring, dev_priv, i) {
if (ring->gpu_caches_dirty)
i915_add_request(ring, NULL);
 
idle &= list_empty(&ring->request_list);
intel_mark_idle(dev_priv->dev);
}
 
if (!dev_priv->ums.mm_suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
if (idle)
intel_mark_idle(dev);
 
mutex_unlock(&dev->struct_mutex);
}
 
/**
* Ensures that an object will eventually get non-busy by flushing any required
* write domains, emitting any outstanding lazy request and retiring and
2361,7 → 2415,7
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
 
ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
if (timeout)
args->timeout_ns = timespec_to_ns(timeout);
return ret;
2408,6 → 2462,7
if (ret)
return ret;
 
trace_i915_gem_ring_sync_to(from, to, seqno);
ret = to->sync_to(to, from, seqno);
if (!ret)
/* We use last_read_seqno because sync_to()
2455,9 → 2510,12
if (list_empty(&vma->vma_link))
return 0;
 
if (!drm_mm_node_allocated(&vma->node))
goto destroy;
if (!drm_mm_node_allocated(&vma->node)) {
i915_gem_vma_destroy(vma);
 
return 0;
}
 
if (obj->pin_count)
return -EBUSY;
 
2487,7 → 2545,6
obj->has_aliasing_ppgtt_mapping = 0;
}
i915_gem_gtt_finish_object(obj);
i915_gem_object_unpin_pages(obj);
 
list_del(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
2495,17 → 2552,19
obj->map_and_fenceable = true;
 
drm_mm_remove_node(&vma->node);
 
destroy:
i915_gem_vma_destroy(vma);
 
/* Since the unbound list is global, only move to that list if
* no more VMAs exist.
* NB: Until we have real VMAs there will only ever be one */
WARN_ON(!list_empty(&obj->vma_list));
* no more VMAs exist. */
if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
* reaped by the shrinker.
*/
i915_gem_object_unpin_pages(obj);
 
return 0;
}
 
2698,6 → 2757,7
obj->stride, obj->tiling_mode);
 
switch (INTEL_INFO(dev)->gen) {
case 8:
case 7:
case 6:
case 5:
2797,7 → 2857,7
}
 
if (avail == NULL)
return NULL;
goto deadlock;
 
/* None available, try to steal one or wait for a user to finish */
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2807,7 → 2867,12
return reg;
}
 
return NULL;
deadlock:
/* Wait for completion of pending flips which consume fences */
// if (intel_has_pending_fb_unpin(dev))
// return ERR_PTR(-EAGAIN);
 
return ERR_PTR(-EDEADLK);
}
 
/**
2852,8 → 2917,8
}
} else if (enable) {
reg = i915_find_fence_reg(dev);
if (reg == NULL)
return -EDEADLK;
if (IS_ERR(reg))
return PTR_ERR(reg);
 
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
3194,8 → 3259,7
 
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj)) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj,
&dev_priv->gtt.base);
struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
if (vma)
list_move_tail(&vma->mm_list,
&dev_priv->gtt.base.inactive_list);
3566,7 → 3630,7
if (seqno == 0)
return 0;
 
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
3670,6 → 3734,11
goto out;
}
 
if (obj->user_pin_count == ULONG_MAX) {
ret = -EBUSY;
goto out;
}
 
if (obj->user_pin_count == 0) {
ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
if (ret)
3823,7 → 3892,6
{
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
 
3881,16 → 3949,11
} else
obj->cache_level = I915_CACHE_NONE;
 
trace_i915_gem_object_create(obj);
 
return obj;
}
 
int i915_gem_init_object(struct drm_gem_object *obj)
{
BUG();
 
return 0;
}
 
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3898,6 → 3961,8
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma, *next;
 
intel_runtime_pm_get(dev_priv);
 
trace_i915_gem_object_destroy(obj);
 
 
3944,11 → 4009,24
 
kfree(obj->bit_17);
i915_gem_object_free(obj);
 
intel_runtime_pm_put(dev_priv);
}
 
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm)
return vma;
 
return NULL;
}
 
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
3968,30 → 4046,47
return vma;
}
 
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
 
vma = i915_gem_obj_to_vma(obj, vm);
if (!vma)
vma = __i915_gem_vma_create(obj, vm);
 
return vma;
}
 
void i915_gem_vma_destroy(struct i915_vma *vma)
{
WARN_ON(vma->node.allocated);
 
/* Keep the vma as a placeholder in the execbuffer reservation lists */
if (!list_empty(&vma->exec_list))
return;
 
list_del(&vma->vma_link);
 
kfree(vma);
}
 
#if 0
int
i915_gem_idle(struct drm_device *dev)
i915_gem_suspend(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
int ret = 0;
 
if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
mutex_lock(&dev->struct_mutex);
if (dev_priv->ums.mm_suspended)
goto err;
 
ret = i915_gpu_idle(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
if (ret)
goto err;
 
i915_gem_retire_requests(dev);
 
/* Under UMS, be paranoid and evict. */
3998,48 → 4093,58
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
 
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
 
/* Cancel the retire work handler, which should be idle now. */
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound ums.mm_suspended!
*/
dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
DRIVER_MODESET);
mutex_unlock(&dev->struct_mutex);
 
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 
return 0;
 
err:
mutex_unlock(&dev->struct_mutex);
return ret;
}
#endif
 
void i915_gem_l3_remap(struct drm_device *dev)
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 misccpctl;
int i;
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
 
if (!HAS_L3_GPU_CACHE(dev))
return;
if (!HAS_L3_DPF(dev) || !remap_info)
return 0;
 
if (!dev_priv->l3_parity.remap_info)
return;
ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
if (ret)
return ret;
 
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);
 
/*
* Note: We do not worry about the concurrent register cacheline hang
* here because no other code should access these registers other than
* at initialization time.
*/
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
if (remap && !dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, reg_base + i);
intel_ring_emit(ring, remap_info[i/4]);
}
 
/* Make sure all the writes land before disabling dop clock gating */
POSTING_READ(GEN7_L3LOG_BASE);
intel_ring_advance(ring);
 
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
return ret;
}
 
void i915_gem_init_swizzling(struct drm_device *dev)
4061,6 → 4166,8
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (IS_GEN7(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
else if (IS_GEN8(dev))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
}
4131,7 → 4238,7
i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
int ret, i;
 
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
4139,6 → 4246,10
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
if (IS_HASWELL(dev))
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4145,8 → 4256,6
I915_WRITE(GEN7_MSG_CTL, temp);
}
 
i915_gem_l3_remap(dev);
 
i915_gem_init_swizzling(dev);
 
ret = i915_gem_init_rings(dev);
4153,11 → 4262,20
if (ret)
return ret;
 
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
/*
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
*/
i915_gem_context_init(dev);
ret = i915_gem_context_init(dev);
if (ret) {
i915_gem_cleanup_ringbuffer(dev);
DRM_ERROR("Context initialization failed %d\n", ret);
return ret;
}
 
if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret) {
4255,26 → 4373,12
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
 
drm_irq_uninstall(dev);
 
mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);
 
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound ums.mm_suspended!
*/
if (ret != 0)
dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
 
return ret;
return i915_gem_suspend(dev);
}
 
void
4285,11 → 4389,9
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
 
mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);
ret = i915_gem_suspend(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
mutex_unlock(&dev->struct_mutex);
}
#endif
 
4319,6 → 4421,7
INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base);
 
INIT_LIST_HEAD(&dev_priv->context_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4328,6 → 4431,8
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4370,7 → 4475,7
if (dev_priv->mm.phys_objs[id - 1] || !size)
return 0;
 
phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
if (!phys_obj)
return -ENOMEM;
 
4608,11 → 4713,10
 
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_address_space *vm;
struct i915_vma *vma;
 
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
if (i915_gem_obj_bound(o, vm))
list_for_each_entry(vma, &o->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
return true;
 
return false;
4635,26 → 4739,18
 
return 0;
}
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
 
 
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm)
return vma;
 
if (WARN_ON(list_empty(&obj->vma_list)))
return NULL;
}
 
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
return NULL;
 
vma = i915_gem_obj_to_vma(obj, vm);
if (!vma)
vma = i915_gem_vma_create(obj, vm);
 
return vma;
}
/drivers/video/drm/i915/i915_gem_context.c
73,7 → 73,7
*
* There are two confusing terms used above:
* The "current context" means the context which is currently running on the
* GPU. The GPU has loaded it's state already and has stored away the gtt
* GPU. The GPU has loaded its state already and has stored away the gtt
* offset of the BO. The GPU is not actively referencing the data at this
* offset, but it will on the next context switch. The only way to avoid this
* is to do a GPU reset.
117,6 → 117,9
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
case 8:
ret = GEN8_CXT_TOTAL_SIZE;
break;
default:
BUG();
}
129,6 → 132,7
struct i915_hw_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
 
list_del(&ctx->link);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
}
147,6 → 151,7
 
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
INIT_LIST_HEAD(&ctx->link);
if (ctx->obj == NULL) {
kfree(ctx);
DRM_DEBUG_DRIVER("Context object allocated failed\n");
166,6 → 171,7
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
list_add_tail(&ctx->link, &dev_priv->context_list);
 
/* Default context will never have a file_priv */
if (file_priv == NULL)
178,6 → 184,10
 
ctx->file_priv = file_priv;
ctx->id = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
return ctx;
 
213,7 → 223,6
* may not be available. To avoid this we always pin the
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
226,6 → 235,8
goto err_unpin;
}
 
dev_priv->ring[RCS].default_context = ctx;
 
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
 
236,36 → 247,34
return ret;
}
 
void i915_gem_context_init(struct drm_device *dev)
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (!HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
return;
}
if (!HAS_HW_CONTEXTS(dev))
return 0;
 
/* If called from reset, or thaw... we've been here already */
if (dev_priv->hw_contexts_disabled ||
dev_priv->ring[RCS].default_context)
return;
if (dev_priv->ring[RCS].default_context)
return 0;
 
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 
if (dev_priv->hw_context_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
return;
return -E2BIG;
}
 
if (create_default_context(dev_priv)) {
dev_priv->hw_contexts_disabled = true;
DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
return;
ret = create_default_context(dev_priv);
if (ret) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
ret);
return ret;
}
 
DRM_DEBUG_DRIVER("HW context support initialized\n");
return 0;
}
 
void i915_gem_context_fini(struct drm_device *dev)
273,7 → 282,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 
if (dev_priv->hw_contexts_disabled)
if (!HAS_HW_CONTEXTS(dev))
return;
 
/* The only known way to stop the gpu from accessing the hw context is
281,8 → 290,6
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
 
i915_gem_object_unpin(dctx->obj);
 
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
289,10 → 296,20
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
drm_gem_object_unreference(&dctx->obj->base);
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
WARN_ON(dctx->obj->active);
i915_gem_object_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
}
 
i915_gem_object_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].default_context = NULL;
dev_priv->ring[RCS].last_context = NULL;
}
 
static int context_idr_cleanup(int id, void *p, void *data)
{
struct i915_hw_context *ctx = p;
308,7 → 325,6
struct drm_file *file,
u32 id)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
 
315,8 → 331,9
if (id == DEFAULT_CONTEXT_ID)
return &file_priv->hang_stats;
 
ctx = NULL;
if (!dev_priv->hw_contexts_disabled)
if (!HAS_HW_CONTEXTS(dev))
return ERR_PTR(-ENOENT);
 
ctx = i915_gem_context_get(file->driver_priv, id);
if (ctx == NULL)
return ERR_PTR(-ENOENT);
391,11 → 408,11
struct intel_ring_buffer *ring = to->ring;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
int ret, i;
 
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
if (from == to)
if (from == to && !to->remap_slice)
return 0;
 
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
428,8 → 445,6
 
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
else if (WARN_ON_ONCE(from == to)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
 
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
437,6 → 452,18
return ret;
}
 
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
 
ret = i915_gem_l3_remap(ring, i);
/* If it failed, try again next round */
if (ret)
DRM_DEBUG_DRIVER("L3 remapping failed\n");
else
to->remap_slice &= ~(1<<i);
}
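 
The remap_slice field set at context creation acts as a pending bitmask: every slice starts dirty, each successful i915_gem_l3_remap() clears its bit, and a failed slice is simply retried on the next switch. A standalone sketch of that bookkeeping, with NUM_SLICES standing in for NUM_L3_SLICES(dev) and a stubbed remap:
 
#include <stdio.h>
 
#define NUM_SLICES 2				/* stand-in for NUM_L3_SLICES(dev) */
 
static int remap(int slice)
{
	return slice == 0 ? 0 : -1;		/* pretend slice 1 fails */
}
 
int main(void)
{
	unsigned pending = (1u << NUM_SLICES) - 1;	/* all slices dirty at create */
	int i;
 
	for (i = 0; i < NUM_SLICES; i++) {
		if (!(pending & (1u << i)))
			continue;
		if (remap(i) == 0)
			pending &= ~(1u << i);	/* clear on success */
		/* on failure the bit stays set and is retried next switch */
	}
	printf("still pending: 0x%x\n", pending);	/* 0x2 */
	return 0;
}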
 
/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. In fact, the below code
444,11 → 471,8
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
i915_gem_object_move_to_active(from->obj, ring);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
459,17 → 483,7
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
 
ret = i915_add_request(ring, NULL);
if (ret) {
/* Too late, we've already scheduled a context switch.
* Try to undo the change so that the hw state is
* consistent with our tracking. In case of emergency,
* scream.
*/
WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
return ret;
}
 
/* obj is kept alive until the next request by its active ref */
i915_gem_object_unpin(from->obj);
i915_gem_context_unreference(from);
}
486,8 → 500,6
* @ring: ring for which we'll execute the context switch
* @file_priv: file_priv associated with the context, may be NULL
* @id: context id number
* @seqno: sequence number by which the new context will be switched to
* @flags:
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 on create and destroy. If the context is in use by the GPU,
501,7 → 513,7
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *to;
 
if (dev_priv->hw_contexts_disabled)
if (!HAS_HW_CONTEXTS(ring->dev))
return 0;
 
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
526,7 → 538,6
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
535,7 → 546,7
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
 
if (dev_priv->hw_contexts_disabled)
if (!HAS_HW_CONTEXTS(dev))
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
/drivers/video/drm/i915/i915_gem_execbuffer.c
33,6 → 33,8
#include "intel_drv.h"
//#include <linux/dma_remapping.h>
 
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
 
static unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
48,35 → 50,35
return 0;
}
 
struct eb_objects {
struct list_head objects;
struct eb_vmas {
struct list_head vmas;
int and;
union {
struct drm_i915_gem_object *lut[0];
struct i915_vma *lut[0];
struct hlist_head buckets[0];
};
};
 
static struct eb_objects *
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
struct eb_objects *eb = NULL;
struct eb_vmas *eb = NULL;
 
if (args->flags & I915_EXEC_HANDLE_LUT) {
int size = args->buffer_count;
size *= sizeof(struct drm_i915_gem_object *);
size += sizeof(struct eb_objects);
unsigned size = args->buffer_count;
size *= sizeof(struct i915_vma *);
size += sizeof(struct eb_vmas);
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
}
 
if (eb == NULL) {
int size = args->buffer_count;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
unsigned size = args->buffer_count;
unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
sizeof(struct eb_vmas),
GFP_TEMPORARY);
if (eb == NULL)
return eb;
85,12 → 87,12
} else
eb->and = -args->buffer_count;
 
INIT_LIST_HEAD(&eb->objects);
INIT_LIST_HEAD(&eb->vmas);
return eb;
}
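 
When the LUT path is unavailable, eb_create() falls back to a hash table whose bucket count starts at half a page of list heads and is halved until it is no more than twice the buffer count, so eb->and (count - 1) is always a power-of-two mask. A standalone sketch of that sizing, assuming 4096-byte pages and 8-byte list heads (illustrative values only):
 
#include <stdio.h>
 
static unsigned bucket_count(unsigned buffer_count)
{
	unsigned count = 4096 / 8 / 2;		/* half a page of list heads */
 
	while (count > 2 * buffer_count)	/* halve while oversized */
		count >>= 1;
	return count;				/* always a power of two */
}
 
int main(void)
{
	printf("%u\n", bucket_count(10));	/* 16: halved until <= 2*10 */
	printf("%u\n", bucket_count(1000));	/* 256: the half-page ceiling */
	return 0;
}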
 
static void
eb_reset(struct eb_objects *eb)
eb_reset(struct eb_vmas *eb)
{
if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
97,52 → 99,102
}
 
static int
eb_lookup_objects(struct eb_objects *eb,
eb_lookup_vmas(struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec,
const struct drm_i915_gem_execbuffer2 *args,
struct i915_address_space *vm,
struct drm_file *file)
{
int i;
struct drm_i915_gem_object *obj;
struct list_head objects;
int i, ret;
 
INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
/* Grab a reference to the object and release the lock so we can lookup
* or create the VMA without using GFP_ATOMIC */
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
 
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
if (obj == NULL) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
return -ENOENT;
ret = -ENOENT;
goto err;
}
 
if (!list_empty(&obj->exec_list)) {
if (!list_empty(&obj->obj_exec_link)) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
return -EINVAL;
ret = -EINVAL;
goto err;
}
 
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->exec_list, &eb->objects);
list_add_tail(&obj->obj_exec_link, &objects);
}
spin_unlock(&file->table_lock);
 
obj->exec_entry = &exec[i];
i = 0;
while (!list_empty(&objects)) {
struct i915_vma *vma;
 
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
 
/*
* NOTE: We can leak any vmas created here when something fails
* later on. But that's no issue since vma_unbind can deal with
* vmas which are not actually bound. And since only
* lookup_or_create exists as an interface to get at the vma
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
goto err;
}
 
/* Transfer ownership from the objects list to the vmas list. */
list_add_tail(&vma->exec_list, &eb->vmas);
list_del_init(&obj->obj_exec_link);
 
vma->exec_entry = &exec[i];
if (eb->and < 0) {
eb->lut[i] = obj;
eb->lut[i] = vma;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
obj->exec_handle = handle;
hlist_add_head(&obj->exec_node,
vma->exec_handle = handle;
hlist_add_head(&vma->exec_node,
&eb->buckets[handle & eb->and]);
}
++i;
}
spin_unlock(&file->table_lock);
 
return 0;
 
 
err:
while (!list_empty(&objects)) {
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
list_del_init(&obj->obj_exec_link);
drm_gem_object_unreference(&obj->base);
}
/*
* Objects already transferred to the vmas list will be unreferenced by
* eb_destroy.
*/
 
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
return ret;
}
 
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
if (eb->and < 0) {
if (handle >= -eb->and)
154,11 → 206,11
 
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
 
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
vma = hlist_entry(node, struct i915_vma, exec_node);
if (vma->exec_handle == handle)
return vma;
}
return NULL;
}
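 
eb->and doubles as a mode flag and a mask: a negative value means handles are direct indices into a flat LUT of -and slots (the I915_EXEC_HANDLE_LUT case), while a non-negative value is the bucket mask for the hash path. A simplified standalone sketch of that dual lookup, with int slots standing in for i915_vma pointers:
 
#include <stdio.h>
 
struct eb_sketch {
	int and;		/* < 0: flat LUT of -and slots; >= 0: hash mask */
};
 
static int slot_for(const struct eb_sketch *eb, unsigned long handle)
{
	if (eb->and < 0)	/* I915_EXEC_HANDLE_LUT: handle is the index */
		return handle < (unsigned long)-eb->and ? (int)handle : -1;
	return (int)(handle & eb->and);		/* hash path: mask into buckets */
}
 
int main(void)
{
	struct eb_sketch lut = { .and = -4 };	/* four direct slots */
	struct eb_sketch hash = { .and = 15 };	/* 16 buckets */
 
	printf("%d %d\n", slot_for(&lut, 3), slot_for(&lut, 9));	/* 3 -1 */
	printf("%d\n", slot_for(&hash, 0x123));				/* 3 */
	return 0;
}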
165,16 → 217,36
}
 
static void
eb_destroy(struct eb_objects *eb)
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
while (!list_empty(&eb->objects)) {
struct drm_i915_gem_object *obj;
struct drm_i915_gem_exec_object2 *entry;
struct drm_i915_gem_object *obj = vma->obj;
 
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
if (!drm_mm_node_allocated(&vma->node))
return;
 
entry = vma->exec_entry;
 
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);
 
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
i915_gem_object_unpin(obj);
 
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
 
static void eb_destroy(struct eb_vmas *eb)
{
while (!list_empty(&eb->vmas)) {
struct i915_vma *vma;
 
vma = list_first_entry(&eb->vmas,
struct i915_vma,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
kfree(eb);
}
181,7 → 253,8
 
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
return (HAS_LLC(obj->base.dev) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
!obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
194,9 → 267,9
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t page_offset = offset_in_page(reloc->offset);
char *vaddr;
int ret = -EINVAL;
int ret;
 
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
return ret;
 
215,7 → 288,7
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
int ret = -EINVAL;
int ret;
 
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
239,7 → 312,7
 
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
{
246,16 → 319,18
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
uint32_t target_offset;
int ret = -EINVAL;
int ret;
 
/* we already hold a reference to all valid objects */
target_obj = &eb_get_object(eb, reloc->target_handle)->base;
if (unlikely(target_obj == NULL))
target_vma = eb_get_vma(eb, reloc->target_handle);
if (unlikely(target_vma == NULL))
return -ENOENT;
target_i915_obj = target_vma->obj;
target_obj = &target_vma->obj->base;
 
target_i915_obj = to_intel_bo(target_obj);
target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
target_offset = target_vma->node.start;
 
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
276,7 → 351,7
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
return -EINVAL;
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
287,7 → 362,7
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
return -EINVAL;
}
 
target_obj->pending_read_domains |= reloc->read_domains;
300,13 → 375,14
return 0;
 
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
if (unlikely(reloc->offset >
obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
(int) obj->base.size);
return ret;
return -EINVAL;
}
if (unlikely(reloc->offset & 3)) {
DRM_DEBUG("Relocation not 4-byte aligned: "
313,7 → 389,7
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
return ret;
return -EINVAL;
}
 
/* We can't wait for rendering with pagefaults disabled */
334,14 → 410,13
}
 
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct i915_address_space *vm)
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int remain, ret;
 
user_relocs = to_user_ptr(entry->relocs_ptr);
359,8 → 434,8
do {
u64 offset = r->presumed_offset;
 
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
vm);
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
vma->vm);
if (ret)
return ret;
 
381,17 → 456,16
}
 
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs,
struct i915_address_space *vm)
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *relocs)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int i, ret;
 
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
vm);
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
vma->vm);
if (ret)
return ret;
}
400,10 → 474,9
}
 
static int
i915_gem_execbuffer_relocate(struct eb_objects *eb,
struct i915_address_space *vm)
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret = 0;
 
/* This is the fast path and we cannot handle a pagefault whilst
414,8 → 487,8
* lockdep complains vehemently.
*/
// pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
list_for_each_entry(vma, &eb->vmas, exec_list) {
ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
424,26 → 497,24
return ret;
}
 
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
 
static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
need_reloc_mappable(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
return entry->relocation_count && !use_cpu_reloc(obj);
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
i915_is_ggtt(vma->vm);
}
 
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_ring_buffer *ring,
struct i915_address_space *vm,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
struct drm_i915_gem_object *obj = vma->obj;
int ret;
 
need_fence =
450,9 → 521,9
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
 
ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
false);
if (ret)
return ret;
480,8 → 551,8
obj->has_aliasing_ppgtt_mapping = 1;
}
 
if (entry->offset != i915_gem_obj_offset(obj, vm)) {
entry->offset = i915_gem_obj_offset(obj, vm);
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start;
*need_reloc = true;
}
 
497,62 → 568,48
return 0;
}
 
static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
 
if (!i915_gem_obj_bound_any(obj))
return;
 
entry = obj->exec_entry;
 
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);
 
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
i915_gem_object_unpin(obj);
 
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
 
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
struct i915_address_space *vm,
struct list_head *vmas,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
struct i915_vma *vma;
struct i915_address_space *vm;
struct list_head ordered_vmas;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
 
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
if (list_empty(vmas))
return 0;
 
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
INIT_LIST_HEAD(&ordered_vmas);
while (!list_empty(vmas)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
 
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
entry = obj->exec_entry;
vma = list_first_entry(vmas, struct i915_vma, exec_list);
obj = vma->obj;
entry = vma->exec_entry;
 
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
 
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
list_move(&vma->exec_list, &ordered_vmas);
else
list_move_tail(&obj->exec_list, &ordered_objects);
list_move_tail(&vma->exec_list, &ordered_vmas);
 
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_objects, objects);
list_splice(&ordered_vmas, vmas);
 
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
571,52 → 628,53
int ret = 0;
 
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool need_fence, need_mappable;
u32 obj_offset;
 
if (!i915_gem_obj_bound(obj, vm))
obj = vma->obj;
 
if (!drm_mm_node_allocated(&vma->node))
continue;
 
obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
 
WARN_ON((need_mappable || need_fence) &&
!i915_is_ggtt(vm));
!i915_is_ggtt(vma->vm));
 
if ((entry->alignment &&
obj_offset & (entry->alignment - 1)) ||
vma->node.start & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
ret = i915_vma_unbind(vma);
else
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
 
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (i915_gem_obj_bound(obj, vm))
list_for_each_entry(vma, vmas, exec_list) {
if (drm_mm_node_allocated(&vma->node))
continue;
 
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
 
err: /* Decrement pin count for bound objects */
list_for_each_entry(obj, objects, exec_list)
i915_gem_execbuffer_unreserve_object(obj);
 
err:
if (ret != -ENOSPC || retry++)
return ret;
 
// ret = i915_gem_evict_everything(ring->dev);
/* Decrement pin count for bound objects */
list_for_each_entry(vma, vmas, exec_list)
i915_gem_execbuffer_unreserve_vma(vma);
 
// ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;
} while (1);
627,24 → 685,28
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_vma *vma;
bool need_relocs;
int *reloc_offset;
int i, total, ret;
int count = args->buffer_count;
unsigned count = args->buffer_count;
 
if (WARN_ON(list_empty(&eb->vmas)))
return 0;
 
vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
/* We may process another execbuffer during the unlock... */
while (!list_empty(&eb->objects)) {
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
while (!list_empty(&eb->vmas)) {
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
 
mutex_unlock(&dev->struct_mutex);
708,20 → 770,19
 
/* reacquire the objects */
eb_reset(eb);
ret = eb_lookup_objects(eb, exec, args, file);
ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
 
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
 
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset],
vm);
list_for_each_entry(vma, &eb->vmas, exec_list) {
int offset = vma->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
reloc + reloc_offset[offset]);
if (ret)
goto err;
}
740,14 → 801,15
 
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
struct list_head *vmas)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
int ret;
 
list_for_each_entry(obj, objects, exec_list) {
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
784,8 → 846,8
int count)
{
int i;
int relocs_total = 0;
int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
unsigned relocs_total = 0;
unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
for (i = 0; i < count; i++) {
char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
809,20 → 871,37
* to read, but since we may need to update the presumed
* offsets during execution, check for full write access.
*/
}
 
return 0;
}
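 
Switching relocs_total and relocs_max to unsigned avoids signed-overflow undefined behaviour and lets the full UINT_MAX budget be used; the bound is phrased as count > max - total so the running sum itself can never wrap. A standalone sketch of that pattern with an illustrative entry size:
 
#include <stdio.h>
#include <limits.h>
 
#define RELOC_SZ 32u	/* stand-in for sizeof(struct drm_i915_gem_relocation_entry) */
 
static int check(unsigned total, unsigned count)
{
	const unsigned max = UINT_MAX / RELOC_SZ;
 
	if (count > max - total)	/* rearranged so total + count cannot wrap */
		return -1;
	return 0;
}
 
int main(void)
{
	printf("%d\n", check(0, 10));			/* 0: fits */
	printf("%d\n", check(UINT_MAX / RELOC_SZ, 1));	/* -1: would wrap */
	return 0;
}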
 
static int
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
const u32 ctx_id)
{
struct i915_ctx_hang_stats *hs;
 
hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
if (IS_ERR(hs))
return PTR_ERR(hs);
 
if (hs->banned) {
DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
return -EIO;
}
 
return 0;
}
 
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct i915_address_space *vm,
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
 
list_for_each_entry(obj, objects, exec_list) {
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
 
832,9 → 911,7
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
/* FIXME: This lookup gets fixed later <-- danvet */
list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
i915_gem_object_move_to_active(obj, ring);
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
892,11 → 969,11
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask, flags;
int ret, mode, i;
1005,7 → 1082,8
return -EINVAL;
}
 
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
cliprects = kcalloc(args->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
1020,6 → 1098,8
}
}
 
intel_runtime_pm_get(dev_priv);
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto pre_mutex_err;
1030,6 → 1110,12
goto pre_mutex_err;
}
 
ret = i915_gem_validate_context(dev, file, ctx_id);
if (ret) {
mutex_unlock(&dev->struct_mutex);
goto pre_mutex_err;
}
 
eb = eb_create(args);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
1038,28 → 1124,26
}
 
/* Look up object handles */
ret = eb_lookup_objects(eb, exec, args, file);
ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
 
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(eb->objects.prev,
struct drm_i915_gem_object,
exec_list);
batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
 
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
 
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
ret = i915_gem_execbuffer_relocate(eb, vm);
ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec, vm);
eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
1076,12 → 1160,11
 
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but let's be paranoid and do it
* unconditionally for now. */
* hsw should have this fixed, but bdw mucks it up again. */
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;
 
1136,7 → 1219,7
 
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
err:
1146,6 → 1229,10
 
pre_mutex_err:
kfree(cliprects);
 
/* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */
intel_runtime_pm_put(dev_priv);
return ret;
}
 
/drivers/video/drm/i915/i915_gem_gtt.c
37,6 → 37,8
 
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
62,8 → 64,45
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
 
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
#define GEN8_LEGACY_PDPS 4
 
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
 
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
{
gen8_gtt_pte_t pte = valid ? 1 | 2 : 0;
pte |= addr;
if (level != I915_CACHE_NONE)
pte |= PPAT_CACHED_INDEX;
else
pte |= PPAT_UNCACHED_INDEX;
return pte;
}
 
static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE_INDEX;
else
pde |= PPAT_UNCACHED_INDEX;
return pde;
}
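 
As a worked example of gen8_pte_encode() above for a valid, cached page: the low two bits carry present/read-write, the page-aligned address is OR'd in, and PPAT_CACHED_INDEX selects the cached PAT entry. The sketch assumes the x86 _PAGE_PAT bit (bit 7) and a made-up address:
 
#include <stdio.h>
#include <stdint.h>
 
int main(void)
{
	uint64_t addr = 0x12345000ull;	/* made-up, page-aligned DMA address */
	uint64_t pte  = 1 | 2;		/* valid: present + read/write */
 
	pte |= addr;
	pte |= 1ull << 7;		/* PPAT_CACHED_INDEX (_PAGE_PAT, bit 7 assumed) */
 
	printf("pte = 0x%llx\n", (unsigned long long)pte);	/* 0x12345083 */
	return 0;
}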
 
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
155,10 → 194,10
case I915_CACHE_NONE:
break;
case I915_CACHE_WT:
pte |= HSW_WT_ELLC_LLC_AGE0;
pte |= HSW_WT_ELLC_LLC_AGE3;
break;
default:
pte |= HSW_WB_ELLC_LLC_AGE0;
pte |= HSW_WB_ELLC_LLC_AGE3;
break;
}
 
165,6 → 204,269
return pte;
}
 
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
uint64_t val)
{
int ret;
 
BUG_ON(entry >= 4);
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit(ring, (u32)(val >> 32));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit(ring, (u32)(val));
intel_ring_advance(ring);
 
return 0;
}
 
static int gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i, j, ret;
 
/* bit of a hack to find the actual last used pd */
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
 
for_each_ring(ring, dev_priv, j) {
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
 
for (i = used_pd - 1; i >= 0; i--) {
dma_addr_t addr = ppgtt->pd_dma_addr[i];
for_each_ring(ring, dev_priv, j) {
ret = gen8_write_pdp(ring, i, addr);
if (ret)
goto err_out;
}
}
return 0;
 
err_out:
for_each_ring(ring, dev_priv, j)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
return ret;
}
 
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
unsigned num_entries,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
unsigned last_pte, i;
 
pt_vaddr = (gen8_gtt_pte_t*)AllocKernelSpace(4096);
if(pt_vaddr == NULL)
return;
 
scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
I915_CACHE_LLC, use_scratch);
 
while (num_entries) {
struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
 
last_pte = first_pte + num_entries;
if (last_pte > GEN8_PTES_PER_PAGE)
last_pte = GEN8_PTES_PER_PAGE;
 
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
 
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
 
num_entries -= last_pte - first_pte;
first_pte = 0;
act_pt++;
}
FreeKernelSpace(pt_vaddr);
}
 
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr;
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
struct sg_page_iter sg_iter;
 
pt_vaddr = AllocKernelSpace(4096);
if(pt_vaddr == NULL)
return;
 
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
 
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 
pt_vaddr[act_pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
if (++act_pte == GEN8_PTES_PER_PAGE) {
act_pt++;
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
act_pte = 0;
}
}
FreeKernelSpace(pt_vaddr);
}
 
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
int i, j;
 
drm_mm_takedown(&vm->mm);
 
for (i = 0; i < ppgtt->num_pd_pages ; i++) {
if (ppgtt->pd_dma_addr[i]) {
pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pd_dma_addr[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
if (addr)
pci_unmap_page(ppgtt->base.dev->pdev,
addr,
PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
 
}
}
kfree(ppgtt->gen8_pt_dma_addr[i]);
}
 
// __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
// __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
 
/**
* GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
* net effect resembling a 2-level page table in normal x86 terms. Each PDP
* represents 1GB of memory
* 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
*
* TODO: Do something with the size parameter
**/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
struct page *pt_pages;
int i, j, ret = -ENOMEM;
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
 
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
 
/* FIXME: split allocation into smaller pieces. For now we only ever do
* this once, but with full PPGTT, the multiple contiguous allocations
* will be bad.
*/
ppgtt->pd_pages = AllocPages(max_pdp);
if (!ppgtt->pd_pages)
return -ENOMEM;
 
pt_pages = AllocPages(num_pt_pages);
if (!pt_pages) {
// __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
return -ENOMEM;
}
 
ppgtt->gen8_pt_pages = pt_pages;
ppgtt->num_pd_pages = max_pdp;
ppgtt->num_pt_pages = num_pt_pages;
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
ppgtt->enable = gen8_ppgtt_enable;
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.start = 0;
ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
 
/*
* - Create a mapping for the page directories.
* - For each page directory:
* allocate space for page table mappings.
* map each page table
*/
for (i = 0; i < max_pdp; i++) {
dma_addr_t temp;
temp = pci_map_page(ppgtt->base.dev->pdev,
&ppgtt->pd_pages[i], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
ppgtt->pd_dma_addr[i] = temp;
 
ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
if (!ppgtt->gen8_pt_dma_addr[i])
goto err_out;
 
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
temp = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
 
ppgtt->gen8_pt_dma_addr[i][j] = temp;
}
}
 
/* For now, the PPGTT helper functions all require that the PDEs are
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
* will never need to touch the PDEs again */
 
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = AllocKernelSpace(4096);
 
for (i = 0; i < max_pdp; i++) {
MapPage(pd_vaddr,(addr_t)(ppgtt->pd_pages[i]), 3);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
}
FreeKernelSpace(pd_vaddr);
 
ppgtt->base.clear_range(&ppgtt->base, 0,
ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
true);
 
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
ppgtt->num_pt_pages,
(ppgtt->num_pt_pages - num_pt_pages) +
size % (1<<30));
return 0;
 
err_out:
ppgtt->base.cleanup(&ppgtt->base);
return ret;
}
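 
The sizing arithmetic from the comment above, as a standalone check: one PDP covers 512 PDEs x 512 PTEs x 4096 bytes = 1 GiB, so four PDPs span the 4 GiB legacy address space:
 
#include <stdio.h>
#include <stdint.h>
 
#define PDES_PER_PAGE 512
#define PTES_PER_PAGE 512
#define PAGE_SZ 4096ull
 
int main(void)
{
	uint64_t size = 2ull << 30;				/* e.g. a 2 GiB GTT */
	uint64_t max_pdp = (size + (1ull << 30) - 1) >> 30;	/* DIV_ROUND_UP(size, 1 GiB) */
	uint64_t num_pt_pages = PDES_PER_PAGE * max_pdp;
 
	printf("pd pages %llu, pt pages %llu, coverage %llu GiB\n",
	       (unsigned long long)max_pdp,
	       (unsigned long long)num_pt_pages,
	       (unsigned long long)(num_pt_pages * PTES_PER_PAGE * PAGE_SZ >> 30));
	/* prints: pd pages 2, pt pages 1024, coverage 2 GiB */
	return 0;
}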
 
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
304,10 → 606,10
 
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
dma_addr_t page_addr;
 
page_addr = sg_page_iter_dma_address(&sg_iter);
pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
act_pt++;
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
360,7 → 662,9
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
ppgtt->base.start = 0;
ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
if (!ppgtt->pt_pages)
return -ENOMEM;
371,7 → 675,7
goto err_pt_alloc;
}
 
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
423,6 → 727,8
 
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
else if (IS_GEN8(dev))
ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
 
586,7 → 892,56
return 0;
}
 
static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
{
#ifdef writeq
writeq(pte, addr);
#else
iowrite32((u32)pte, addr);
iowrite32(pte >> 32, addr + 4);
#endif
}
 
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen8_gtt_pte_t __iomem *gtt_entries =
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr;
 
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_dma_address(sg_iter.sg) +
(sg_iter.sg_pgoffset << PAGE_SHIFT);
gen8_set_pte(&gtt_entries[i],
gen8_pte_encode(addr, level, true));
i++;
}
 
/*
* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though the
* registers and PTEs are within the same BAR, they may be subject to
* different (NUMA-like) access patterns. Therefore, even with the way we
* assume hardware should work, we keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readq(&gtt_entries[i-1])
!= gen8_pte_encode(addr, level, true));
 
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
 
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
* within the global GTT as well as accessible by the GPU through the GMADR
628,6 → 983,30
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
 
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
 
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
 
scratch_pte = gen8_pte_encode(vm->scratch.addr,
I915_CACHE_LLC,
use_scratch);
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
}
 
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
651,7 → 1030,6
readl(gtt_base);
}
 
 
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int pg_start,
733,6 → 1111,7
*end -= 4096;
}
}
 
void i915_gem_setup_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
772,7 → 1151,6
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
list_add(&vma->vma_link, &obj->vma_list);
}
 
dev_priv->gtt.base.start = start;
830,6 → 1208,7
 
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
drm_mm_takedown(&dev_priv->gtt.base.mm);
if (INTEL_INFO(dev)->gen < 8)
gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
880,6 → 1259,20
return snb_gmch_ctl << 20;
}
 
static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
if (bdw_gmch_ctl > 4) {
WARN_ON(!i915_preliminary_hw_support);
return 4<<20;
}
 
return bdw_gmch_ctl << 20;
}
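 
A worked decode of the BDW GGMS field mirroring the helper above: the field encodes 1 << n MiB of PTE space, and with 8-byte gen8 PTEs each MiB of table maps 512 MiB of address space (example field value only):
 
#include <stdio.h>
 
int main(void)
{
	unsigned ggms = 2;				/* example field value */
	unsigned gtt_bytes = (1u << ggms) << 20;	/* 4 MiB of PTE space */
	unsigned long long entries = gtt_bytes / 8;	/* 8-byte gen8 PTEs */
 
	printf("%u MiB of PTEs -> %llu GiB mappable\n",
	       gtt_bytes >> 20, (entries << 12) >> 30);	/* 4 MiB -> 2 GiB */
	return 0;
}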
 
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
887,6 → 1280,108
return snb_gmch_ctl << 25; /* 32 MB units */
}
 
static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
return bdw_gmch_ctl << 25; /* 32 MB units */
}
 
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_phys_addr;
int ret;
 
/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
 
dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
 
ret = setup_scratch_page(dev);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(dev_priv->gtt.gsm);
}
 
return ret;
}
 
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
{
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
/* FIXME(BDW): Bspec is completely confused about cache control bits. */
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
uint64_t pat;
 
pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
* write would work. */
I915_WRITE(GEN8_PRIVATE_PAT, pat);
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}
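 
GEN8_PPAT(i, x) simply places entry x in byte i of the 64-bit PAT value, and the PPAT index bits in each PTE/PDE select one of those eight bytes. A standalone check of the packing for two of the entries programmed above:
 
#include <stdio.h>
#include <stdint.h>
 
#define PPAT(i, x) ((uint64_t)(x) << ((i) * 8))
 
int main(void)
{
	uint64_t pat = PPAT(0, 3 | (1 << 2))	/* WB | LLC      -> byte 0 */
		     | PPAT(2, 2 | (2 << 2));	/* WT | LLCELLC  -> byte 2 */
 
	printf("pat = 0x%016llx\n", (unsigned long long)pat);	/* 0x00000000000a0007 */
	return 0;
}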
 
static int gen8_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
 
/* TODO: We're not aware of mappable constraints on gen8 yet */
*mappable_base = pci_resource_start(dev->pdev, 2);
*mappable_end = pci_resource_len(dev->pdev, 2);
 
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
 
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
*stolen = gen8_get_stolen_size(snb_gmch_ctl);
 
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
 
gen8_setup_private_ppat(dev_priv);
 
ret = ggtt_probe_common(dev, gtt_size);
 
dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
 
return ret;
}
 
static int gen6_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
894,7 → 1389,6
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
914,25 → 1408,14
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
 
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
 
/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
ret = ggtt_probe_common(dev, gtt_size);
 
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
 
ret = setup_scratch_page(dev);
if (ret)
DRM_ERROR("Scratch setup failed\n");
 
dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
 
968,6 → 1451,9
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
if (unlikely(dev_priv->gtt.do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
 
return 0;
}
 
985,7 → 1471,7
if (INTEL_INFO(dev)->gen <= 5) {
gtt->gtt_probe = i915_gmch_probe;
gtt->base.cleanup = i915_gmch_remove;
} else {
} else if (INTEL_INFO(dev)->gen < 8) {
gtt->gtt_probe = gen6_gmch_probe;
gtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
998,6 → 1484,9
gtt->base.pte_encode = ivb_pte_encode;
else
gtt->base.pte_encode = snb_pte_encode;
} else {
dev_priv->gtt.gtt_probe = gen8_gmch_probe;
dev_priv->gtt.base.cleanup = gen6_gmch_remove;
}
 
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
/drivers/video/drm/i915/i915_gem_stolen.c
250,7 → 250,7
}
 
sg = st->sgl;
sg->offset = offset;
sg->offset = 0;
sg->length = size;
 
sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
395,7 → 395,7
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
 
vma = i915_gem_vma_create(obj, ggtt);
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
420,6 → 420,7
 
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
i915_gem_object_pin_pages(obj);
 
return obj;
 
/drivers/video/drm/i915/i915_gem_tiling.c
308,7 → 308,7
return -EINVAL;
}
 
if (obj->pin_count) {
if (obj->pin_count || obj->framebuffer_references) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
393,7 → 393,7
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
sizeof(long), GFP_KERNEL);
}
} else {
505,7 → 505,7
int i;
 
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
sizeof(long), GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
523,5 → 523,4
i++;
}
}
 
#endif
/drivers/video/drm/i915/i915_gpu_error.c
215,6 → 215,24
}
}
 
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
switch (a) {
case HANGCHECK_IDLE:
return "idle";
case HANGCHECK_WAIT:
return "wait";
case HANGCHECK_ACTIVE:
return "active";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
return "hung";
}
 
return "unknown";
}
 
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct drm_i915_error_state *error,
221,6 → 239,9
unsigned ring)
{
BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
if (!error->ring[ring].valid)
return;
 
err_printf(m, "%s command stream:\n", ring_str(ring));
err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
229,11 → 250,11
err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
 
if (INTEL_INFO(dev)->gen >= 4)
if (INTEL_INFO(dev)->gen >= 4) {
err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]);
err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
}
err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
255,6 → 276,9
err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(error->hangcheck_action[ring]),
error->hangcheck_score[ring]);
}
 
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
272,7 → 296,6
struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
struct intel_ring_buffer *ring;
int i, j, page, offset, elt;
 
if (!error) {
283,7 → 306,7
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
290,6 → 313,7
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
 
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
306,7 → 330,7
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
for_each_ring(ring, dev_priv, i)
for (i = 0; i < ARRAY_SIZE(error->ring); i++)
i915_ring_error_state(m, dev, error, i);
 
if (error->active_bo)
363,8 → 387,7
}
}
 
obj = error->ring[i].ctx;
if (obj) {
if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
601,6 → 624,7
 
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 8:
case 7:
case 6:
for (i = 0; i < dev_priv->num_fence_regs; i++)
644,7 → 668,8
return NULL;
 
obj = ring->scratch.obj;
if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
if (obj != NULL &&
acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
}
701,8 → 726,10
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
if (INTEL_INFO(dev)->gen >= 8)
error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
720,6 → 747,9
 
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
 
error->hangcheck_score[ring->id] = ring->hangcheck.score;
error->hangcheck_action[ring->id] = ring->hangcheck.action;
}
 
 
747,11 → 777,17
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
 
for_each_ring(ring, dev_priv, i) {
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
 
if (ring->dev == NULL)
continue;
 
error->ring[i].valid = true;
 
i915_record_ring_state(dev, error, ring);
 
error->ring[i].batchbuffer =
769,7 → 805,7
 
error->ring[i].num_requests = count;
error->ring[i].requests =
kmalloc(count*sizeof(struct drm_i915_error_request),
kcalloc(count, sizeof(*error->ring[i].requests),
GFP_ATOMIC);
if (error->ring[i].requests == NULL) {
error->ring[i].num_requests = 0;
811,7 → 847,7
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
if (i) {
active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
if (active_bo)
pinned_bo = active_bo + error->active_bo_count[ndx];
}
885,8 → 921,12
return;
}
 
DRM_INFO("capturing error event; look for more information in "
"/sys/class/drm/card%d/error\n", dev->primary->index);
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
dev->primary->index);
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
 
kref_init(&error->ref);
error->eir = I915_READ(EIR);
988,6 → 1028,7
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped or LLC";
case I915_CACHE_L3_LLC: return " L3+LLC";
case I915_CACHE_WT: return " WT";
default: return "";
}
}
1013,6 → 1054,7
default:
WARN_ONCE(1, "Unsupported platform\n");
case 7:
case 8:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
/drivers/video/drm/i915/i915_irq.c
62,7 → 62,7
[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
 
static const u32 hpd_status_gen4[] = {
static const u32 hpd_status_g4x[] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
282,6 → 282,21
}
}
 
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (enable)
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
else
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
 
/**
* ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private
394,6 → 409,8
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN8(dev))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
454,7 → 471,7
 
 
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
471,7 → 488,7
}
 
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
499,9 → 516,10
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_ENABLE);
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
531,6 → 549,12
}
}
 
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
/* Gen2 doesn't have a hardware frame counter */
return 0;
}
 
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
539,7 → 563,7
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low;
u32 high1, high2, low, pixel, vbl_start;
 
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
547,6 → 571,24
return 0;
}
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
const struct drm_display_mode *mode =
&intel_crtc->config.adjusted_mode;
 
vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
} else {
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
u32 htotal;
 
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
 
vbl_start *= htotal;
}
 
high_frame = PIPEFRAME(pipe);
low_frame = PIPEFRAMEPIXEL(pipe);
 
557,13 → 599,20
*/
do {
high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
low = I915_READ(low_frame);
high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
 
high1 >>= PIPE_FRAME_HIGH_SHIFT;
pixel = low & PIPE_PIXEL_MASK;
low >>= PIPE_FRAME_LOW_SHIFT;
return (high1 << 8) | low;
 
/*
* The frame counter increments at beginning of active.
* Cook up a vblank counter by also checking the pixel
* counter against vblank start.
*/
return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
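
For reference, the cook-up in the new return statement can be modeled in isolation. A minimal user-space sketch with illustrative values (not the driver's actual register reads):

#include <stdint.h>
#include <stdio.h>

/* Minimal model of the cook-up above: the hardware frame counter
 * increments at the start of active video, so the counter is bumped
 * by one once the pixel counter has passed vblank start. All values
 * are illustrative, not real register reads. */
static uint32_t cooked_vblank_count(uint32_t high1, uint32_t low,
                                    uint32_t pixel, uint32_t vbl_start)
{
        /* 24-bit result: high bits, low bits, plus the vblank fixup */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

int main(void)
{
        uint32_t vbl_start = 1080 * 2200;       /* hypothetical mode */

        /* one pixel before vblank start: counter not yet bumped */
        printf("%u\n", cooked_vblank_count(0x12, 0x34, vbl_start - 1, vbl_start));
        /* at vblank start: counter bumped by one */
        printf("%u\n", cooked_vblank_count(0x12, 0x34, vbl_start, vbl_start));
        return 0;
}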
 
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
580,67 → 629,164
return I915_READ(reg);
}
 
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
 
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t status;
 
if (INTEL_INFO(dev)->gen < 7) {
status = pipe == PIPE_A ?
DE_PIPEA_VBLANK :
DE_PIPEB_VBLANK;
} else {
switch (pipe) {
default:
case PIPE_A:
status = DE_PIPEA_VBLANK_IVB;
break;
case PIPE_B:
status = DE_PIPEB_VBLANK_IVB;
break;
case PIPE_C:
status = DE_PIPEC_VBLANK_IVB;
break;
}
}
 
return __raw_i915_read32(dev_priv, DEISR) & status;
}
 
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int *vpos, int *hpos)
unsigned int flags, int *vpos, int *hpos,
void *stime, void *etime)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 vbl = 0, position = 0;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
int position;
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe)) {
if (!intel_crtc->active) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
}
 
/* Get vtotal. */
vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
htotal = mode->crtc_htotal;
vtotal = mode->crtc_vtotal;
vbl_start = mode->crtc_vblank_start;
vbl_end = mode->crtc_vblank_end;
 
if (INTEL_INFO(dev)->gen >= 4) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
vbl_start = DIV_ROUND_UP(vbl_start, 2);
vbl_end /= 2;
vtotal /= 2;
}
 
ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
 
/*
* Lock uncore.lock, as we will do multiple timing critical raw
* register reads, potentially with preemption disabled, so the
* following code must not block on uncore.lock.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
 
 
if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
position = I915_READ(PIPEDSL(pipe));
if (IS_GEN2(dev))
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
/* Decode into vertical scanout position. Don't have
 * horizontal scanout position.
 */
*vpos = position & 0x1fff;
*hpos = 0;
if (HAS_PCH_SPLIT(dev)) {
/*
 * The scanline counter increments at the leading edge
 * of hsync, ie. it completely misses the active portion
 * of the line. Fix up the counter at both edges of vblank
 * to get a more accurate picture whether we're in vblank
 * or not.
 */
in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
if ((in_vbl && position == vbl_start - 1) ||
(!in_vbl && position == vbl_end - 1))
position = (position + 1) % vtotal;
} else {
/*
* ISR vblank status bits don't work the way we'd want
* them to work on non-PCH platforms (for
* ilk_pipe_in_vblank_locked()), and there doesn't
* appear any other way to determine if we're currently
* in vblank.
*
* Instead let's assume that we're already in vblank if
* we got called from the vblank interrupt and the
* scanline counter value indicates that we're on the
* line just prior to vblank start. This should result
* in the correct answer, unless the vblank interrupt
* delivery really got delayed for almost exactly one
* full frame/field.
*/
if (flags & DRM_CALLED_FROM_VBLIRQ &&
position == vbl_start - 1) {
position = (position + 1) % vtotal;
 
/* Signal this correction as "applied". */
ret |= 0x8;
}
}
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
* scanout position.
*/
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
*vpos = position / htotal;
*hpos = position - (*vpos * htotal);
/* convert to pixel counts */
vbl_start *= htotal;
vbl_end *= htotal;
vtotal *= htotal;
}
 
/* Query vblank area. */
vbl = I915_READ(VBLANK(cpu_transcoder));
 
/* Test position against vblank region. */
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
 
if ((*vpos < vbl_start) || (*vpos > vbl_end))
in_vbl = false;
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
/* Inside "upper part" of vblank area? Apply corrective offset: */
if (in_vbl && (*vpos >= vbl_start))
*vpos = *vpos - vtotal;
in_vbl = position >= vbl_start && position < vbl_end;
 
/* Readouts valid? */
if (vbl > 0)
ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
/*
* While in vblank, position will be negative
* counting up towards 0 at vbl_end. And outside
* vblank, position will be positive counting
* up since vbl_end.
*/
if (position >= vbl_start)
position -= vbl_end;
else
position += vtotal - vbl_end;
 
if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
*vpos = position;
*hpos = 0;
} else {
*vpos = position / htotal;
*hpos = position - (*vpos * htotal);
}
 
/* In vblank? */
if (in_vbl)
ret |= DRM_SCANOUTPOS_INVBL;
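
The sign convention in the position fix-up above is easy to check in isolation. A small sketch with a hypothetical 1125-line mode (pure arithmetic, no hardware access):

#include <stdio.h>

/* Model of the scanout-position normalization above: inside vblank the
 * returned position counts up from a negative value towards 0 at
 * vbl_end; outside vblank it counts up from 0. Timings illustrative. */
static int normalize_scanout(int position, int vbl_start, int vbl_end, int vtotal)
{
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;
        return position;
}

int main(void)
{
        /* hypothetical mode: 1125 total lines, vblank from 1080 to 1125 */
        printf("%d\n", normalize_scanout(1080, 1080, 1125, 1125)); /* -45: vblank start */
        printf("%d\n", normalize_scanout(0,    1080, 1125, 1125)); /*   0: vblank end   */
        printf("%d\n", normalize_scanout(540,  1080, 1125, 1125)); /* 540: mid-frame    */
        return 0;
}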
675,10 → 821,12
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
crtc);
crtc,
&to_intel_crtc(crtc)->config.adjusted_mode);
}
 
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
static bool intel_hpd_irq_event(struct drm_device *dev,
struct drm_connector *connector)
{
enum drm_connector_status old_status;
 
686,11 → 834,16
old_status = connector->status;
 
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
if (old_status == connector->status)
return false;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
old_status, connector->status);
return (old_status != connector->status);
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
 
return true;
}
 
/*
814,7 → 967,7
if (ring->obj == NULL)
return;
 
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
trace_i915_gem_request_complete(ring);
 
wake_up_all(&ring->irq_queue);
}
824,7 → 977,7
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.work);
u32 pm_iir;
u8 new_delay;
int new_delay, adj;
 
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
841,41 → 994,48
 
mutex_lock(&dev_priv->rps.hw_lock);
 
adj = dev_priv->rps.last_adj;
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
new_delay = dev_priv->rps.cur_delay + 1;
if (adj > 0)
adj *= 2;
else
adj = 1;
new_delay = dev_priv->rps.cur_delay + adj;
 
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
if (IS_VALLEYVIEW(dev_priv->dev) &&
dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
if (new_delay < dev_priv->rps.rpe_delay)
new_delay = dev_priv->rps.rpe_delay;
} else
new_delay = dev_priv->rps.cur_delay - 1;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
new_delay = dev_priv->rps.rpe_delay;
else
new_delay = dev_priv->rps.min_delay;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
else
adj = -1;
new_delay = dev_priv->rps.cur_delay + adj;
} else { /* unknown event */
new_delay = dev_priv->rps.cur_delay;
}
 
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
if (new_delay >= dev_priv->rps.min_delay &&
new_delay <= dev_priv->rps.max_delay) {
new_delay = clamp_t(int, new_delay,
dev_priv->rps.min_delay, dev_priv->rps.max_delay);
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
 
if (IS_VALLEYVIEW(dev_priv->dev))
valleyview_set_rps(dev_priv->dev, new_delay);
else
gen6_set_rps(dev_priv->dev, new_delay);
}
 
if (IS_VALLEYVIEW(dev_priv->dev)) {
/*
* On VLV, when we enter RC6 we may not be at the minimum
* voltage level, so arm a timer to check. It should only
* fire when there's activity or once after we've entered
* RC6, and then won't be re-armed until the next RPS interrupt.
*/
// mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
// msecs_to_jiffies(100));
}
 
mutex_unlock(&dev_priv->rps.hw_lock);
}
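
A user-space model of the new step logic, assuming made-up frequency codes for cur/min/max/RPe: repeated up or down events double the step (last_adj), a down-timeout falls back towards RPe, and the result is clamped.

#include <stdio.h>

/* Model of the RPS step logic above; values are illustrative. */
struct rps { int cur, min, max, rpe, last_adj; };

enum rps_event { RPS_UP, RPS_DOWN, RPS_DOWN_TIMEOUT };

static void rps_step(struct rps *r, enum rps_event ev)
{
        int adj = r->last_adj;
        int new_delay;

        switch (ev) {
        case RPS_UP:
                adj = adj > 0 ? adj * 2 : 1;
                new_delay = r->cur + adj;
                if (new_delay < r->rpe)
                        new_delay = r->rpe;     /* jump straight to RPe */
                break;
        case RPS_DOWN_TIMEOUT:
                new_delay = r->cur > r->rpe ? r->rpe : r->min;
                adj = 0;
                break;
        default: /* RPS_DOWN */
                adj = adj < 0 ? adj * 2 : -1;
                new_delay = r->cur + adj;
                break;
        }

        /* clamp, then remember the effective step for next time */
        if (new_delay < r->min) new_delay = r->min;
        if (new_delay > r->max) new_delay = r->max;
        r->last_adj = new_delay - r->cur;
        r->cur = new_delay;
}

int main(void)
{
        struct rps r = { .cur = 6, .min = 3, .max = 16, .rpe = 8, .last_adj = 0 };
        rps_step(&r, RPS_UP);                   /* 6 -> 8 (RPe)            */
        rps_step(&r, RPS_UP);                   /* 8 -> 12 (step 2 -> 4)   */
        printf("cur=%d last_adj=%d\n", r.cur, r.last_adj);
        return 0;
}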
 
894,9 → 1054,10
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
char *parity_event[6];
uint32_t misccpctl;
unsigned long flags;
uint8_t slice = 0;
 
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
904,43 → 1065,67
*/
mutex_lock(&dev_priv->dev->struct_mutex);
 
/* If we've screwed up tracking, just let the interrupt fire again */
if (WARN_ON(!dev_priv->l3_parity.which_slice))
goto out;
 
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);
 
error_status = I915_READ(GEN7_L3CDERRST1);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
u32 reg;
 
slice--;
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
break;
 
dev_priv->l3_parity.which_slice &= ~(1<<slice);
 
reg = GEN7_L3CDERRST1 + (slice * 0x200);
 
error_status = I915_READ(reg);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
 
I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
GEN7_L3CDERRST1_ENABLE);
POSTING_READ(GEN7_L3CDERRST1);
I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
POSTING_READ(reg);
 
DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
slice, row, bank, subbank);
 
}
 
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
mutex_unlock(&dev_priv->dev->struct_mutex);
 
DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
row, bank, subbank);
 
}
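
The slice walk introduced above boils down to an ffs()-driven bitmask loop: which_slice holds pending slices, and each iteration clears the lowest set bit before servicing it. A minimal stand-alone sketch:

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int which_slice = (1 << 0) | (1 << 1); /* both slices pending */
        int slice;

        while ((slice = ffs(which_slice)) != 0) {
                slice--;                        /* ffs is 1-based */
                which_slice &= ~(1u << slice);
                printf("servicing L3 slice %d\n", slice);
        }
        return 0;
}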
 
static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
if (!HAS_L3_GPU_CACHE(dev))
if (!HAS_L3_DPF(dev))
return;
 
spin_lock(&dev_priv->irq_lock);
ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);
 
iir &= GT_PARITY_ERROR(dev);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
dev_priv->l3_parity.which_slice |= 1 << 1;
 
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
dev_priv->l3_parity.which_slice |= 1 << 0;
 
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
 
975,10 → 1160,60
i915_handle_error(dev, false);
}
 
if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
ivybridge_parity_error_irq_handler(dev);
if (gt_iir & GT_PARITY_ERROR(dev))
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
 
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
{
u32 rcs, bcs, vcs;
uint32_t tmp = 0;
irqreturn_t ret = IRQ_NONE;
 
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(0));
if (tmp) {
ret = IRQ_HANDLED;
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
if (rcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
I915_WRITE(GEN8_GT_IIR(0), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
 
if (master_ctl & GEN8_GT_VCS1_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(1));
if (tmp) {
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
I915_WRITE(GEN8_GT_IIR(1), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
 
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VECS]);
I915_WRITE(GEN8_GT_IIR(3), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
 
return ret;
}
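
The per-bank decode pattern above reduces to shifts and masks: one 32-bit GT IIR packs several engines' interrupt bits at fixed offsets. A toy model with stand-in shift values (not the real GEN8 layout):

#include <stdint.h>
#include <stdio.h>

#define USER_INTERRUPT  (1u << 0)       /* illustrative */
#define RCS_SHIFT       0               /* illustrative */
#define BCS_SHIFT       16              /* illustrative */

int main(void)
{
        uint32_t iir = USER_INTERRUPT << BCS_SHIFT;     /* only BCS fired */

        if ((iir >> RCS_SHIFT) & USER_INTERRUPT)
                printf("notify RCS\n");
        if ((iir >> BCS_SHIFT) & USER_INTERRUPT)
                printf("notify BCS\n");
        return 0;
}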
 
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
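
The constants suggest the storm heuristic: more than five hotplug interrupts on one pin inside a 1000 ms window mark that pin as stormy so it can be masked. A hedged user-space model of that idea (timestamps are illustrative milliseconds, not jiffies; the function name is made up for this sketch):

#include <stdio.h>

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

struct hpd_pin { unsigned long window_start; int count; };

static int hpd_storm_detect(struct hpd_pin *pin, unsigned long now_ms)
{
        if (now_ms - pin->window_start > HPD_STORM_DETECT_PERIOD) {
                pin->window_start = now_ms;     /* open a new window */
                pin->count = 0;
        }
        return ++pin->count > HPD_STORM_THRESHOLD;
}

int main(void)
{
        struct hpd_pin pin = { 0, 0 };
        unsigned long t;

        for (t = 0; t < 700; t += 100)          /* 7 interrupts in 700 ms */
                if (hpd_storm_detect(&pin, t))
                        printf("storm detected at %lu ms\n", t);
        return 0;
}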
 
996,9 → 1231,10
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
 
WARN(((hpd[i] & hotplug_trigger) &&
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
"Received HPD interrupt although disabled\n");
WARN_ONCE(hpd[i] & hotplug_trigger &&
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
"Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
hotplug_trigger, i, hpd[i]);
 
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1050,6 → 1286,102
wake_up_all(&dev_priv->gmbus_wait_queue);
}
 
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_pipe_crc_entry *entry;
int head, tail;
 
spin_lock(&pipe_crc->lock);
 
if (!pipe_crc->entries) {
spin_unlock(&pipe_crc->lock);
DRM_ERROR("spurious interrupt\n");
return;
}
 
head = pipe_crc->head;
tail = pipe_crc->tail;
 
if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
spin_unlock(&pipe_crc->lock);
DRM_ERROR("CRC buffer overflowing\n");
return;
}
 
entry = &pipe_crc->entries[head];
 
entry->frame = dev->driver->get_vblank_counter(dev, pipe);
entry->crc[0] = crc0;
entry->crc[1] = crc1;
entry->crc[2] = crc2;
entry->crc[3] = crc3;
entry->crc[4] = crc4;
 
head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
pipe_crc->head = head;
 
spin_unlock(&pipe_crc->lock);
 
wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4) {}
#endif
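
The head/tail handling above follows the one-slot-free circular buffer convention (head == tail means empty). A self-contained model; CIRC_SPACE mirrors the kernel macro, and 8 entries stand in for INTEL_PIPE_CRC_ENTRIES_NR:

#include <stdio.h>

#define ENTRIES_NR 8    /* must be a power of two, like the driver's */
#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

int main(void)
{
        unsigned int head = 0, tail = 0, frame;

        for (frame = 0; frame < 10; frame++) {
                if (CIRC_SPACE(head, tail, ENTRIES_NR) < 1) {
                        printf("frame %u: buffer overflowing, dropped\n", frame);
                        continue;
                }
                /* entries[head] would be filled with the 5 CRC words here */
                head = (head + 1) & (ENTRIES_NR - 1);
        }
        printf("stored %u of 10 frames\n", head);       /* 7: one slot kept free */
        return 0;
}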
 
 
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
display_pipe_crc_irq_handler(dev, pipe,
I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
0, 0, 0, 0);
}
 
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
display_pipe_crc_irq_handler(dev, pipe,
I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
 
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t res1, res2;
 
if (INTEL_INFO(dev)->gen >= 3)
res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
else
res1 = 0;
 
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
else
res2 = 0;
 
display_pipe_crc_irq_handler(dev, pipe,
I915_READ(PIPE_CRC_RES_RED(pipe)),
I915_READ(PIPE_CRC_RES_GREEN(pipe)),
I915_READ(PIPE_CRC_RES_BLUE(pipe)),
res1, res2);
}
 
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
1116,17 → 1448,18
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
#if 0
for_each_pipe(pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
drm_handle_vblank(dev, pipe);
// if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
// drm_handle_vblank(dev, pipe);
 
if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip(dev, pipe);
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip(dev, pipe);
}
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
}
#endif
 
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
1138,6 → 1471,9
 
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
dp_aux_irq_handler(dev);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
1214,22 → 1550,27
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 err_int = I915_READ(GEN7_ERR_INT);
enum pipe pipe;
 
if (err_int & ERR_INT_POISON)
DRM_ERROR("Poison interrupt\n");
 
if (err_int & ERR_INT_FIFO_UNDERRUN_A)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
for_each_pipe(pipe) {
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
pipe_name(pipe));
}
 
if (err_int & ERR_INT_FIFO_UNDERRUN_B)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
if (IS_IVYBRIDGE(dev))
ivb_pipe_crc_irq_handler(dev, pipe);
else
hsw_pipe_crc_irq_handler(dev, pipe);
}
}
 
if (err_int & ERR_INT_FIFO_UNDERRUN_C)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
 
I915_WRITE(GEN7_ERR_INT, err_int);
}
 
1299,6 → 1640,7
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
 
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
1306,35 → 1648,27
if (de_iir & DE_GSE)
intel_opregion_asle_intr(dev);
 
#if 0
if (de_iir & DE_PIPEA_VBLANK)
drm_handle_vblank(dev, 0);
 
if (de_iir & DE_PIPEB_VBLANK)
drm_handle_vblank(dev, 1);
#endif
 
if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n");
 
if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
for_each_pipe(pipe) {
// if (de_iir & DE_PIPE_VBLANK(pipe))
// drm_handle_vblank(dev, pipe);
 
if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
#if 0
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip_plane(dev, 0);
}
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
pipe_name(pipe));
 
if (de_iir & DE_PLANEB_FLIP_DONE) {
intel_prepare_page_flip(dev, 1);
intel_finish_page_flip_plane(dev, 1);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
i9xx_pipe_crc_irq_handler(dev, pipe);
 
/* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip_plane(dev, pipe);
}
#endif
}
 
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
1356,7 → 1690,7
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
enum pipe i;
 
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev);
1366,16 → 1700,17
 
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev);
#if 0
for (i = 0; i < 3; i++) {
if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
drm_handle_vblank(dev, i);
if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
 
for_each_pipe(i) {
// if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
// drm_handle_vblank(dev, i);
 
/* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
// intel_prepare_page_flip(dev, i);
// intel_finish_page_flip_plane(dev, i);
}
}
#endif
 
/* check event from PCH */
if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1394,7 → 1729,6
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
bool err_int_reenable = false;
 
atomic_inc(&dev_priv->irq_received);
 
1418,17 → 1752,6
POSTING_READ(SDEIER);
}
 
/* On Haswell, also mask ERR_INT because we don't want to risk
* generating "unclaimed register" interrupts from inside the interrupt
* handler. */
if (IS_HASWELL(dev)) {
spin_lock(&dev_priv->irq_lock);
err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
if (err_int_reenable)
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}
 
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
if (INTEL_INFO(dev)->gen >= 6)
1458,13 → 1781,6
}
}
 
if (err_int_reenable) {
spin_lock(&dev_priv->irq_lock);
if (ivb_can_enable_err_int(dev))
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}
 
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
if (!HAS_PCH_NOP(dev)) {
1475,6 → 1791,117
return ret;
}
 
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl;
irqreturn_t ret = IRQ_NONE;
uint32_t tmp = 0;
enum pipe pipe;
 
atomic_inc(&dev_priv->irq_received);
 
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
return IRQ_NONE;
 
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
 
ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
 
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
if (tmp & GEN8_DE_MISC_GSE)
intel_opregion_asle_intr(dev);
else if (tmp)
DRM_ERROR("Unexpected DE Misc interrupt\n");
else
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
 
if (tmp) {
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
ret = IRQ_HANDLED;
}
}
 
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
if (tmp & GEN8_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
else if (tmp)
DRM_ERROR("Unexpected DE Port interrupt\n");
else
DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
 
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
}
}
 
for_each_pipe(pipe) {
uint32_t pipe_iir;
 
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
 
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
// if (pipe_iir & GEN8_PIPE_VBLANK)
// drm_handle_vblank(dev, pipe);
 
if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip_plane(dev, pipe);
}
 
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
pipe_name(pipe));
}
 
if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
pipe_name(pipe),
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
}
 
if (pipe_iir) {
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
 
if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
/*
* FIXME(BDW): Assume for now that the new interrupt handling
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
u32 pch_iir = I915_READ(SDEIIR);
 
cpt_irq_handler(dev, pch_iir);
 
if (pch_iir) {
I915_WRITE(SDEIIR, pch_iir);
ret = IRQ_HANDLED;
}
}
 
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
 
return ret;
}
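
The overall shape of this handler is disable-master, dispatch by category bit, re-enable. A hedged stand-alone sketch of that flow with made-up bit positions:

#include <stdint.h>
#include <stdio.h>

#define MASTER_ENABLE   (1u << 31)      /* illustrative */
#define DE_MISC_IRQ     (1u << 29)      /* illustrative */
#define DE_PORT_IRQ     (1u << 28)      /* illustrative */

static void handle(uint32_t master_ctl)
{
        master_ctl &= ~MASTER_ENABLE;   /* the control bit isn't a source */
        if (!master_ctl)
                return;                 /* not ours: IRQ_NONE */

        /* the driver writes 0 to the master IRQ here to block nesting */
        if (master_ctl & DE_MISC_IRQ)
                printf("handle DE misc\n");
        if (master_ctl & DE_PORT_IRQ)
                printf("handle DE port (AUX etc.)\n");
        /* then restores MASTER_ENABLE and posts the write */
}

int main(void)
{
        handle(MASTER_ENABLE | DE_PORT_IRQ);
        return 0;
}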
 
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
1557,7 → 1984,7
atomic_inc(&dev_priv->gpu_error.reset_counter);
 
} else {
atomic_set(&error->reset_counter, I915_WEDGED);
atomic_set_mask(I915_WEDGED, &error->reset_counter);
}
 
/*
1787,7 → 2214,7
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK_ILK(pipe);
DE_PIPE_VBLANK(pipe);
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
1810,7 → 2237,7
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
imr = I915_READ(VLV_IMR);
if (pipe == 0)
if (pipe == PIPE_A)
imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1822,6 → 2249,22
return 0;
}
 
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
 
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
1845,7 → 2288,7
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK_ILK(pipe);
DE_PIPE_VBLANK(pipe);
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, bit);
1862,7 → 2305,7
i915_disable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
imr = I915_READ(VLV_IMR);
if (pipe == 0)
if (pipe == PIPE_A)
imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1870,6 → 2313,21
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
2048,6 → 2506,7
acthd);
 
switch (ring->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
2063,6 → 2522,8
}
}
} else {
ring->hangcheck.action = HANGCHECK_ACTIVE;
 
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
2175,6 → 2636,55
POSTING_READ(VLV_IER);
}
 
static void gen8_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
 
/* IIR can theoretically queue up two events. Be paranoid */
#define GEN8_IRQ_INIT_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
POSTING_READ(GEN8_##type##_IMR(which)); \
I915_WRITE(GEN8_##type##_IER(which), 0); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)
 
#define GEN8_IRQ_INIT(type) do { \
I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
POSTING_READ(GEN8_##type##_IMR); \
I915_WRITE(GEN8_##type##_IER, 0); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
} while (0)
 
GEN8_IRQ_INIT_NDX(GT, 0);
GEN8_IRQ_INIT_NDX(GT, 1);
GEN8_IRQ_INIT_NDX(GT, 2);
GEN8_IRQ_INIT_NDX(GT, 3);
 
for_each_pipe(pipe) {
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
}
 
GEN8_IRQ_INIT(DE_PORT);
GEN8_IRQ_INIT(DE_MISC);
GEN8_IRQ_INIT(PCU);
#undef GEN8_IRQ_INIT
#undef GEN8_IRQ_INIT_NDX
 
POSTING_READ(GEN8_PCU_IIR);
 
ibx_irq_preinstall(dev);
}
 
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2239,10 → 2749,10
pm_irqs = gt_irqs = 0;
 
dev_priv->gt_irq_mask = ~0;
if (HAS_L3_GPU_CACHE(dev)) {
if (HAS_L3_DPF(dev)) {
/* L3 parity interrupt is always unmasked. */
dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
gt_irqs |= GT_PARITY_ERROR(dev);
}
 
gt_irqs |= GT_RENDER_USER_INTERRUPT;
2291,8 → 2801,10
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
DE_AUX_CHANNEL_A |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
DE_POISON);
extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
}
 
2326,7 → 2838,8
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
PIPE_CRC_DONE_ENABLE;
unsigned long irqflags;
 
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2356,9 → 2869,9
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
I915_WRITE(VLV_IIR, 0xffffffff);
2377,6 → 2890,117
return 0;
}
 
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
int i;
 
/* These are interrupts we'll toggle with the ring mask register */
uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
0,
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
 
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
u32 tmp = I915_READ(GEN8_GT_IIR(i));
if (tmp)
DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
i, tmp);
I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
}
POSTING_READ(GEN8_GT_IER(0));
}
 
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE |
GEN8_PIPE_FIFO_UNDERRUN |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
int pipe;
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
 
for_each_pipe(pipe) {
u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (tmp)
DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
pipe, tmp);
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
}
POSTING_READ(GEN8_DE_PIPE_ISR(0));
 
I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
POSTING_READ(GEN8_DE_PORT_IER);
}
 
static int gen8_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
 
ibx_irq_postinstall(dev);
 
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
 
return 0;
}
 
static void gen8_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
if (!dev_priv)
return;
 
atomic_set(&dev_priv->irq_received, 0);
 
I915_WRITE(GEN8_MASTER_IRQ, 0);
 
#define GEN8_IRQ_FINI_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
I915_WRITE(GEN8_##type##_IER(which), 0); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)
 
#define GEN8_IRQ_FINI(type) do { \
I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
I915_WRITE(GEN8_##type##_IER, 0); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
} while (0)
 
GEN8_IRQ_FINI_NDX(GT, 0);
GEN8_IRQ_FINI_NDX(GT, 1);
GEN8_IRQ_FINI_NDX(GT, 2);
GEN8_IRQ_FINI_NDX(GT, 3);
 
for_each_pipe(pipe) {
GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
}
 
GEN8_IRQ_FINI(DE_PORT);
GEN8_IRQ_FINI(DE_MISC);
GEN8_IRQ_FINI(PCU);
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX
 
POSTING_READ(GEN8_PCU_IIR);
}
 
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2451,6 → 3075,7
static int i8xx_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
 
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2471,6 → 3096,13
I915_USER_INTERRUPT);
POSTING_READ16(IER);
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
}
 
2478,10 → 3110,10
* Returns true when a page flip has completed.
*/
static bool i8xx_handle_vblank(struct drm_device *dev,
int pipe, u16 iir)
int plane, int pipe, u32 iir)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
// if (!drm_handle_vblank(dev, pipe))
return false;
2557,14 → 3189,19
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
 
if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
i8xx_handle_vblank(dev, 0, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
for_each_pipe(pipe) {
int plane = pipe;
if (HAS_FBC(dev))
plane = !plane;
 
if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
i8xx_handle_vblank(dev, 1, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
i8xx_handle_vblank(dev, plane, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
}
 
iir = new_iir;
}
 
2612,6 → 3249,7
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
unsigned long irqflags;
 
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
2647,6 → 3285,13
 
i915_enable_asle_pipestat(dev);
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
}
 
2749,7 → 3394,7
 
for_each_pipe(pipe) {
int plane = pipe;
if (IS_MOBILE(dev))
if (HAS_FBC(dev))
plane = !plane;
 
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2758,6 → 3403,9
 
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
}
 
if (blc_event || (iir & I915_ASLE_INTERRUPT))
2856,7 → 3504,9
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
/*
2981,8 → 3631,12
hotplug_status);
 
intel_hpd_irq_handler(dev, hotplug_trigger,
IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
 
if (IS_G4X(dev) &&
(hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
dp_aux_irq_handler(dev);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
3002,6 → 3656,9
 
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
}
 
 
3106,18 → 3763,22
setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
(unsigned long) dev_priv);
 
 
if (IS_GEN2(dev)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
} else {
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
else
dev->driver->get_vblank_timestamp = NULL;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
}
 
if (IS_VALLEYVIEW(dev)) {
dev->driver->irq_handler = valleyview_irq_handler;
3127,6 → 3788,14
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_GEN8(dev)) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_preinstall;
dev->driver->irq_postinstall = gen8_irq_postinstall;
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
3196,8 → 3865,8
dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
 
ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
ironlake_disable_display_irq(dev_priv, 0xffffffff);
ibx_disable_display_interrupt(dev_priv, 0xffffffff);
ilk_disable_gt_irq(dev_priv, 0xffffffff);
snb_disable_pm_irq(dev_priv, 0xffffffff);
 
3211,34 → 3880,26
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t val, expected;
uint32_t val;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
val = I915_READ(DEIMR);
expected = ~DE_PCH_EVENT_IVB;
WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
 
val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
expected = ~SDE_HOTPLUG_MASK_CPT;
WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
val, expected);
val = I915_READ(SDEIMR);
WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
 
val = I915_READ(GTIMR);
expected = 0xffffffff;
WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
 
val = I915_READ(GEN6_PMIMR);
expected = 0xffffffff;
WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
expected);
WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
 
dev_priv->pc8.irqs_disabled = false;
 
ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
ibx_enable_display_interrupt(dev_priv,
~dev_priv->pc8.regsave.sdeimr &
~SDE_HOTPLUG_MASK_CPT);
ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
/drivers/video/drm/i915/i915_reg.h
26,6 → 26,7
#define _I915_REG_H_
 
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
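
These helpers linearly extrapolate a register bank from two known instances: instance N's offset is a + n*(b - a). A worked example with illustrative offsets:

#include <stdio.h>

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))

int main(void)
{
        unsigned int reg_a = 0x70000, reg_b = 0x71000;  /* illustrative */

        printf("pipe A: 0x%x\n", _PIPE(0, reg_a, reg_b)); /* 0x70000 */
        printf("pipe B: 0x%x\n", _PIPE(1, reg_a, reg_b)); /* 0x71000 */
        printf("pipe C: 0x%x\n", _PIPE(2, reg_a, reg_b)); /* 0x72000, extrapolated */
        return 0;
}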
109,6 → 110,9
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
#define PP_DIR_DCLV_2G 0xffffffff
 
#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
 
#define GAM_ECOCHK 0x4090
#define ECOCHK_SNB_BIT (1<<10)
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
189,10 → 193,13
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
#define MI_ARB_ENABLE (1<<0)
#define MI_ARB_DISABLE (0<<0)
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
208,10 → 215,24
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
#define MI_ARB_ENABLE (1<<0)
#define MI_ARB_DISABLE (0<<0)
 
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
#define MI_SEMAPHORE_REGISTER (1<<18)
#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
231,6 → 252,7
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
246,24 → 268,13
#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
#define MI_SEMAPHORE_REGISTER (1<<18)
#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
 
 
#define MI_PREDICATE_RESULT_2 (0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
#define LOWER_SLICE_DISABLED (0<<0)
 
/*
* 3D instructions used by the kernel
*/
343,15 → 354,38
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
#define IOSF_SB_BUSY (1<<0)
#define IOSF_PORT_BUNIT 0x3
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
#define IOSF_PORT_GPIO_NC 0x13
#define IOSF_PORT_CCK 0x14
#define IOSF_PORT_CCU 0xA9
#define IOSF_PORT_GPS_CORE 0x48
#define IOSF_PORT_FLISDSI 0x1B
#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
 
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
 
#define PUNIT_OPCODE_REG_READ 6
#define PUNIT_OPCODE_REG_WRITE 7
 
#define PUNIT_REG_DSPFREQ 0x36
#define DSPFREQSTAT_SHIFT 30
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_CLK_GATE 1
#define PUNIT_PWR_RESET 2
#define PUNIT_PWR_GATE 3
#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
 
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
372,6 → 406,41
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
 
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3
#define CCK_REG_DSI_PLL_FUSE 0x44
#define CCK_REG_DSI_PLL_CONTROL 0x48
#define DSI_PLL_VCO_EN (1 << 31)
#define DSI_PLL_LDO_GATE (1 << 30)
#define DSI_PLL_P1_POST_DIV_SHIFT 17
#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17)
#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13)
#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12)
#define DSI_PLL_MUX_MASK (3 << 9)
#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10)
#define DSI_PLL_MUX_DSI0_CCK (1 << 10)
#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9)
#define DSI_PLL_MUX_DSI1_CCK (1 << 9)
#define DSI_PLL_CLK_GATE_MASK (0xf << 5)
#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8)
#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7)
#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6)
#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5)
#define DSI_PLL_LOCK (1 << 0)
#define CCK_REG_DSI_PLL_DIVIDER 0x4c
#define DSI_PLL_LFSR (1 << 31)
#define DSI_PLL_FRACTION_EN (1 << 30)
#define DSI_PLL_FRAC_COUNTER_SHIFT 27
#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27)
#define DSI_PLL_USYNC_CNT_SHIFT 18
#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18)
#define DSI_PLL_N1_DIV_SHIFT 16
#define DSI_PLL_N1_DIV_MASK (3 << 16)
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
 
/*
* DPIO - a special bus for various display related registers to hide behind
*
387,17 → 456,15
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
#define DPIO_RESET (1<<0)
#define DPIO_CMNRST (1<<0)
 
#define _DPIO_TX3_SWING_CTL4_A 0x690
#define _DPIO_TX3_SWING_CTL4_B 0x2a90
#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \
_DPIO_TX3_SWING_CTL4_B)
#define DPIO_PHY(pipe) ((pipe) >> 1)
#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy])
 
/*
* Per pipe/PLL DPIO regs
*/
#define _DPIO_DIV_A 0x800c
#define _VLV_PLL_DW3_CH0 0x800c
#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
#define DPIO_POST_DIV_DAC 0
#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
410,10 → 477,10
#define DPIO_ENABLE_CALIBRATION (1<<11)
#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
#define DPIO_M2DIV_MASK 0xff
#define _DPIO_DIV_B 0x802c
#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
#define _VLV_PLL_DW3_CH1 0x802c
#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
 
#define _DPIO_REFSFR_A 0x8014
#define _VLV_PLL_DW5_CH0 0x8014
#define DPIO_REFSEL_OVERRIDE 27
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
421,119 → 488,113
#define DPIO_PLL_REFCLK_SEL_MASK 3
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
#define _DPIO_REFSFR_B 0x8034
#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
#define _VLV_PLL_DW5_CH1 0x8034
#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
 
#define _DPIO_CORE_CLK_A 0x801c
#define _DPIO_CORE_CLK_B 0x803c
#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
#define _VLV_PLL_DW7_CH0 0x801c
#define _VLV_PLL_DW7_CH1 0x803c
#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
 
#define _DPIO_IREF_CTL_A 0x8040
#define _DPIO_IREF_CTL_B 0x8060
#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
#define _VLV_PLL_DW8_CH0 0x8040
#define _VLV_PLL_DW8_CH1 0x8060
#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
 
#define DPIO_IREF_BCAST 0xc044
#define _DPIO_IREF_A 0x8044
#define _DPIO_IREF_B 0x8064
#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
#define VLV_PLL_DW9_BCAST 0xc044
#define _VLV_PLL_DW9_CH0 0x8044
#define _VLV_PLL_DW9_CH1 0x8064
#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
 
#define _DPIO_PLL_CML_A 0x804c
#define _DPIO_PLL_CML_B 0x806c
#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
#define _VLV_PLL_DW10_CH0 0x8048
#define _VLV_PLL_DW10_CH1 0x8068
#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
 
#define _DPIO_LPF_COEFF_A 0x8048
#define _DPIO_LPF_COEFF_B 0x8068
#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
#define _VLV_PLL_DW11_CH0 0x804c
#define _VLV_PLL_DW11_CH1 0x806c
#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
 
#define DPIO_CALIBRATION 0x80ac
/* Spec for ref block start counts at DW10 */
#define VLV_REF_DW13 0x80ac
 
#define DPIO_FASTCLK_DISABLE 0x8100
#define VLV_CMN_DW0 0x8100
 
/*
* Per DDI channel DPIO regs
*/
 
#define _DPIO_PCS_TX_0 0x8200
#define _DPIO_PCS_TX_1 0x8400
#define _VLV_PCS_DW0_CH0 0x8200
#define _VLV_PCS_DW0_CH1 0x8400
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
#define _DPIO_PCS_CLK_0 0x8204
#define _DPIO_PCS_CLK_1 0x8404
#define _VLV_PCS_DW1_CH0 0x8204
#define _VLV_PCS_DW1_CH1 0x8404
#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
 
#define _DPIO_PCS_CTL_OVR1_A 0x8224
#define _DPIO_PCS_CTL_OVR1_B 0x8424
#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
_DPIO_PCS_CTL_OVR1_B)
#define _VLV_PCS_DW8_CH0 0x8220
#define _VLV_PCS_DW8_CH1 0x8420
#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
 
#define _DPIO_PCS_STAGGER0_A 0x822c
#define _DPIO_PCS_STAGGER0_B 0x842c
#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
_DPIO_PCS_STAGGER0_B)
#define _VLV_PCS01_DW8_CH0 0x0220
#define _VLV_PCS23_DW8_CH0 0x0420
#define _VLV_PCS01_DW8_CH1 0x2620
#define _VLV_PCS23_DW8_CH1 0x2820
#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
 
#define _DPIO_PCS_STAGGER1_A 0x8230
#define _DPIO_PCS_STAGGER1_B 0x8430
#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
_DPIO_PCS_STAGGER1_B)
#define _VLV_PCS_DW9_CH0 0x8224
#define _VLV_PCS_DW9_CH1 0x8424
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
 
#define _DPIO_PCS_CLOCKBUF0_A 0x8238
#define _DPIO_PCS_CLOCKBUF0_B 0x8438
#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
_DPIO_PCS_CLOCKBUF0_B)
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
 
#define _DPIO_PCS_CLOCKBUF8_A 0x825c
#define _DPIO_PCS_CLOCKBUF8_B 0x845c
#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
_DPIO_PCS_CLOCKBUF8_B)
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
 
#define _DPIO_TX_SWING_CTL2_A 0x8288
#define _DPIO_TX_SWING_CTL2_B 0x8488
#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
_DPIO_TX_SWING_CTL2_B)
#define _VLV_PCS_DW14_CH0 0x8238
#define _VLV_PCS_DW14_CH1 0x8438
#define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
 
#define _DPIO_TX_SWING_CTL3_A 0x828c
#define _DPIO_TX_SWING_CTL3_B 0x848c
#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
_DPIO_TX_SWING_CTL3_B)
#define _VLV_PCS_DW23_CH0 0x825c
#define _VLV_PCS_DW23_CH1 0x845c
#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
 
#define _DPIO_TX_SWING_CTL4_A 0x8290
#define _DPIO_TX_SWING_CTL4_B 0x8490
#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
_DPIO_TX_SWING_CTL4_B)
#define _VLV_TX_DW2_CH0 0x8288
#define _VLV_TX_DW2_CH1 0x8488
#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
 
#define _DPIO_TX_OCALINIT_0 0x8294
#define _DPIO_TX_OCALINIT_1 0x8494
#define DPIO_TX_OCALINIT_EN (1<<31)
#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
_DPIO_TX_OCALINIT_1)
#define _VLV_TX_DW3_CH0 0x828c
#define _VLV_TX_DW3_CH1 0x848c
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
 
#define _DPIO_TX_CTL_0 0x82ac
#define _DPIO_TX_CTL_1 0x84ac
#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
#define _VLV_TX_DW4_CH0 0x8290
#define _VLV_TX_DW4_CH1 0x8490
#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
 
#define _DPIO_TX_LANE_0 0x82b8
#define _DPIO_TX_LANE_1 0x84b8
#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
#define _VLV_TX3_DW4_CH0 0x690
#define _VLV_TX3_DW4_CH1 0x2a90
#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
 
#define _DPIO_DATA_CHANNEL1 0x8220
#define _DPIO_DATA_CHANNEL2 0x8420
#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
#define _VLV_TX_DW5_CH0 0x8294
#define _VLV_TX_DW5_CH1 0x8494
#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
 
#define _DPIO_PORT0_PCS0 0x0220
#define _DPIO_PORT0_PCS1 0x0420
#define _DPIO_PORT1_PCS2 0x2620
#define _DPIO_PORT1_PCS3 0x2820
#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
#define DPIO_DATA_CHANNEL1 0x8220
#define DPIO_DATA_CHANNEL2 0x8420
#define _VLV_TX_DW11_CH0 0x82ac
#define _VLV_TX_DW11_CH1 0x84ac
#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
 
#define _VLV_TX_DW14_CH0 0x82b8
#define _VLV_TX_DW14_CH1 0x84b8
#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
 
/*
* Fence registers
*/
602,6 → 663,9
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
#define GAMTARBMODE 0x04a08
#define ARB_MODE_BWGTLB_DISABLE (1<<9)
#define ARB_MODE_SWIZZLE_BDW (1<<1)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define RING_FAULT_GTTSEL_MASK (1<<11)
609,6 → 673,7
#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
#define RING_FAULT_VALID (1<<0)
#define DONE_REG 0x40b0
#define GEN8_PRIVATE_PAT 0x40e0
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define VEBOX_HWS_PGA_GEN7 (0x04380)
669,13 → 734,20
#define NOPID 0x02094
#define HWSTAM 0x02098
#define DMA_FADD_I8XX 0x020d0
#define RING_BBSTATE(base) ((base)+0x110)
#define RING_BBADDR(base) ((base)+0x140)
#define RING_BBADDR_UDW(base) ((base)+0x168) /* gen8+ */
 
#define ERROR_GEN6 0x040a0
#define GEN7_ERR_INT 0x44040
#define ERR_INT_POISON (1<<31)
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3))
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
 
683,6 → 755,7
#define FPGA_DBG_RM_NOCLAIM (1<<31)
 
#define DERRMR 0x44050
/* Note that HBLANK events are reserved on bdw+ */
#define DERRMR_PIPEA_SCANLINE (1<<0)
#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
716,6 → 789,7
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1)
 
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
853,7 → 927,6
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 0x101008
#define GFX_FLSH_CNTL_EN (1<<0)
890,6 → 963,7
#define GT_BLT_USER_INTERRUPT (1 << 22)
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
900,6 → 974,10
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
 
#define GT_PARITY_ERROR(dev) \
(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
(IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
 
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
925,6 → 1003,7
 
#define GEN7_FF_THREAD_MODE 0x20a0
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
952,7 → 1031,7
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
#define FBC_CTL_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO (1<<0)
#define FBC_CTL_FENCENO_SHIFT (0)
#define FBC_COMMAND 0x0320c
#define FBC_CMD_COMPRESS (1<<0)
#define FBC_STATUS 0x03210
959,7 → 1038,7
#define FBC_STAT_COMPRESSING (1<<31)
#define FBC_STAT_COMPRESSED (1<<30)
#define FBC_STAT_MODIFIED (1<<29)
#define FBC_STAT_CURRENT_LINE (1<<0)
#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 0x03214
#define FBC_CTL_FENCE_DBL (0<<4)
#define FBC_CTL_IDLE_IMM (0<<2)
1048,9 → 1127,6
_HSW_PIPE_SLICE_CHICKEN_1_A, \
_HSW_PIPE_SLICE_CHICKEN_1_B)
 
#define HSW_CLKGATE_DISABLE_PART_1 0x46500
#define HSW_DPFC_GATING_DISABLE (1<<23)
 
/*
* GPIO regs
*/
1387,6 → 1463,12
 
#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
 
#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
#define CDCLK_FREQ_SHIFT 4
#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
#define CZCLK_FREQ_MASK 0xf
#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
 
/*
* Palette regs
*/
1404,6 → 1486,8
* device 0 function 0's pci config register 0x44 or 0x48 and matches it in
* every way. It is not accessible from the CP register read instructions.
*
* Starting from Haswell, you can't write registers using the MCHBAR mirror,
* just read.
*/
#define MCHBAR_MIRROR_BASE 0x10000
 
1410,7 → 1494,7
#define MCHBAR_MIRROR_BASE_SNB 0x140000
 
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK 0x5e04
#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
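
/*
 * Editor's sketch (assumption, not part of this revision): reading a
 * memory-controller register through the SNB+ MCHBAR mirror described
 * above. I915_READ() is the driver's usual MMIO accessor; the helper
 * name is hypothetical. Since Haswell the mirror is read-only, so no
 * I915_WRITE() may target these offsets.
 */
static inline u32 snb_read_mch_dclk(struct drm_i915_private *dev_priv)
{
	return I915_READ(DCLK); /* DCLK == MCHBAR_MIRROR_BASE_SNB + 0x5e04 */
}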
 
/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
1705,9 → 1789,9
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
 
#define GEN6_GT_PERF_STATUS 0x145948
#define GEN6_RP_STATE_LIMITS 0x145994
#define GEN6_RP_STATE_CAP 0x145998
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
 
/*
* Logical Context regs
1752,7 → 1836,13
* on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
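
/*
 * Worked check (editor's addition): 66944 / 4096 = 16.34, rounded up to
 * 17 pages on HSW; 72064 / 4096 = 17.59, rounded up to 18 pages on GEN8.
 */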
 
 
#define VLV_CLK_CTL2 0x101104
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
 
/*
* Overlay regs
*/
1771,6 → 1861,83
* Display engine regs
*/
 
/* Pipe A CRC regs */
#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050)
#define PIPE_CRC_ENABLE (1 << 31)
/* ivb+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
/* ilk+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
/* embedded DP port on the north display block, reserved on ivb */
#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
/* vlv source selection */
#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
/* with DP port the pipe source is invalid */
#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
/* gen3+ source selection */
#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
/* with DP/TV port the pipe source is invalid */
#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
/* gen2 doesn't have source selection bits */
#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
 
#define _PIPE_CRC_RES_1_A_IVB 0x60064
#define _PIPE_CRC_RES_2_A_IVB 0x60068
#define _PIPE_CRC_RES_3_A_IVB 0x6006c
#define _PIPE_CRC_RES_4_A_IVB 0x60070
#define _PIPE_CRC_RES_5_A_IVB 0x60074
 
#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060)
#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064)
#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068)
#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c)
#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080)
 
/* Pipe B CRC regs */
#define _PIPE_CRC_RES_1_B_IVB 0x61064
#define _PIPE_CRC_RES_2_B_IVB 0x61068
#define _PIPE_CRC_RES_3_B_IVB 0x6106c
#define _PIPE_CRC_RES_4_B_IVB 0x61070
#define _PIPE_CRC_RES_5_B_IVB 0x61074
 
#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
#define PIPE_CRC_RES_1_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
#define PIPE_CRC_RES_2_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
#define PIPE_CRC_RES_3_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
#define PIPE_CRC_RES_4_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
#define PIPE_CRC_RES_5_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
 
#define PIPE_CRC_RES_RED(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
#define PIPE_CRC_RES_GREEN(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
#define PIPE_CRC_RES_BLUE(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
#define PIPE_CRC_RES_RES1_I915(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
#define PIPE_CRC_RES_RES2_G4X(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
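
/*
 * Editor's sketch (hypothetical helper, not from this revision): arming
 * primary-plane CRC capture on an ivb+ pipe using the control bits
 * above. I915_WRITE()/POSTING_READ() are the driver's usual accessors.
 */
static void ivb_arm_pipe_crc(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	I915_WRITE(PIPE_CRC_CTL(pipe),
		   PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB);
	POSTING_READ(PIPE_CRC_CTL(pipe));
	/* each frame then latches into PIPE_CRC_RES_1_IVB(pipe)..RES_5 */
}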
 
/* Pipe A timing regs */
#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
1793,7 → 1960,6
#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
 
 
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
1803,8 → 1969,9
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
/* HSW eDP PSR registers */
#define EDP_PSR_CTL 0x64800
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
1827,16 → 1994,16
#define EDP_PSR_TP1_TIME_0us (3<<4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
 
#define EDP_PSR_AUX_CTL 0x64810
#define EDP_PSR_AUX_DATA1 0x64814
#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
#define EDP_PSR_DPCD_COMMAND 0x80060000
#define EDP_PSR_AUX_DATA2 0x64818
#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
#define EDP_PSR_AUX_DATA3 0x6481c
#define EDP_PSR_AUX_DATA4 0x64820
#define EDP_PSR_AUX_DATA5 0x64824
#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
 
#define EDP_PSR_STATUS_CTL 0x64840
#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
#define EDP_PSR_STATUS_STATE_MASK (7<<29)
#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
1860,10 → 2027,10
#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
 
#define EDP_PSR_PERF_CNT 0x64844
#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
 
#define EDP_PSR_DEBUG_CTL 0x64860
#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
1955,9 → 2122,13
* Please check the detailed lore in the commit message for experimental
* evidence.
*/
#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
/* VLV DP/HDMI bits again match Bspec */
#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
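
/*
 * Editor's sketch: the G4X vs VLV live-status bit swap above, in code.
 * 'hotplug_status' is assumed to hold a previously read hotplug status
 * register value; the helper is hypothetical.
 */
static bool port_b_live(struct drm_device *dev, u32 hotplug_status)
{
	if (IS_VALLEYVIEW(dev))
		return hotplug_status & PORTB_HOTPLUG_LIVE_STATUS_VLV;
	return hotplug_status & PORTB_HOTPLUG_LIVE_STATUS_G4X;
}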
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
1968,6 → 2139,11
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
#define DP_AUX_CHANNEL_D_INT_STATUS_G4X (1 << 6)
#define DP_AUX_CHANNEL_C_INT_STATUS_G4X (1 << 5)
#define DP_AUX_CHANNEL_B_INT_STATUS_G4X (1 << 4)
#define DP_AUX_CHANNEL_MASK_INT_STATUS_G4X (7 << 4)
 
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
2006,6 → 2182,14
#define PCH_HDMIC 0xe1150
#define PCH_HDMID 0xe1160
 
#define PORT_DFT_I9XX 0x61150
#define DC_BALANCE_RESET (1 << 25)
#define PORT_DFT2_G4X 0x61154
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
#define PIPE_A_SCRAMBLE_RESET (1 << 0)
 
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
2034,6 → 2218,7
 
/* Gen 4 SDVO/HDMI bits: */
#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
#define SDVO_COLOR_FORMAT_MASK (7 << 26)
#define SDVO_ENCODING_SDVO (0 << 10)
#define SDVO_ENCODING_HDMI (2 << 10)
#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
2238,6 → 2423,21
 
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
 
#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
_VLV_BLC_PWM_CTL2_B)
 
#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
_VLV_BLC_PWM_CTL_B)
 
#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
_VLV_BLC_HIST_CTL_B)
 
/* Backlight control */
#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
2986,6 → 3186,7
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
3068,6 → 3269,18
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
 
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_DITHER_BPC_MASK (7<<5)
#define PIPEMISC_DITHER_8_BPC (0<<5)
#define PIPEMISC_DITHER_10_BPC (1<<5)
#define PIPEMISC_DITHER_6_BPC (2<<5)
#define PIPEMISC_DITHER_12_BPC (3<<5)
#define PIPEMISC_DITHER_ENABLE (1<<4)
#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
#define PIPEMISC_DITHER_TYPE_SP (0<<2)
#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
 
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
3184,11 → 3397,11
 
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
#define WM0_PIPE_PLANE_MASK (0x7f<<16)
#define WM0_PIPE_PLANE_MASK (0xffff<<16)
#define WM0_PIPE_PLANE_SHIFT 16
#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
#define WM0_PIPE_SPRITE_MASK (0xff<<8)
#define WM0_PIPE_SPRITE_SHIFT 8
#define WM0_PIPE_CURSOR_MASK (0x1f)
#define WM0_PIPE_CURSOR_MASK (0xff)
 
#define WM0_PIPEB_ILK 0x45104
#define WM0_PIPEC_IVB 0x45200
3198,9 → 3411,10
#define WM1_LP_LATENCY_MASK (0x7f<<24)
#define WM1_LP_FBC_MASK (0xf<<20)
#define WM1_LP_FBC_SHIFT 20
#define WM1_LP_SR_MASK (0x1ff<<8)
#define WM1_LP_FBC_SHIFT_BDW 19
#define WM1_LP_SR_MASK (0x7ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0x3f)
#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK 0x4510c
#define WM2_LP_EN (1<<31)
#define WM3_LP_ILK 0x45110
3221,43 → 3435,7
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
 
/* define the fifo size on Ironlake */
#define ILK_DISPLAY_FIFO 128
#define ILK_DISPLAY_MAXWM 64
#define ILK_DISPLAY_DFTWM 8
#define ILK_CURSOR_FIFO 32
#define ILK_CURSOR_MAXWM 16
#define ILK_CURSOR_DFTWM 8
 
#define ILK_DISPLAY_SR_FIFO 512
#define ILK_DISPLAY_MAX_SRWM 0x1ff
#define ILK_DISPLAY_DFT_SRWM 0x3f
#define ILK_CURSOR_SR_FIFO 64
#define ILK_CURSOR_MAX_SRWM 0x3f
#define ILK_CURSOR_DFT_SRWM 8
 
#define ILK_FIFO_LINE_SIZE 64
 
/* define the WM info on Sandybridge */
#define SNB_DISPLAY_FIFO 128
#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
#define SNB_DISPLAY_DFTWM 8
#define SNB_CURSOR_FIFO 32
#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
#define SNB_CURSOR_DFTWM 8
 
#define SNB_DISPLAY_SR_FIFO 512
#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
#define SNB_DISPLAY_DFT_SRWM 0x3f
#define SNB_CURSOR_SR_FIFO 64
#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
#define SNB_CURSOR_DFT_SRWM 8
 
#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
 
#define SNB_FIFO_LINE_SIZE 64
 
 
/* the address where we get all kinds of latency value */
#define SSKPD 0x5d10
#define SSKPD_WM_MASK 0x3f
3281,17 → 3459,17
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
#define _PIPEAFRAMEHIGH 0x70040
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
#define _PIPEAFRAMEPIXEL 0x70044
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
#define _PIPEA_FRMCOUNT_GM45 0x70040
#define _PIPEA_FLIPCOUNT_GM45 0x70044
#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040)
#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
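
/*
 * Editor's sketch: the split-counter read loop described in the comment
 * above, for pre-GM45 parts. PIPEFRAME()/PIPEFRAMEPIXEL() accessors are
 * assumed from earlier in this header and display_mmio_offset handling
 * is elided; the helper name is hypothetical.
 */
static u32 i915_read_frame_count(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	u32 high1, high2, low;

	do {
		high1 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(PIPEFRAMEPIXEL(pipe));
		high2 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2); /* retry if the high word rolled over */

	low = (low & PIPE_FRAME_LOW_MASK) >> PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}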
 
/* Cursor A & B regs */
3400,8 → 3578,6
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
 
/* VBIOS flags */
#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
3422,10 → 3598,10
#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
#define _PIPEB_FRMCOUNT_GM45 0x71040
#define _PIPEB_FLIPCOUNT_GM45 0x71044
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040)
#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044)
 
 
/* Display B control */
3587,7 → 3763,7
 
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
#define SP_ENABLE (1<<31)
#define SP_GEAMMA_ENABLE (1<<30)
#define SP_GAMMA_ENABLE (1<<30)
#define SP_PIXFORMAT_MASK (0xf<<26)
#define SP_FORMAT_YUV422 (0<<26)
#define SP_FORMAT_BGR565 (5<<26)
3780,6 → 3956,7
#define DE_SPRITEA_FLIP_DONE (1 << 28)
#define DE_PLANEB_FLIP_DONE (1 << 27)
#define DE_PLANEA_FLIP_DONE (1 << 26)
#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
#define DE_PCU_EVENT (1 << 25)
#define DE_GTT_FAULT (1 << 24)
#define DE_POISON (1 << 23)
3793,13 → 3970,18
#define DE_PIPEB_ODD_FIELD (1 << 13)
#define DE_PIPEB_LINE_COMPARE (1 << 12)
#define DE_PIPEB_VSYNC (1 << 11)
#define DE_PIPEB_CRC_DONE (1 << 10)
#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
#define DE_PIPEA_VBLANK (1 << 7)
#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
#define DE_PIPEA_EVEN_FIELD (1 << 6)
#define DE_PIPEA_ODD_FIELD (1 << 5)
#define DE_PIPEA_LINE_COMPARE (1 << 4)
#define DE_PIPEA_VSYNC (1 << 3)
#define DE_PIPEA_CRC_DONE (1 << 2)
#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
 
/* More Ivybridge lolz */
#define DE_ERR_INT_IVB (1<<30)
3815,9 → 3997,8
#define DE_PIPEB_VBLANK_IVB (1<<5)
#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
#define DE_PIPEA_VBLANK_IVB (1<<0)
 
#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
 
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
3833,6 → 4014,71
#define GTIIR 0x44018
#define GTIER 0x4401c
 
#define GEN8_MASTER_IRQ 0x44200
#define GEN8_MASTER_IRQ_CONTROL (1<<31)
#define GEN8_PCU_IRQ (1<<30)
#define GEN8_DE_PCH_IRQ (1<<23)
#define GEN8_DE_MISC_IRQ (1<<22)
#define GEN8_DE_PORT_IRQ (1<<20)
#define GEN8_DE_PIPE_C_IRQ (1<<18)
#define GEN8_DE_PIPE_B_IRQ (1<<17)
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
#define GEN8_GT_VECS_IRQ (1<<6)
#define GEN8_GT_VCS2_IRQ (1<<3)
#define GEN8_GT_VCS1_IRQ (1<<2)
#define GEN8_GT_BCS_IRQ (1<<1)
#define GEN8_GT_RCS_IRQ (1<<0)
 
#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
 
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_VCS2_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
#define GEN8_VECS_IRQ_SHIFT 0
 
#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
#define GEN8_PIPE_FLIP_DONE (1 << 4)
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN8_PIPE_CURSOR_FAULT | \
GEN8_PIPE_SPRITE_FAULT | \
GEN8_PIPE_PRIMARY_FAULT)
 
#define GEN8_DE_PORT_ISR 0x44440
#define GEN8_DE_PORT_IMR 0x44444
#define GEN8_DE_PORT_IIR 0x44448
#define GEN8_DE_PORT_IER 0x4444c
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define GEN8_AUX_CHANNEL_A (1 << 0)
 
#define GEN8_DE_MISC_ISR 0x44460
#define GEN8_DE_MISC_IMR 0x44464
#define GEN8_DE_MISC_IIR 0x44468
#define GEN8_DE_MISC_IER 0x4446c
#define GEN8_DE_MISC_GSE (1 << 27)
 
#define GEN8_PCU_ISR 0x444e0
#define GEN8_PCU_IMR 0x444e4
#define GEN8_PCU_IIR 0x444e8
#define GEN8_PCU_IER 0x444ec
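
/*
 * Editor's sketch (hypothetical skeleton): the disable/ack/re-enable
 * sequence the GEN8 master interrupt control above implies. A real
 * handler walks every set category bit; only one display pipe is shown.
 */
static void gen8_irq_handler_sketch(struct drm_i915_private *dev_priv)
{
	u32 master = I915_READ(GEN8_MASTER_IRQ);

	/* mask the master control (bit 31) while handling */
	I915_WRITE(GEN8_MASTER_IRQ, 0);

	if (master & GEN8_DE_PIPE_IRQ(PIPE_A)) {
		u32 iir = I915_READ(GEN8_DE_PIPE_IIR(PIPE_A));
		/* ... service GEN8_PIPE_VBLANK and friends here ... */
		I915_WRITE(GEN8_DE_PIPE_IIR(PIPE_A), iir); /* ack */
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}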
 
#define ILK_DISPLAY_CHICKEN2 0x42004
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
3858,11 → 4104,19
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
 
#define CHICKEN_PAR1_1 0x42080
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
 
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 0x45004
#define DISP_DATA_PARTITION_5_6 (1<<6)
#define GEN7_MSG_CTL 0x45010
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
3870,6 → 4124,8
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
 
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
3881,6 → 4137,10
#define GEN7_L3SQCREG4 0xb034
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
 
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
#define HDC_FORCE_NON_COHERENT (1<<4)
 
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
4416,6 → 4676,8
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
#define PANEL_PORT_SELECT_DPB_VLV (1 << 30)
#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
 
4447,7 → 4709,6
#define PANEL_PORT_SELECT_MASK (3 << 30)
#define PANEL_PORT_SELECT_LVDS (0 << 30)
#define PANEL_PORT_SELECT_DPA (1 << 30)
#define EDP_PANEL (1 << 30)
#define PANEL_PORT_SELECT_DPC (2 << 30)
#define PANEL_PORT_SELECT_DPD (3 << 30)
#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
4456,11 → 4717,6
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
 
#define PCH_PP_OFF_DELAYS 0xc720c
#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
#define PANEL_POWER_PORT_LVDS (0 << 30)
#define PANEL_POWER_PORT_DP_A (1 << 30)
#define PANEL_POWER_PORT_DP_C (2 << 30)
#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
4569,6 → 4825,8
#define FORCEWAKE_ACK 0x130090
#define VLV_GTLC_WAKE_CTRL 0x130090
#define VLV_GTLC_PW_STATUS 0x130094
#define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80
#define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
4577,12 → 4835,16
#define FORCEWAKE_MT_ENABLE (1<<5)
 
#define GTFIFODBG 0x120000
#define GT_FIFO_CPU_ERROR_MASK 7
#define GT_FIFO_SBDROPERR (1<<6)
#define GT_FIFO_BLOBDROPERR (1<<5)
#define GT_FIFO_SB_READ_ABORTERR (1<<4)
#define GT_FIFO_DROPERR (1<<3)
#define GT_FIFO_OVFERR (1<<2)
#define GT_FIFO_IAWRERR (1<<1)
#define GT_FIFO_IARDERR (1<<0)
 
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GTFIFOCTL 0x120008
#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
 
#define HSW_IDICR 0x9008
4616,6 → 4878,7
#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
#define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24)
#define GEN7_RC_CTL_TO_MODE (1<<28)
#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
#define GEN6_RC_CTL_HW_ENABLE (1<<31)
4638,7 → 4901,7
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
4683,6 → 4946,10
GEN6_PM_RP_DOWN_TIMEOUT)
 
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
4694,8 → 4961,11
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
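/* e.g. GEN6_ENCODE_RC6_VID(450) == (450 - 245) / 5 == 41, and GEN6_DECODE_RC6_VID(41) == 450 mV */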
#define DISPLAY_IPS_CONTROL 0x19
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
4713,6 → 4983,7
 
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
#define GEN7_PARITY_ERROR_VALID (1<<13)
#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
4726,11 → 4997,13
#define GEN7_L3CDERRST1_ENABLE (1<<7)
 
#define GEN7_L3LOG_BASE 0xB070
#define HSW_L3LOG_BASE_SLICE1 0xB270
#define GEN7_L3LOG_SIZE 0x80
 
#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
 
#define GEN7_ROW_CHICKEN2 0xe4f4
4740,6 → 5013,10
#define HSW_ROW_CHICKEN3 0xe49c
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
 
#define HALF_SLICE_CHICKEN3 0xe184
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
 
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
4781,6 → 5058,18
CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
 
#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
VLV_HDMIW_HDMIEDID_A, \
VLV_HDMIW_HDMIEDID_B)
#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
VLV_AUD_CNTL_ST_A, \
VLV_AUD_CNTL_ST_B)
#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
 
/* These are the 4 32-bit write offset registers for each stream
* output buffer. It determines the offset from the
* 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
4797,6 → 5086,12
#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
CPT_AUD_CONFIG_A, \
CPT_AUD_CONFIG_B)
#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
VLV_AUD_CONFIG_A, \
VLV_AUD_CONFIG_B)
 
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
4804,7 → 5099,17
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
 
/* HSW Audio */
4929,6 → 5234,7
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
/* Haswell */
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
4938,6 → 5244,16
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
/* Broadwell */
#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
5047,6 → 5363,9
#define LCPLL_PLL_LOCK (1<<30)
#define LCPLL_CLK_FREQ_MASK (3<<26)
#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CLK_FREQ_54O_BDW (1<<26)
#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
#define LCPLL_CLK_FREQ_675_BDW (3<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
#define LCPLL_POWER_DOWN_ALLOW (1<<22)
5128,4 → 5447,414
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
 
/* VLV MIPI registers */
 
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + B */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
#define DUAL_LINK_MODE_MASK (1 << 26)
#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
#define DITHERING_ENABLE (1 << 25) /* A + B */
#define FLOPPED_HSTX (1 << 23)
#define DE_INVERT (1 << 19) /* XXX */
#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
#define AFE_LATCHOUT (1 << 17)
#define LP_OUTPUT_HOLD (1 << 16)
#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
#define CSB_SHIFT 9
#define CSB_MASK (3 << 9)
#define CSB_20MHZ (0 << 9)
#define CSB_10MHZ (1 << 9)
#define CSB_40MHZ (2 << 9)
#define BANDGAP_MASK (1 << 8)
#define BANDGAP_PNW_CIRCUIT (0 << 8)
#define BANDGAP_LNC_CIRCUIT (1 << 8)
#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
#define TEARING_EFFECT_SHIFT 2 /* A + B */
#define TEARING_EFFECT_MASK (3 << 2)
#define TEARING_EFFECT_OFF (0 << 2)
#define TEARING_EFFECT_DSI (1 << 2)
#define TEARING_EFFECT_GPIO (2 << 2)
#define LANE_CONFIGURATION_SHIFT 0
#define LANE_CONFIGURATION_MASK (3 << 0)
#define LANE_CONFIGURATION_4LANE (0 << 0)
#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
 
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
 
/* XXX: all bits reserved */
#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
 
/* MIPI DSI Controller and D-PHY registers */
 
#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
#define ULPS_STATE_EXIT (1 << 1)
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
 
#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
#define RX_PROT_VIOLATION (1 << 26)
#define RX_INVALID_TX_LENGTH (1 << 25)
#define ACK_WITH_NO_ERROR (1 << 24)
#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
#define LP_RX_TIMEOUT (1 << 22)
#define HS_TX_TIMEOUT (1 << 21)
#define DPI_FIFO_UNDERRUN (1 << 20)
#define LOW_CONTENTION (1 << 19)
#define HIGH_CONTENTION (1 << 18)
#define TXDSI_VC_ID_INVALID (1 << 17)
#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
#define TXCHECKSUM_ERROR (1 << 15)
#define TXECC_MULTIBIT_ERROR (1 << 14)
#define TXECC_SINGLE_BIT_ERROR (1 << 13)
#define TXFALSE_CONTROL_ERROR (1 << 12)
#define RXDSI_VC_ID_INVALID (1 << 11)
#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10)
#define RXCHECKSUM_ERROR (1 << 9)
#define RXECC_MULTIBIT_ERROR (1 << 8)
#define RXECC_SINGLE_BIT_ERROR (1 << 7)
#define RXFALSE_CONTROL_ERROR (1 << 6)
#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
#define RX_LP_TX_SYNC_ERROR (1 << 4)
#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3)
#define RXEOT_SYNC_ERROR (1 << 2)
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
 
#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
#define VID_MODE_FORMAT_MASK (0xf << 7)
#define VID_MODE_NOT_SUPPORTED (0 << 7)
#define VID_MODE_FORMAT_RGB565 (1 << 7)
#define VID_MODE_FORMAT_RGB666 (2 << 7)
#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
#define VID_MODE_FORMAT_RGB888 (4 << 7)
#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
 
#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
 
#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
 
#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
 
#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
 
#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
 
#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
 
/* regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
 
#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
 
#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
 
#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
 
#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
 
#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
 
#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
 
#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
 
#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
#define COLOR_MODE_OFF (1 << 3)
#define COLOR_MODE_ON (1 << 2)
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
 
#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
 
#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
 
#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
 
#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
 
#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
 
#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
 
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
 
/* bits 31:0 */
#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
 
#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
#define VIRTUAL_CHANNEL_SHIFT 6
#define VIRTUAL_CHANNEL_MASK (3 << 6)
#define DATA_TYPE_SHIFT 0
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
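
/*
 * Editor's sketch: sending a short DCS packet in HS mode with the
 * fields above. MIPI_DCS_SET_DISPLAY_ON comes from
 * include/video/mipi_display.h as noted; the helper is hypothetical.
 */
static void vlv_dsi_display_on_sketch(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	I915_WRITE(MIPI_HS_GEN_CTRL(pipe),
		   (MIPI_DCS_SET_DISPLAY_ON << DATA_TYPE_SHIFT) |
		   (0 << VIRTUAL_CHANNEL_SHIFT));
}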
 
#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
#define LP_CTRL_FIFO_FULL (1 << 24)
#define HS_CTRL_FIFO_EMPTY (1 << 18)
#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
#define HS_CTRL_FIFO_FULL (1 << 16)
#define LP_DATA_FIFO_EMPTY (1 << 10)
#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
#define LP_DATA_FIFO_FULL (1 << 8)
#define HS_DATA_FIFO_EMPTY (1 << 2)
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
 
#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
 
#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
#define TRAIL_COUNT_MASK (0x1f << 16)
#define CLK_ZERO_COUNT_SHIFT 8
#define CLK_ZERO_COUNT_MASK (0xff << 8)
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)
 
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
 
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
 
#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
 
#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
 
/* XXX: only pipe A ?!? */
#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
#define DBI_TYPEC_OPTION_MASK (3 << 28)
#define DBI_TYPEC_FREQ_SHIFT 24
#define DBI_TYPEC_FREQ_MASK (0xf << 24)
#define DBI_TYPEC_OVERRIDE (1 << 8)
#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
 
 
/* MIPI adapter registers */
 
#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
#define READ_REQUEST_PRIORITY_SHIFT 3
#define READ_REQUEST_PRIORITY_MASK (3 << 3)
#define READ_REQUEST_PRIORITY_LOW (0 << 3)
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)
 
#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
 
#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
 
#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
 
#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
 
#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
#define MIPI_READ_DATA_RETURN(pipe, n) \
(_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
 
#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
 
#endif /* _I915_REG_H_ */
/drivers/video/drm/i915/i915_trace.h
16,7 → 16,7
#define trace_i915_gem_request_retire(a, b)
#define trace_i915_gem_request_wait_begin(a, b)
#define trace_i915_gem_request_wait_end(a, b)
#define trace_i915_gem_request_complete(a, b)
#define trace_i915_gem_request_complete(a)
#define trace_intel_gpu_freq_change(a)
#define trace_i915_reg_rw(a, b, c, d, e)
#define trace_i915_ring_wait_begin(a)
23,6 → 23,7
#define trace_i915_gem_object_pwrite(a, b, c)
#define trace_i915_gem_request_add(a, b)
#define trace_i915_gem_ring_dispatch(a, b, c)
#define trace_i915_gem_ring_sync_to(a, b, c)
#define trace_i915_vma_bind(a, b)
#define trace_i915_vma_unbind(a)
#define trace_i915_gem_object_clflush(a)
/drivers/video/drm/i915/intel_bios.c
280,6 → 280,34
}
}
 
static void
parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
 
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
 
if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
backlight_data->entry_size);
return;
}
 
entry = &backlight_data->data[panel_type];
 
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
entry->min_brightness,
backlight_data->level[panel_type]);
}
 
/* Try to find sdvo panel data */
static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
326,12 → 354,12
{
switch (INTEL_INFO(dev)->gen) {
case 2:
return alternate ? 66 : 48;
return alternate ? 66667 : 48000;
case 3:
case 4:
return alternate ? 100 : 96;
return alternate ? 100000 : 96000;
default:
return alternate ? 100 : 120;
return alternate ? 100000 : 120000;
}
}
 
388,7 → 416,7
{
struct sdvo_device_mapping *p_mapping;
struct bdb_general_definitions *p_defs;
struct child_device_config *p_child;
union child_device_config *p_child;
int i, child_device_num, count;
u16 block_size;
 
415,12 → 443,12
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
if (!p_child->device_type) {
if (!p_child->old.device_type) {
/* skip the device block if device type is invalid */
continue;
}
if (p_child->slave_addr != SLAVE_ADDR1 &&
p_child->slave_addr != SLAVE_ADDR2) {
if (p_child->old.slave_addr != SLAVE_ADDR1 &&
p_child->old.slave_addr != SLAVE_ADDR2) {
/*
* If the slave address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
427,8 → 455,8
*/
continue;
}
if (p_child->dvo_port != DEVICE_PORT_DVOB &&
p_child->dvo_port != DEVICE_PORT_DVOC) {
if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
p_child->old.dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
435,16 → 463,16
}
DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
" %s port\n",
p_child->slave_addr,
(p_child->dvo_port == DEVICE_PORT_DVOB) ?
p_child->old.slave_addr,
(p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
if (!p_mapping->initialized) {
p_mapping->dvo_port = p_child->dvo_port;
p_mapping->slave_addr = p_child->slave_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->i2c_pin = p_child->i2c_pin;
p_mapping->dvo_port = p_child->old.dvo_port;
p_mapping->slave_addr = p_child->old.slave_addr;
p_mapping->dvo_wiring = p_child->old.dvo_wiring;
p_mapping->ddc_pin = p_child->old.ddc_pin;
p_mapping->i2c_pin = p_child->old.i2c_pin;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
456,7 → 484,7
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
if (p_child->slave2_addr) {
if (p_child->old.slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
476,7 → 504,6
parse_driver_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct drm_device *dev = dev_priv->dev;
struct bdb_driver_features *driver;
 
driver = find_section(bdb, BDB_DRIVER_FEATURES);
483,8 → 510,7
if (!driver)
return;
 
if (SUPPORTS_EDP(dev) &&
driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
dev_priv->vbt.edp_support = 1;
 
if (driver->dual_frequency)
500,7 → 526,7
 
edp = find_section(bdb, BDB_EDP);
if (!edp) {
if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
if (dev_priv->vbt.edp_support)
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
568,11 → 594,149
}
 
static void
parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_mipi *mipi;
 
mipi = find_section(bdb, BDB_MIPI);
if (!mipi) {
DRM_DEBUG_KMS("No MIPI BDB found");
return;
}
 
/* XXX: add more info */
dev_priv->vbt.dsi.panel_id = mipi->panel_id;
}
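 
All of the parse_* helpers funnel through find_section(), whose implementation is outside this hunk. Per the block-layout comment in intel_bios.h (one id byte, a little-endian u16 size, then the payload), a walk over that stream looks roughly like the sketch below; details of the real find_section() may differ:
 
#include <stdint.h>
#include <stddef.h>
 
/* Sketch of a BDB block walk: id (u8), size (u16 LE), payload follows. */
static const void *find_block(const uint8_t *bdb, size_t len, uint8_t want_id)
{
	size_t off = 0;
 
	while (off + 3 <= len) {
		uint8_t  id   = bdb[off];
		uint16_t size = bdb[off + 1] | (uint16_t)(bdb[off + 2] << 8);
 
		if (off + 3 + size > len)
			break;                /* truncated block: stop walking */
		if (id == want_id)
			return bdb + off + 3; /* return the payload */
		off += 3 + size;
	}
	return NULL;
}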
 
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
struct bdb_header *bdb)
{
union child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
uint8_t hdmi_level_shift;
int i, j;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
uint8_t aux_channel;
/* Each DDI port can have more than one value in the "DVO Port" field,
* so look for all the possible values for each port and abort if more
* than one is found. */
int dvo_ports[][2] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA},
{DVO_PORT_HDMIB, DVO_PORT_DPB},
{DVO_PORT_HDMIC, DVO_PORT_DPC},
{DVO_PORT_HDMID, DVO_PORT_DPD},
{DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
};
 
/* Find the child device to use, abort if more than one found. */
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i;
 
for (j = 0; j < 2; j++) {
if (dvo_ports[port][j] == -1)
break;
 
if (it->common.dvo_port == dvo_ports[port][j]) {
if (child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
port_name(port));
return;
}
child = it;
}
}
}
if (!child)
return;
 
aux_channel = child->raw[25];
 
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
 
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
 
DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
 
if (is_edp && is_dvi)
DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
port_name(port));
if (is_crt && port != PORT_E)
DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
if (is_crt && (is_dvi || is_dp))
DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
port_name(port));
if (is_dvi && (port == PORT_A || port == PORT_E))
DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
if (!is_dvi && !is_dp && !is_crt)
DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
port_name(port));
if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
if (is_dvi) {
if (child->common.ddc_pin == 0x05 && port != PORT_B)
DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
if (child->common.ddc_pin == 0x04 && port != PORT_C)
DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
if (child->common.ddc_pin == 0x06 && port != PORT_D)
DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
}
 
if (is_dp) {
if (aux_channel == 0x40 && port != PORT_A)
DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
if (aux_channel == 0x10 && port != PORT_B)
DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
if (aux_channel == 0x20 && port != PORT_C)
DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
if (aux_channel == 0x30 && port != PORT_D)
DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
}
 
if (bdb->version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
hdmi_level_shift = child->raw[7] & 0xF;
if (hdmi_level_shift < 0xC) {
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
port_name(port),
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
}
}
}
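 
The is_dvi/is_hdmi/is_dp/is_edp flags above are pure bit tests on the child's device_type word, using the DEVICE_TYPE_* bits defined later in intel_bios.h. A standalone illustration (mask values copied from that header):
 
#include <stdint.h>
#include <stdio.h>
 
#define DEVICE_TYPE_INTERNAL_CONNECTOR  (1 << 12)
#define DEVICE_TYPE_NOT_HDMI_OUTPUT     (1 << 11)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING  (1 << 4)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT  (1 << 2)
 
static void classify(uint16_t device_type)
{
	int is_dvi  = !!(device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING);
	int is_dp   = !!(device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT);
	int is_hdmi = is_dvi && !(device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT);
	int is_edp  = is_dp && !!(device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
 
	printf("DP:%d HDMI:%d DVI:%d eDP:%d\n", is_dp, is_hdmi, is_dvi, is_edp);
}
 
int main(void)
{
	classify(0x78C6);	/* DEVICE_TYPE_eDP from this header */
	return 0;
}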
 
static void parse_ddi_ports(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct drm_device *dev = dev_priv->dev;
enum port port;
 
if (!HAS_DDI(dev))
return;
 
if (!dev_priv->vbt.child_dev_num)
return;
 
if (bdb->version < 155)
return;
 
for (port = PORT_A; port < I915_MAX_PORTS; port++)
parse_ddi_port(dev_priv, port, bdb);
}
 
static void
parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_general_definitions *p_defs;
struct child_device_config *p_child, *child_dev_ptr;
union child_device_config *p_child, *child_dev_ptr;
int i, child_device_num, count;
u16 block_size;
 
600,7 → 764,7
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
if (!p_child->device_type) {
if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
}
620,7 → 784,7
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
if (!p_child->device_type) {
if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
}
636,6 → 800,7
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
enum port port;
 
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
652,9 → 817,26
 
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
/*
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS.
*/
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
!HAS_PCH_SPLIT(dev));
DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
 
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
 
/* Recommended BSpec default: 800mV 0dB. */
info->hdmi_level_shift = 6;
 
info->supports_dvi = (port != PORT_A && port != PORT_E);
info->supports_hdmi = info->supports_dvi;
info->supports_dp = (port != PORT_E);
}
}
 
 
/**
720,11 → 902,14
parse_general_features(dev_priv, bdb);
parse_general_definitions(dev_priv, bdb);
parse_lfp_panel_data(dev_priv, bdb);
parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
 
if (bios)
pci_unmap_rom(pdev, bios);
/drivers/video/drm/i915/intel_bios.h
39,7 → 39,7
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
} __attribute__((packed));
} __packed;
 
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
65,7 → 65,7
u8 rsvd4; /* popup memory size */
u8 resize_pci_bios;
u8 rsvd5; /* is crt already on ddc2 */
} __attribute__((packed));
} __packed;
 
/*
* There are several types of BIOS data blocks (BDBs), each block has
104,6 → 104,7
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
#define BDB_MIPI 50
#define BDB_SKIP 254 /* VBIOS private block, ignore */
 
struct bdb_general_features {
141,7 → 142,7
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
} __attribute__((packed));
} __packed;
 
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
201,7 → 202,10
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
 
struct child_device_config {
/* We used to keep this struct but without any version control. We should avoid
* using it in the future, but it should be safe to keep using it in the old
* code. */
struct old_child_dev_config {
u16 handle;
u16 device_type;
u8 device_id[10]; /* ascii string */
221,8 → 225,34
u8 dvo2_wiring;
u16 extended_type;
u8 dvo_function;
} __attribute__((packed));
} __packed;
 
/* This one contains field offsets that are known to be common for all BDB
* versions. Notice that the meaning of the contents may still change,
* but at least the offsets are consistent. */
struct common_child_dev_config {
u16 handle;
u16 device_type;
u8 not_common1[12];
u8 dvo_port;
u8 not_common2[2];
u8 ddc_pin;
u16 edid_ptr;
} __packed;
 
/* This field changes depending on the BDB version, so the most reliable way to
* read it is by checking the BDB version and reading the raw pointer. */
union child_device_config {
/* This one is safe to be used anywhere, but the code should still check
* the BDB version. */
u8 raw[33];
/* This one should only be kept for legacy code. */
struct old_child_dev_config old;
/* This one should also be safe to use anywhere, even without version
* checks. */
struct common_child_dev_config common;
};
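 
The access discipline this union is aiming for, sketched below: fields reachable through .common are offset-stable across BDB versions, while anything else should be read through .raw behind an explicit version check (parse_ddi_ports gates on version >= 155 before parse_ddi_port reads the aux channel at raw[25]):
 
/* Sketch only; mirrors how parse_ddi_port() reads version-dependent bytes. */
static u8 child_aux_channel(const union child_device_config *p_child,
			    u16 bdb_version)
{
	/* Offset-stable field: safe on every BDB version. */
	if (!p_child->common.device_type)
		return 0;
 
	/* Version-dependent byte: only meaningful on new-enough VBTs
	 * (155 is the cutoff parse_ddi_ports uses). */
	if (bdb_version >= 155)
		return p_child->raw[25];
 
	return 0;
}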
 
struct bdb_general_definitions {
/* DDC GPIO */
u8 crt_ddc_gmbus_pin;
248,8 → 278,8
* number = (block_size - sizeof(bdb_general_definitions))/
* sizeof(child_device_config);
*/
struct child_device_config devices[0];
} __attribute__((packed));
union child_device_config devices[0];
} __packed;
 
struct bdb_lvds_options {
u8 panel_type;
263,7 → 293,7
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
} __attribute__((packed));
} __packed;
 
/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
273,12 → 303,12
u8 dvo_table_size;
u16 panel_pnp_id_offset;
u8 pnp_table_size;
} __attribute__((packed));
} __packed;
 
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
struct bdb_lvds_lfp_data_ptr ptr[16];
} __attribute__((packed));
} __packed;
 
/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
295,7 → 325,7
u32 pfit_reg;
u32 pfit_reg_val;
u16 terminator;
} __attribute__((packed));
} __packed;
 
struct lvds_dvo_timing {
u16 clock; /**< In 10khz */
323,7 → 353,7
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 rsvd2:1;
} __attribute__((packed));
} __packed;
 
struct lvds_pnp_id {
u16 mfg_name;
331,18 → 361,34
u32 serial;
u8 mfg_week;
u8 mfg_year;
} __attribute__((packed));
} __packed;
 
struct bdb_lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
struct lvds_pnp_id pnp_id;
} __attribute__((packed));
} __packed;
 
struct bdb_lvds_lfp_data {
struct bdb_lvds_lfp_data_entry data[16];
} __attribute__((packed));
} __packed;
 
struct bdb_lfp_backlight_data_entry {
u8 type:2;
u8 active_low_pwm:1;
u8 obsolete1:5;
u16 pwm_freq_hz;
u8 min_brightness;
u8 obsolete2;
u8 obsolete3;
} __packed;
 
struct bdb_lfp_backlight_data {
u8 entry_size;
struct bdb_lfp_backlight_data_entry data[16];
u8 level[16];
} __packed;
 
struct aimdb_header {
char signature[16];
char oem_device[20];
349,12 → 395,12
u16 aimdb_version;
u16 aimdb_header_size;
u16 aimdb_size;
} __attribute__((packed));
} __packed;
 
struct aimdb_block {
u8 aimdb_id;
u16 aimdb_size;
} __attribute__((packed));
} __packed;
 
struct vch_panel_data {
u16 fp_timing_offset;
365,12 → 411,12
u8 text_fitting_size;
u16 graphics_fitting_offset;
u8 graphics_fitting_size;
} __attribute__((packed));
} __packed;
 
struct vch_bdb_22 {
struct aimdb_block aimdb_block;
struct vch_panel_data panels[16];
} __attribute__((packed));
} __packed;
 
struct bdb_sdvo_lvds_options {
u8 panel_backlight;
386,7 → 432,7
u8 panel_misc_bits_2;
u8 panel_misc_bits_3;
u8 panel_misc_bits_4;
} __attribute__((packed));
} __packed;
 
 
#define BDB_DRIVER_FEATURE_NO_LVDS 0
432,7 → 478,7
 
u8 hdmi_termination;
u8 custom_vbt_version;
} __attribute__((packed));
} __packed;
 
#define EDP_18BPP 0
#define EDP_24BPP 1
457,7 → 503,7
u16 t9;
u16 t10;
u16 t11_t12;
} __attribute__ ((packed));
} __packed;
 
struct edp_link_params {
u8 rate:4;
464,7 → 510,7
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
} __attribute__ ((packed));
} __packed;
 
struct bdb_edp {
struct edp_power_seq power_seqs[16];
475,7 → 521,7
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
} __attribute__ ((packed));
} __packed;
 
void intel_setup_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);
608,6 → 654,40
#define DEVICE_TYPE_DP 0x68C6
#define DEVICE_TYPE_eDP 0x78C6
 
#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
 
/*
* Bits we care about when checking for DEVICE_TYPE_eDP
* Depending on the system, the other bits may or may not
* be set for eDP outputs.
*/
#define DEVICE_TYPE_eDP_BITS \
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_NOT_HDMI_OUTPUT | \
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_DUAL_CHANNEL | \
DEVICE_TYPE_LVDS_SINGALING | \
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_DIGITAL_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)
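 
The intended use is a masked comparison: drop the "may or may not be set" bits from the child's device_type before comparing against DEVICE_TYPE_eDP. The call site is outside this hunk, so the check below is a hedged sketch of it:
 
/* Sketch: compare only the bits DEVICE_TYPE_eDP_BITS says we care about. */
static int child_is_edp(u16 device_type)
{
	return (device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}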
 
/* define the DVO port for HDMI output type */
#define DVO_B 1
#define DVO_C 2
618,4 → 698,57
#define PORT_IDPC 8
#define PORT_IDPD 9
 
/* Possible values for the "DVO Port" field for versions >= 155: */
#define DVO_PORT_HDMIA 0
#define DVO_PORT_HDMIB 1
#define DVO_PORT_HDMIC 2
#define DVO_PORT_HDMID 3
#define DVO_PORT_LVDS 4
#define DVO_PORT_TV 5
#define DVO_PORT_CRT 6
#define DVO_PORT_DPB 7
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
 
/* MIPI DSI panel info */
struct bdb_mipi {
u16 panel_id;
u16 bridge_revision;
 
/* General params */
u32 dithering:1;
u32 bpp_pixel_format:1;
u32 rsvd1:1;
u32 dphy_valid:1;
u32 resvd2:28;
 
u16 port_info;
u16 rsvd3:2;
u16 num_lanes:2;
u16 rsvd4:12;
 
/* DSI config */
u16 virt_ch_num:2;
u16 vtm:2;
u16 rsvd5:12;
 
u32 dsi_clock;
u32 bridge_ref_clk;
u16 rsvd_pwr;
 
/* Dphy Params */
u32 prepare_cnt:5;
u32 rsvd6:3;
u32 clk_zero_cnt:8;
u32 trail_cnt:5;
u32 rsvd7:3;
u32 exit_zero_cnt:6;
u32 rsvd8:2;
 
u32 hl_switch_cnt;
u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
} __packed;
 
#endif /* _I830_BIOS_H_ */
/drivers/video/drm/i915/intel_crt.c
106,7 → 106,17
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
int dotclock;
 
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
 
dotclock = pipe_config->port_clock;
 
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
}
 
static void hsw_crt_get_config(struct intel_encoder *encoder,
211,7 → 221,8
intel_modeset_check_state(connector->dev);
}
 
static int intel_crt_mode_valid(struct drm_connector *connector,
static enum drm_mode_status
intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
263,7 → 274,7
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
 
if (HAS_PCH_SPLIT(dev))
if (INTEL_INFO(dev)->gen >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
365,9 → 376,6
 
DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
/* FIXME: debug force function and remove */
ret = true;
 
return ret;
}
 
669,7 → 677,6
 
static void intel_crt_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
753,7 → 760,7
if (!crt)
return;
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(crt);
return;
793,16 → 800,15
crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
if (IS_HASWELL(dev))
crt->base.get_config = hsw_crt_get_config;
else
crt->base.get_config = intel_crt_get_config;
if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev))
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
else
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
/drivers/video/drm/i915/intel_ddi.c
42,7 → 42,6
0x80C30FFF, 0x000B0000,
0x00FFFFFF, 0x00040006,
0x80D75FFF, 0x000B0000,
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
 
static const u32 hsw_ddi_translations_fdi[] = {
55,10 → 54,64
0x00C30FFF, 0x001E0000,
0x00FFFFFF, 0x00060006,
0x00D75FFF, 0x001E0000,
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
 
static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
static const u32 hsw_ddi_translations_hdmi[] = {
/* Idx NT mV diff T mV diff db */
0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
};
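 
This table is indexed by the VBT HDMI level shift (0 through 11), two dwords per entry; parse_ddi_port in intel_bios.c stores the value and init_vbt_defaults falls back to 6, which the Idx comments above identify as the 800 mV / 0 dB entry. The lookup amounts to:
 
/* Pick the two-dword buffer translation pair for a given level shift. */
static void hdmi_trans_pair(unsigned int hdmi_level, u32 *lo, u32 *hi)
{
	*lo = hsw_ddi_translations_hdmi[hdmi_level * 2];     /* 6 -> 0x00FFFFFF */
	*hi = hsw_ddi_translations_hdmi[hdmi_level * 2 + 1]; /* 6 -> 0x00040006 */
}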
 
static const u32 bdw_ddi_translations_edp[] = {
0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
0x00FFFFFF, 0x00020011,
0x00DB6FFF, 0x0005000F,
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
0x00FFFFFF, 0x000A000C,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
};
 
static const u32 bdw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0007000E, /* DP parameters */
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
0x00FFFFFF, 0x000E000A,
0x00D75FFF, 0x00180004,
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
0x80FFFFFF, 0x001B0002,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
};
 
static const u32 bdw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x0001000E, /* FDI parameters */
0x00D75FFF, 0x0004000A,
0x00C30FFF, 0x00070006,
0x00AAAFFF, 0x000C0000,
0x00FFFFFF, 0x0004000A,
0x00D75FFF, 0x00090004,
0x00C30FFF, 0x000C0000,
0x00FFFFFF, 0x00070006,
0x00D75FFF, 0x000C0000,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
};
 
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
78,8 → 131,9
}
}
 
/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
89,16 → 143,59
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
const u32 *ddi_translations = (port == PORT_E) ?
hsw_ddi_translations_fdi :
hsw_ddi_translations_dp;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const u32 *ddi_translations_fdi;
const u32 *ddi_translations_dp;
const u32 *ddi_translations_edp;
const u32 *ddi_translations;
 
if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
}
 
switch (port) {
case PORT_A:
ddi_translations = ddi_translations_edp;
break;
case PORT_B:
case PORT_C:
ddi_translations = ddi_translations_dp;
break;
case PORT_D:
if (intel_dp_is_edp(dev, PORT_D))
ddi_translations = ddi_translations_edp;
else
ddi_translations = ddi_translations_dp;
break;
case PORT_E:
ddi_translations = ddi_translations_fdi;
break;
default:
BUG();
}
 
for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
/* Entry 9 is for HDMI: */
for (i = 0; i < 2; i++) {
I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
reg += 4;
}
}
 
/* Program DDI buffers translations for DP. By default, program ports A-D in DP
* mode and port E for FDI.
296,9 → 393,6
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
intel_write_eld(&encoder->base, adjusted_mode);
}
 
intel_dp_init_link_config(intel_dp);
 
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
602,14 → 696,17
*n2_out = best.n2;
*p_out = best.p;
*r2_out = best.r2;
 
DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
clock, *p_out, *n2_out, *r2_out);
}
 
bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
/*
* Tries to find a PLL for the CRTC. If it finds one, it increases the refcount and
* stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to
* steal the selected PLL. You need to call intel_ddi_pll_enable to actually
* enable the PLL.
*/
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
616,11 → 713,8
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type;
enum pipe pipe = intel_crtc->pipe;
uint32_t reg, val;
int clock = intel_crtc->config.port_clock;
 
/* TODO: reuse PLLs when possible (compare values) */
 
intel_ddi_put_crtc_pll(crtc);
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
642,44 → 736,53
return false;
}
 
/* We don't need to turn any PLL on because we'll use LCPLL. */
return true;
 
} else if (type == INTEL_OUTPUT_HDMI) {
uint32_t reg, val;
unsigned p, n2, r2;
 
if (plls->wrpll1_refcount == 0) {
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
 
if (val == I915_READ(WRPLL_CTL1)) {
DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL1;
} else if (val == I915_READ(WRPLL_CTL2)) {
DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL2;
} else if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
pipe_name(pipe));
plls->wrpll1_refcount++;
reg = WRPLL_CTL1;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
} else if (plls->wrpll2_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
pipe_name(pipe));
plls->wrpll2_refcount++;
reg = WRPLL_CTL2;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
} else {
DRM_ERROR("No WRPLLs available!\n");
return false;
}
 
WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
"WRPLL already enabled\n");
DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
clock, p, n2, r2);
 
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
if (reg == WRPLL_CTL1) {
plls->wrpll1_refcount++;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
} else {
plls->wrpll2_refcount++;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
}
 
val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
 
} else if (type == INTEL_OUTPUT_ANALOG) {
if (plls->spll_refcount == 0) {
DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
pipe_name(pipe));
plls->spll_refcount++;
reg = SPLL_CTL;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
} else {
DRM_ERROR("SPLL already in use\n");
686,22 → 789,92
return false;
}
 
WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
"SPLL already enabled\n");
 
val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
 
} else {
WARN(1, "Invalid DDI encoder type %d\n", type);
return false;
}
 
I915_WRITE(reg, val);
udelay(20);
 
return true;
}
 
/*
* To be called after intel_ddi_pll_select(). That one selects the PLL to be
* used, this one actually enables the PLL.
*/
void intel_ddi_pll_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int clock = crtc->config.port_clock;
uint32_t reg, cur_val, new_val;
int refcount;
const char *pll_name;
uint32_t enable_bit = (1 << 31);
unsigned int p, n2, r2;
 
BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
 
switch (crtc->ddi_pll_sel) {
case PORT_CLK_SEL_LCPLL_2700:
case PORT_CLK_SEL_LCPLL_1350:
case PORT_CLK_SEL_LCPLL_810:
/*
* LCPLL should always be enabled at this point of the mode set
* sequence, so nothing to do.
*/
return;
 
case PORT_CLK_SEL_SPLL:
pll_name = "SPLL";
reg = SPLL_CTL;
refcount = plls->spll_refcount;
new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
SPLL_PLL_SSC;
break;
 
case PORT_CLK_SEL_WRPLL1:
case PORT_CLK_SEL_WRPLL2:
if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
pll_name = "WRPLL1";
reg = WRPLL_CTL1;
refcount = plls->wrpll1_refcount;
} else {
pll_name = "WRPLL2";
reg = WRPLL_CTL2;
refcount = plls->wrpll2_refcount;
}
 
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) |
WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
 
break;
 
case PORT_CLK_SEL_NONE:
WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
return;
default:
WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
return;
}
 
cur_val = I915_READ(reg);
 
WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
if (refcount == 1) {
WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
I915_WRITE(reg, new_val);
POSTING_READ(reg);
udelay(20);
} else {
WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
}
}
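 
Taken together with intel_ddi_pll_select() above, this is a two-phase protocol: reserve during the compute phase, program during the commit phase. A sketch of the intended call order (the surrounding mode-set plumbing is omitted):
 
/* Illustrative only; not a complete mode set path. */
static bool ddi_modeset_sketch(struct intel_crtc *intel_crtc)
{
	if (!intel_ddi_pll_select(intel_crtc))  /* reserves PLL, bumps refcount */
		return false;                   /* no compatible PLL was free */
 
	/* ... remaining compute-phase checks would run here ... */
 
	intel_ddi_pll_enable(intel_crtc);       /* writes the PLL; LCPLL is a no-op */
	return true;
}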
 
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
739,7 → 912,8
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
767,18 → 941,19
BUG();
}
 
if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
 
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
case PIPE_A:
/* Can only use the always-on power well for eDP when
* not using the panel fitter, and when not using motion
* blur mitigation (which we don't support). */
if (intel_crtc->config.pch_pfit.enabled)
/* On Haswell, we can only use the always-on power well for
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
1025,9 → 1200,7
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
}
 
WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
1068,9 → 1241,9
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
 
if (type == INTEL_OUTPUT_EDP) {
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
}
 
1145,19 → 1318,30
 
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
if (lcpll & LCPLL_CD_SOURCE_FCLK)
if (lcpll & LCPLL_CD_SOURCE_FCLK) {
return 800000;
else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
} else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
return 450000;
else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
} else if (freq == LCPLL_CLK_FREQ_450) {
return 450000;
else if (IS_ULT(dev_priv->dev))
} else if (IS_HASWELL(dev)) {
if (IS_ULT(dev))
return 337500;
else
return 540000;
} else {
if (freq == LCPLL_CLK_FREQ_54O_BDW)
return 540000;
else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
return 337500;
else
return 675000;
}
}
 
void intel_ddi_pll_init(struct drm_device *dev)
{
1208,7 → 1392,7
 
val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
1292,6 → 1476,20
break;
}
 
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->has_dp_encoder = true;
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
default:
break;
}
 
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
/*
1340,6 → 1538,41
.destroy = intel_ddi_destroy,
};
 
static struct intel_connector *
intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
{
struct intel_connector *connector;
enum port port = intel_dig_port->port;
 
connector = kzalloc(sizeof(*connector), GFP_KERNEL);
if (!connector)
return NULL;
 
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
if (!intel_dp_init_connector(intel_dig_port, connector)) {
kfree(connector);
return NULL;
}
 
return connector;
}
 
static struct intel_connector *
intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
{
struct intel_connector *connector;
enum port port = intel_dig_port->port;
 
connector = kzalloc(sizeof(*connector), GFP_KERNEL);
if (!connector)
return NULL;
 
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
intel_hdmi_init_connector(intel_dig_port, connector);
 
return connector;
}
 
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1348,17 → 1581,22
struct drm_encoder *encoder;
struct intel_connector *hdmi_connector = NULL;
struct intel_connector *dp_connector = NULL;
bool init_hdmi, init_dp;
 
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (!init_dp && !init_hdmi) {
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
port_name(port));
init_hdmi = true;
init_dp = true;
}
 
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
 
dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!dp_connector) {
kfree(intel_dig_port);
return;
}
 
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
 
1378,7 → 1616,6
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1385,21 → 1622,16
intel_encoder->cloneable = false;
intel_encoder->hot_plug = intel_ddi_hot_plug;
 
if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
if (init_dp)
dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
 
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
 
if (!dp_connector && !hdmi_connector) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
kfree(dp_connector);
return;
}
 
if (intel_encoder->type != INTEL_OUTPUT_EDP) {
hdmi_connector = kzalloc(sizeof(struct intel_connector),
GFP_KERNEL);
if (!hdmi_connector) {
return;
}
 
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
}
}
/drivers/video/drm/i915/intel_display.c
44,13 → 44,23
#define MAX_ERRNO 4095
phys_addr_t get_bus_addr(void);
 
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static inline void outb(u8 v, u16 port)
{
asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
static inline u8 inb(u16 port)
{
u8 v;
asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
return v;
}
 
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
 
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
72,9 → 82,6
intel_p2_t p2;
};
 
/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
 
int
intel_pch_rawclk(struct drm_device *dev)
{
97,8 → 104,8
 
static const intel_limit_t intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
110,8 → 117,8
 
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
123,8 → 130,8
 
static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
316,45 → 323,47
.p2_slow = 7, .p2_fast = 7 },
};
 
static const intel_limit_t intel_limits_vlv_dac = {
.dot = { .min = 25000, .max = 270000 },
static const intel_limit_t intel_limits_vlv = {
/*
* These are the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
* clock and actual rate limits are more relaxed, so checking
* them would make no difference.
*/
.dot = { .min = 25000 * 5, .max = 270000 * 5 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 22, .max = 450 }, /* guess */
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 1, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
};
 
static const intel_limit_t intel_limits_vlv_hdmi = {
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
 
static const intel_limit_t intel_limits_vlv_dp = {
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 22, .max = 450 },
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 1, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
};
static void vlv_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
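 
A worked pass through vlv_clock(), assuming the platform's 100 MHz reference (refclk = 100000 kHz): m1=2, m2=54, n=2 gives m=108 and vco = 100000*108/2 = 5,400,000 kHz; p1=2, p2=2 gives p=4 and dot = 1,350,000 kHz, which is a 270 MHz rate once the 5x fast-clock scaling noted in intel_limits_vlv is divided back out. As a standalone check:
 
#include <stdio.h>
 
/* Same arithmetic as vlv_clock(), with DIV_ROUND_CLOSEST expanded. */
static unsigned int div_round_closest(unsigned int a, unsigned int b)
{
	return (a + b / 2) / b;
}
 
int main(void)
{
	unsigned int refclk = 100000;		/* kHz; assumed 100 MHz reference */
	unsigned int m1 = 2, m2 = 54, n = 2, p1 = 2, p2 = 2;
	unsigned int m = m1 * m2, p = p1 * p2;
	unsigned int vco = div_round_closest(refclk * m, n);
	unsigned int dot = div_round_closest(vco, p);
 
	/* vco = 5400000 kHz, dot = 1350000 kHz fast clock = 270000 kHz * 5 */
	printf("vco=%u kHz dot=%u kHz (%u kHz * 5)\n", vco, dot, dot / 5);
	return 0;
}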
 
/**
* Returns whether any output on the specified pipe is of the specified type
*/
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->type == type)
return true;
 
return false;
}
 
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
415,12 → 424,7
else
limit = &intel_limits_pineview_sdvo;
} else if (IS_VALLEYVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
limit = &intel_limits_vlv_dac;
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
limit = &intel_limits_vlv_hdmi;
else
limit = &intel_limits_vlv_dp;
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
442,8 → 446,10
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / clock->n;
clock->dot = clock->vco / clock->p;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
 
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
455,25 → 461,12
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / (clock->n + 2);
clock->dot = clock->vco / clock->p;
if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
 
/**
* Returns whether any output on the specified pipe is of the specified type
*/
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->type == type)
return true;
 
return false;
}
 
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
484,20 → 477,26
const intel_limit_t *limit,
const intel_clock_t *clock)
{
if (clock->n < limit->n.min || limit->n.max < clock->n)
INTELPllInvalid("n out of range\n");
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid("p1 out of range\n");
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
INTELPllInvalid("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
 
if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
 
if (!IS_VALLEYVIEW(dev)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
INTELPllInvalid("m out of range\n");
if (clock->n < limit->n.min || limit->n.max < clock->n)
INTELPllInvalid("n out of range\n");
}
 
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
INTELPllInvalid("vco out of range\n");
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
691,67 → 690,73
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
u32 m, n, fastclk;
u32 updrate, minupdate, p;
unsigned long bestppm, ppm, absppm;
int dotclk, flag;
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
int max_n = min(limit->n.max, refclk / 19200);
bool found = false;
 
flag = 0;
dotclk = target * 1000;
bestppm = 1000000;
ppm = absppm = 0;
fastclk = dotclk / (2*100);
updrate = 0;
minupdate = 19200;
n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
bestm1 = bestm2 = bestp1 = bestp2 = 0;
target *= 5; /* fast clock */
 
memset(best_clock, 0, sizeof(*best_clock));
 
/* based on hardware requirement, prefer smaller n to precision */
for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
updrate = refclk / n;
for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
if (p2 > 10)
p2 = p2 - 1;
p = p1 * p2;
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
clock.p2 -= clock.p2 > 10 ? 2 : 1) {
clock.p = clock.p1 * clock.p2;
/* based on hardware requirement, prefer bigger m1,m2 values */
for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
m2 = (((2*(fastclk * p * n / m1 )) +
refclk) / (2*refclk));
m = m1 * m2;
vco = updrate * m;
if (vco >= limit->vco.min && vco < limit->vco.max) {
ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
absppm = (ppm > 0) ? ppm : (-ppm);
if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
unsigned int ppm, diff;
 
clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
refclk * clock.m1);
 
vlv_clock(refclk, &clock);
 
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
 
diff = abs(clock.dot - target);
ppm = div_u64(1000000ULL * diff, target);
 
if (ppm < 100 && clock.p > best_clock->p) {
bestppm = 0;
flag = 1;
*best_clock = clock;
found = true;
}
if (absppm < bestppm - 10) {
bestppm = absppm;
flag = 1;
 
if (bestppm >= 10 && ppm < bestppm - 10) {
bestppm = ppm;
*best_clock = clock;
found = true;
}
if (flag) {
bestn = n;
bestm1 = m1;
bestm2 = m2;
bestp1 = p1;
bestp2 = p2;
flag = 0;
}
}
}
}
 
return found;
}
}
best_clock->n = bestn;
best_clock->m1 = bestm1;
best_clock->m2 = bestm2;
best_clock->p1 = bestp1;
best_clock->p2 = bestp2;
 
return true;
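 
The rewritten search above scores each candidate divisor set by its deviation from the (5x) target in parts per million: anything under 100 ppm wins if it also has a larger post divider, and otherwise the best ppm seen so far is kept, with a 10 ppm hysteresis so near-ties don't churn the choice. The scoring in isolation:
 
#include <stdio.h>
#include <stdlib.h>
 
/* ppm deviation of an achieved dot clock from the target, as in the loop. */
static unsigned int ppm_error(unsigned int dot, unsigned int target)
{
	unsigned int diff = (unsigned int)abs((int)dot - (int)target);
 
	return (unsigned int)((1000000ULL * diff) / target);
}
 
int main(void)
{
	/* target 1350000 kHz (270 MHz * 5); a candidate 60 kHz off is 44 ppm */
	printf("%u ppm\n", ppm_error(1350060, 1350000));
	return 0;
}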
bool intel_crtc_active(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/* Be paranoid as we can arrive here with only partial
* state retrieved from the hardware during setup.
*
* We can ditch the adjusted_mode.crtc_clock check as soon
* as Haswell has gained clock readout/fastboot support.
*
* We can ditch the crtc->fb check as soon as we can
* properly reconstruct framebuffers.
*/
return intel_crtc->active && crtc->fb &&
intel_crtc->config.adjusted_mode.crtc_clock;
}
 
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
763,10 → 768,10
return intel_crtc->config.cpu_transcoder;
}
 
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 frame, frame_reg = PIPEFRAME(pipe);
u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
 
frame = I915_READ(frame_reg);
 
787,8 → 792,8
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
 
if (INTEL_INFO(dev)->gen >= 5) {
ironlake_wait_for_vblank(dev, pipe);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
g4x_wait_for_vblank(dev, pipe);
return;
}
 
815,6 → 820,25
DRM_DEBUG_KMS("vblank wait timed out\n");
}
 
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
 
if (IS_GEN2(dev))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
 
line1 = I915_READ(reg) & line_mask;
mdelay(5);
line2 = I915_READ(reg) & line_mask;
 
return line1 == line2;
}
 
/*
* intel_wait_for_pipe_off - wait for pipe to turn off
* @dev: drm device
846,22 → 870,8
100))
WARN(1, "pipe_off wait timed out\n");
} else {
u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
unsigned long timeout = GetTimerTicks() + msecs_to_jiffies(100);
 
if (IS_GEN2(dev))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
 
/* Wait for the display line to settle */
do {
last_line = I915_READ(reg) & line_mask;
mdelay(5);
} while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, GetTimerTicks()));
if (time_after(GetTimerTicks(), timeout))
if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
}
}
932,6 → 942,24
state_string(state), state_string(cur_state));
}
 
/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
u32 val;
bool cur_state;
 
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
 
cur_state = val & DSI_PLL_VCO_EN;
WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
 
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
1072,6 → 1100,26
pipe_name(pipe));
}
 
static void assert_cursor(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
struct drm_device *dev = dev_priv->dev;
bool cur_state;
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
else if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
 
WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
 
void assert_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
1177,15 → 1225,12
}
}
 
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
u32 val;
bool enabled;
 
if (HAS_PCH_LPT(dev_priv->dev)) {
DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
return;
}
WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
 
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1326,6 → 1371,44
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
 
static void intel_init_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!IS_VALLEYVIEW(dev))
return;
 
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
}
 
static void intel_reset_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!IS_VALLEYVIEW(dev))
return;
 
/*
* Enable the CRI clock source so we can get at the display and the
* reference clock for VGA hotplug / manual detection.
*/
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV |
DPLL_INTEGRATED_CRI_CLK_VLV);
 
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
* 6. De-assert cmn_reset/side_reset. Same as VLV X0.
* a. GUnit 0x2110 bit[0] set to 1 (def 0)
* b. The other bits such as sfr settings / modesel may all be set
* to 0.
*
* This should only be done on init and resume from S3 with both
* PLLs disabled, or we risk losing DPIO and PLL synchronization.
*/
I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
 
static void vlv_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
1439,25 → 1522,35
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
 
/* Leave integrated clock source enabled */
/*
* Leave integrated clock source and reference clock enabled for pipe B.
* The latter is needed for VGA hotplug / manual detection.
*/
if (pipe == PIPE_B)
val = DPLL_INTEGRATED_CRI_CLK_VLV;
val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
}
 
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport)
{
u32 port_mask;
 
if (!port)
switch (dport->port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
else
break;
case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
break;
default:
BUG();
}
 
if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
'B' + port, I915_READ(DPLL(0)));
port_name(dport->port), I915_READ(DPLL(0)));
}
 
/**
1678,7 → 1771,7
* returning.
*/
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port)
bool pch_port, bool dsi)
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
1687,6 → 1780,7
u32 val;
 
assert_planes_disabled(dev_priv, pipe);
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
 
if (HAS_PCH_LPT(dev_priv->dev))
1700,6 → 1794,9
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
if (dsi)
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
else {
if (pch_port) {
1745,6 → 1842,7
* or we might hang the display.
*/
assert_planes_disabled(dev_priv, pipe);
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
 
/* Don't disable pipe A or pipe A PLLs if needed */
1764,17 → 1862,17
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
if (dev_priv->info->gen >= 4)
I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
else
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
 
I915_WRITE(reg, I915_READ(reg));
POSTING_READ(reg);
}
 
/**
* intel_enable_plane - enable a display plane on a given pipe
* intel_enable_primary_plane - enable the primary plane on a given pipe
* @dev_priv: i915 private structure
* @plane: plane to enable
* @pipe: pipe being fed
1781,9 → 1879,11
*
* Enable @plane on @pipe, making sure that @pipe is running first.
*/
static void intel_enable_plane(struct drm_i915_private *dev_priv,
static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
{
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
u32 val;
 
1790,6 → 1890,10
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, pipe);
 
WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
 
intel_crtc->primary_enabled = true;
 
reg = DSPCNTR(plane);
val = I915_READ(reg);
if (val & DISPLAY_PLANE_ENABLE)
1796,12 → 1900,12
return;
 
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
intel_flush_display_plane(dev_priv, plane);
intel_flush_primary_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
 
/**
* intel_disable_plane - disable a display plane
* intel_disable_primary_plane - disable the primary plane
* @dev_priv: i915 private structure
* @plane: plane to disable
* @pipe: pipe consuming the data
1808,12 → 1912,18
*
* Disable @plane; should be an independent operation.
*/
static void intel_disable_plane(struct drm_i915_private *dev_priv,
static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
{
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
u32 val;
 
WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
 
intel_crtc->primary_enabled = false;
 
reg = DSPCNTR(plane);
val = I915_READ(reg);
if ((val & DISPLAY_PLANE_ENABLE) == 0)
1820,7 → 1930,7
return;
 
I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
intel_flush_display_plane(dev_priv, plane);
intel_flush_primary_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
 
1856,10 → 1966,7
alignment = 0;
break;
case I915_TILING_Y:
/* Despite that we check this in framebuffer_init userspace can
* screw us over and change the tiling after the fact. Only
* pinned buffers can't change their tiling. */
DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
WARN(1, "Y tiled bo slipped through, driver bug!\n");
return -EINVAL;
default:
BUG();
2021,7 → 2128,7
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
I915_WRITE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
2094,7 → 2201,7
else
dspcntr &= ~DISPPLANE_TILED;
 
if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
else
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2112,9 → 2219,9
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
I915_WRITE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2172,7 → 2279,12
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
mutex_lock(&crtc->mutex);
if (intel_crtc->active)
/*
* FIXME: Once we have proper support for primary planes (and
* disabling them without disabling the entire crtc) allow again
* a NULL crtc->fb.
*/
if (intel_crtc->active && crtc->fb)
dev_priv->display.update_plane(crtc, crtc->fb,
crtc->x, crtc->y);
mutex_unlock(&crtc->mutex);
2263,11 → 2375,26
return ret;
}
 
/* Update pipe size and adjust fitter if needed */
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
* mode) to see if we can flip rather than do a full mode set. In the
* fastboot case, we'll flip, but if we don't update the pipesrc and
* pfit state, we'll end up with a big fb scanned out into the wrong
* sized surface.
*
* To fix this properly, we need to hoist the checks up into
* compute_mode_changes (or above), check the actual pfit state and
* whether the platform allows pfit disable with pipe active, and only
* then update the pipesrc and pfit state, even on the flip path.
*/
if (i915_fastboot) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
 
I915_WRITE(PIPESRC(intel_crtc->pipe),
((crtc->mode.hdisplay - 1) << 16) |
(crtc->mode.vdisplay - 1));
((adjusted_mode->crtc_hdisplay - 1) << 16) |
(adjusted_mode->crtc_vdisplay - 1));
if (!intel_crtc->config.pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2275,6 → 2402,8
I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
}
intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
 
ret = dev_priv->display.update_plane(crtc, fb, x, y);
2892,6 → 3021,7
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
 
2909,14 → 3039,14
SBI_ICLK);
 
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
if (crtc->mode.clock == 20000) {
if (clock == 20000) {
auxdiv = 1;
divsel = 0x41;
phaseinc = 0x20;
} else {
/* The iCLK virtual clock root frequency is in MHz,
* but the crtc->mode.clock is in KHz. To get the divisors,
* it is necessary to divide one by another, so we
* but the adjusted_mode->crtc_clock is in KHz. To get the
* divisors, it is necessary to divide one by another, so we
* convert the virtual clock precision to KHz here for higher
* precision.
*/
2924,7 → 3054,7
u32 iclk_pi_range = 64;
u32 desired_divisor, msb_divisor_value, pi_value;
 
desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
desired_divisor = (iclk_virtual_root_freq / clock);
msb_divisor_value = desired_divisor / iclk_pi_range;
pi_value = desired_divisor % iclk_pi_range;
 
2940,7 → 3070,7
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
 
DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
crtc->mode.clock,
clock,
auxdiv,
divsel,
phasedir,
3305,6 → 3435,108
intel_plane_disable(&intel_plane->base);
}
 
void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!crtc->config.ips_enabled)
return;
 
/* We can only enable IPS after we enable a plane and wait for a vblank.
* We guarantee that the plane is enabled by calling intel_enable_ips
* only after intel_enable_plane. And intel_enable_plane already waits
* for a vblank, so all we need to do here is to enable the IPS bit. */
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(crtc->base.dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
mutex_unlock(&dev_priv->rps.hw_lock);
/* Quoting Art Runyan: "it's not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
* so we need to just enable it and continue on.
*/
} else {
I915_WRITE(IPS_CTL, IPS_ENABLE);
/* The bit only becomes 1 in the next vblank, so this wait here
* is essentially intel_wait_for_vblank. If we don't have this
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
DRM_ERROR("Timed out waiting for IPS enable\n");
}
}
 
void hsw_disable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!crtc->config.ips_enabled)
return;
 
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(crtc->base.dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
I915_WRITE(IPS_CTL, 0);
POSTING_READ(IPS_CTL);
}
 
/* We need to wait for a vblank before we can disable the plane. */
intel_wait_for_vblank(dev, crtc->pipe);
}
 
/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
int palreg = PALETTE(pipe);
int i;
bool reenable_ips = false;
 
/* The clocks have to be on to load the palette. */
if (!crtc->enabled || !intel_crtc->active)
return;
 
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
}
 
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
palreg = LGC_PALETTE(pipe);
 
/* Workaround: Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
}
 
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
(intel_crtc->lut_r[i] << 16) |
(intel_crtc->lut_g[i] << 8) |
intel_crtc->lut_b[i]);
}
 
if (reenable_ips)
hsw_enable_ips(intel_crtc);
}
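/*
 * Illustrative sketch of the palette word written in the loop above: each
 * of the 256 legacy LUT entries packs the 8-bit R/G/B components into one
 * 32-bit register, R in bits 23:16, G in 15:8, B in 7:0.
 */
static u32 pack_lut_entry(u8 r, u8 g, u8 b)
{
	return ((u32)r << 16) | ((u32)g << 8) | b;
}
/* e.g. pack_lut_entry(0xff, 0x80, 0x00) == 0x00ff8000 */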
 
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3324,8 → 3556,6
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
intel_update_watermarks(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
3348,9 → 3578,10
*/
intel_crtc_load_lut(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe,
intel_crtc->config.has_pch_encoder);
intel_enable_plane(dev_priv, plane, pipe);
intel_crtc->config.has_pch_encoder, false);
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
 
3384,34 → 3615,74
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
 
static void hsw_enable_ips(struct intel_crtc *crtc)
static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
if (!crtc->config.ips_enabled)
return;
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
 
/* We can only enable IPS after we enable a plane and wait for a vblank.
* We guarantee that the plane is enabled by calling intel_enable_ips
* only after intel_enable_plane. And intel_enable_plane already waits
* for a vblank, so all we need to do here is to enable the IPS bit. */
assert_plane_enabled(dev_priv, crtc->plane);
I915_WRITE(IPS_CTL, IPS_ENABLE);
hsw_enable_ips(intel_crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
static void hsw_disable_ips(struct intel_crtc *crtc)
static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
if (!crtc->config.ips_enabled)
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
 
/* FBC must be disabled before disabling the plane on HSW. */
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
hsw_disable_ips(intel_crtc);
 
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_plane(dev_priv, plane, pipe);
}
 
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
* multiple pipes, and planes are enabled after the pipe, we need to wait at
* least 2 vblanks on the first pipe before enabling planes on the second pipe.
*/
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_crtc *crtc_it, *other_active_crtc = NULL;
 
/* We want to get the other_active_crtc only if there's only 1 other
* active crtc. */
list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
if (!crtc_it->active || crtc_it == crtc)
continue;
 
if (other_active_crtc)
return;
 
assert_plane_enabled(dev_priv, crtc->plane);
I915_WRITE(IPS_CTL, 0);
other_active_crtc = crtc_it;
}
if (!other_active_crtc)
return;
 
/* We need to wait for a vblank before we can disable the plane. */
intel_wait_for_vblank(dev, crtc->pipe);
intel_wait_for_vblank(dev, other_active_crtc->pipe);
intel_wait_for_vblank(dev, other_active_crtc->pipe);
}
 
static void haswell_crtc_enable(struct drm_crtc *crtc)
3421,7 → 3692,6
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
WARN_ON(!crtc->enabled);
 
3434,8 → 3704,6
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 
intel_update_watermarks(dev);
 
if (intel_crtc->config.has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
 
3456,24 → 3724,23
intel_ddi_set_pipe_settings(crtc);
intel_ddi_enable_transcoder_func(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe,
intel_crtc->config.has_pch_encoder);
intel_enable_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_crtc->config.has_pch_encoder, false);
 
hsw_enable_ips(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
intel_opregion_notify_encoder(encoder, true);
}
 
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
haswell_crtc_enable_planes(crtc);
 
/*
* There seems to be a race in PCH platform hw (at least on some
* outputs) where an enabled pipe still completes any pageflip right
3525,7 → 3792,7
 
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_primary_plane(dev_priv, plane, pipe);
 
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
3566,7 → 3833,7
}
 
intel_crtc->active = false;
intel_update_watermarks(dev);
intel_update_watermarks(crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
3580,26 → 3847,18
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
if (!intel_crtc->active)
return;
 
for_each_encoder_on_crtc(dev, crtc, encoder)
haswell_crtc_disable_planes(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
}
 
 
/* FBC must be disabled before disabling the plane on HSW. */
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
hsw_disable_ips(intel_crtc);
 
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_plane(dev_priv, plane, pipe);
 
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
intel_disable_pipe(dev_priv, pipe);
3621,7 → 3880,7
}
 
intel_crtc->active = false;
intel_update_watermarks(dev);
intel_update_watermarks(crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
3705,6 → 3964,174
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
 
int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
 
/* Obtain SKU information */
mutex_lock(&dev_priv->dpio_lock);
hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->dpio_lock);
 
return vco_freq[hpll_freq];
}
 
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
 
if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
cmd = 2;
else if (cdclk == 266)
cmd = 1;
else
cmd = 0;
 
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
if (cdclk == 400) {
u32 divider, vco;
 
vco = valleyview_get_vco(dev_priv);
divider = ((vco << 1) / cdclk) - 1;
 
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~0xf;
val |= divider;
vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
mutex_unlock(&dev_priv->dpio_lock);
}
 
mutex_lock(&dev_priv->dpio_lock);
/* adjust self-refresh exit latency value */
val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
val &= ~0x7f;
 
/*
* For high bandwidth configs, we set a higher latency in the bunit
* so that the core display fetch happens in time to avoid underruns.
*/
if (cdclk == 400)
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
mutex_unlock(&dev_priv->dpio_lock);
 
/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
intel_i2c_reset(dev);
}
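/*
 * Worked example of the divider math in valleyview_set_cdclk() above,
 * using the 800 MHz VCO from valleyview_get_vco()'s table (illustrative
 * values only):
 *
 *   cdclk = 400: divider = ((800 << 1) / 400) - 1 = 3
 *
 * and of the BISOC self-refresh exit latency field, in 250 ns units:
 *
 *   4500 / 250 = 18  (4.5 usec, high-bandwidth 400 MHz case)
 *   3000 / 250 = 12  (3.0 usec, all other CDclks)
 */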
 
static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
{
int cur_cdclk, vco;
int divider;
 
vco = valleyview_get_vco(dev_priv);
 
mutex_lock(&dev_priv->dpio_lock);
divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
 
divider &= 0xf;
 
cur_cdclk = (vco << 1) / (divider + 1);
 
return cur_cdclk;
}
 
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int cur_cdclk;
 
cur_cdclk = valleyview_cur_cdclk(dev_priv);
 
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
* 320MHz
* 400MHz
* So we check to see whether we're above 90% of the lower bin and
* adjust if needed.
*/
if (max_pixclk > 288000) {
return 400;
} else if (max_pixclk > 240000) {
return 320;
} else
return 266;
/* Looks like the 200MHz CDclk freq doesn't work on some configs */
}
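/*
 * A sketch of the 90%-of-bin rule above: the 288000/240000 kHz thresholds
 * are 90% of the 320 MHz and ~267 MHz CDclk bins respectively.
 */
static int vlv_pick_cdclk_example(int max_pixclk)
{
	if (max_pixclk > 288000)	/* above 90% of 320 MHz */
		return 400;
	else if (max_pixclk > 240000)	/* above 90% of ~267 MHz */
		return 320;
	else
		return 266;
}
/* e.g. vlv_pick_cdclk_example(250000) == 320 */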
 
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
unsigned modeset_pipes,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc;
int max_pixclk = 0;
 
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
if (modeset_pipes & (1 << intel_crtc->pipe))
max_pixclk = max(max_pixclk,
pipe_config->adjusted_mode.crtc_clock);
else if (intel_crtc->base.enabled)
max_pixclk = max(max_pixclk,
intel_crtc->config.adjusted_mode.crtc_clock);
}
 
return max_pixclk;
}
 
static void valleyview_modeset_global_pipes(struct drm_device *dev,
unsigned *prepare_pipes,
unsigned modeset_pipes,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
pipe_config);
int cur_cdclk = valleyview_cur_cdclk(dev_priv);
 
if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
return;
 
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head)
if (intel_crtc->base.enabled)
*prepare_pipes |= (1 << intel_crtc->pipe);
}
 
static void valleyview_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
int cur_cdclk = valleyview_cur_cdclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
 
if (req_cdclk != cur_cdclk)
valleyview_set_cdclk(dev, req_cdclk);
}
 
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3713,6 → 4140,7
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
bool is_dsi;
 
WARN_ON(!crtc->enabled);
 
3720,12 → 4148,14
return;
 
intel_crtc->active = true;
intel_update_watermarks(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
 
if (!is_dsi)
vlv_enable_pll(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
3736,8 → 4166,9
 
intel_crtc_load_lut(crtc);
 
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe, false, is_dsi);
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
 
3762,7 → 4193,6
return;
 
intel_crtc->active = true;
intel_update_watermarks(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
3774,8 → 4204,9
 
intel_crtc_load_lut(crtc);
 
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe, false, false);
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
/* The fixup needs to happen before cursor is enabled */
if (IS_G4X(dev))
3831,7 → 4262,7
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_primary_plane(dev_priv, plane, pipe);
 
intel_disable_pipe(dev_priv, pipe);
 
3841,14 → 4272,15
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
vlv_disable_pll(dev_priv, pipe);
else
else if (!IS_VALLEYVIEW(dev))
i9xx_disable_pll(dev_priv, pipe);
 
intel_crtc->active = false;
intel_update_watermarks(crtc);
 
intel_update_fbc(dev);
intel_update_watermarks(dev);
}
 
static void i9xx_crtc_off(struct drm_crtc *crtc)
3926,6 → 4358,7
dev_priv->display.off(crtc);
 
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
 
if (crtc->fb) {
4053,7 → 4486,7
return false;
}
 
if (IS_HASWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
4115,8 → 4548,7
*/
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 
fdi_dotclock = adjusted_mode->clock;
fdi_dotclock /= pipe_config->pixel_multiplier;
fdi_dotclock = adjusted_mode->crtc_clock;
 
lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
pipe_config->pipe_bpp);
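/*
 * Rough bandwidth intuition for the lane count computed above (the
 * helper's exact formula is elided here, so this is only an estimate):
 * the dot clock times bpp must fit in lanes * link bandwidth. E.g. a
 * 148500 kHz mode at 24 bpp is ~3.56 Gbit/s of pixel data; with 8b/10b
 * coding a 2.7 GHz FDI lane carries ~2.16 Gbit/s of payload, so two
 * lanes are needed.
 */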
4158,13 → 4590,39
struct drm_device *dev = crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
if (pipe_config->requested_mode.clock * 3
> IRONLAKE_FDI_FREQ * 4)
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
struct drm_i915_private *dev_priv = dev->dev_private;
int clock_limit =
dev_priv->display.get_display_clock_speed(dev);
 
/*
* Enable pixel doubling when the dot clock
* is > 90% of the (display) core speed.
*
* GDG double wide on either pipe,
* otherwise pipe A only.
*/
if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
clock_limit *= 2;
pipe_config->double_wide = true;
}
 
if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
return -EINVAL;
}
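/*
 * Illustrative numbers for the 90% rule above (the core clock is a
 * hypothetical value): with a 200000 kHz display core clock the
 * single-wide limit is 200000 * 9 / 10 = 180000 kHz, so a 190000 kHz
 * dot clock enables double-wide mode, doubling the limit to 360000 kHz
 * and passing the final check.
 */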
 
/*
* Pipe horizontal size must be even in:
* - DVO ganged mode
* - LVDS dual channel mode
* - Double wide pipe
*/
if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
 
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
4328,28 → 4786,6
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
 
static int vlv_get_refclk(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk = 27000; /* for DP & HDMI */
 
return 100000; /* only one validated so far */
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
refclk = 96000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv))
refclk = 100000;
else
refclk = 96000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
refclk = 100000;
}
 
return refclk;
}
 
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
struct drm_device *dev = crtc->dev;
4357,12 → 4793,11
int refclk;
 
if (IS_VALLEYVIEW(dev)) {
refclk = vlv_get_refclk(crtc);
refclk = 100000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
} else if (!IS_GEN2(dev)) {
refclk = 96000;
} else {
4415,7 → 4850,8
}
}
 
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
pipe)
{
u32 reg_val;
 
4423,24 → 4859,24
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
 
reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
reg_val &= 0x8cffffff;
reg_val = 0x8c000000;
vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
 
reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
 
reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
 
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
4506,18 → 4942,18
 
/* PLL B needs special handling */
if (pipe)
vlv_pllb_recal_opamp(dev_priv);
vlv_pllb_recal_opamp(dev_priv, pipe);
 
/* Set up Tx target for periodic Rcomp update */
vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
 
/* Disable target IRef on PLL */
reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
reg_val &= 0x00ffffff;
vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
 
/* Disable fast lock */
vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
 
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4531,19 → 4967,19
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
mdiv |= DPIO_ENABLE_CALIBRATION;
vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
/* Set HBR and RBR LPF coefficients */
if (crtc->config.port_clock == 162000 ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
4550,31 → 4986,35
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
if (!pipe)
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df40000);
else
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (!pipe)
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df70000);
else
vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df40000);
}
 
coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
 
/* Enable DPIO clock input */
/*
* Enable DPIO clock input. We should never disable the reference
* clock for pipe B, since VGA hotplug / manual detection depends
* on it.
*/
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
/* We should never disable this, set it here for state tracking */
4717,7 → 5157,6
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
 
/* We need to be careful not to changed the adjusted mode, for otherwise
4770,7 → 5209,8
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
((intel_crtc->config.pipe_src_w - 1) << 16) |
(intel_crtc->config.pipe_src_h - 1));
}
 
static void intel_get_pipe_timings(struct intel_crtc *crtc,
4808,8 → 5248,11
}
 
tmp = I915_READ(PIPESRC(crtc->pipe));
pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
 
pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
}
 
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4829,7 → 5272,7
 
crtc->mode.flags = pipe_config->adjusted_mode.flags;
 
crtc->mode.clock = pipe_config->adjusted_mode.clock;
crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
crtc->mode.flags |= pipe_config->adjusted_mode.flags;
}
 
4845,17 → 5288,8
I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
pipeconf |= PIPECONF_ENABLE;
 
if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
* core speed.
*
* XXX: No double-wide on 915GM pipe B. Is that the only reason for the
* pipe == 0 check?
*/
if (intel_crtc->config.requested_mode.clock >
dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
if (intel_crtc->config.double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
}
 
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
4909,7 → 5343,6
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
4916,7 → 5349,7
intel_clock_t clock, reduced_clock;
u32 dspcntr;
bool ok, has_reduced_clock = false;
bool is_lvds = false;
bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
4926,23 → 5359,31
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
}
 
num_connectors++;
}
 
if (is_dsi)
goto skip_dpll;
 
if (!intel_crtc->config.clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
 
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
* Returns a set of divisors for the desired target clock with
* the given refclk, or FALSE. The returned values represent
* the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
* 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ok = dev_priv->display.find_dpll(limit, crtc,
intel_crtc->config.port_clock,
refclk, NULL, &clock);
if (!ok && !intel_crtc->config.clock_set) {
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
4949,10 → 5390,10
 
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
* Ensure we match the reduced clock's P to the target clock.
* If the clocks don't match, we can't switch the display clock
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
* Ensure we match the reduced clock's P to the target
* clock. If the clocks don't match, we can't switch
* the display clock by using the FP0/FP1. In such case
* we will disable the LVDS downclock feature.
*/
has_reduced_clock =
dev_priv->display.find_dpll(limit, crtc,
4961,7 → 5402,6
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
if (!intel_crtc->config.clock_set) {
intel_crtc->config.dpll.n = clock.n;
intel_crtc->config.dpll.m1 = clock.m1;
intel_crtc->config.dpll.m2 = clock.m2;
4969,17 → 5409,19
intel_crtc->config.dpll.p2 = clock.p2;
}
 
if (IS_GEN2(dev))
if (IS_GEN2(dev)) {
i8xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
else if (IS_VALLEYVIEW(dev))
} else if (IS_VALLEYVIEW(dev)) {
vlv_update_pll(intel_crtc);
else
} else {
i9xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
}
 
skip_dpll:
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
4996,8 → 5438,8
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
 
i9xx_set_pipeconf(intel_crtc);
5007,8 → 5449,6
 
ret = intel_pipe_set_base(crtc, x, y, fb);
 
intel_update_watermarks(dev);
 
return ret;
}
 
5019,6 → 5459,9
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
return;
 
tmp = I915_READ(PFIT_CONTROL);
if (!(tmp & PFIT_ENABLE))
return;
5050,7 → 5493,7
int refclk = 100000;
 
mutex_lock(&dev_priv->dpio_lock);
mdiv = vlv_dpio_read(dev_priv, DPIO_DIV(pipe));
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
mutex_unlock(&dev_priv->dpio_lock);
 
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5059,10 → 5502,10
clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
 
clock.vco = refclk * clock.m1 * clock.m2 / clock.n;
clock.dot = 2 * clock.vco / (clock.p1 * clock.p2);
vlv_clock(refclk, &clock);
 
pipe_config->adjusted_mode.clock = clock.dot / 10;
/* clock.dot is the fast clock */
pipe_config->port_clock = clock.dot / 5;
}
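/*
 * Worked example of the VLV clock equation above, with illustrative
 * divisor values (not read from real hardware state):
 */
static int vlv_port_clock_example(void)
{
	const int refclk = 100000;	/* kHz */
	const int m1 = 2, m2 = 27, n = 1, p1 = 2, p2 = 4;
	int vco = refclk * m1 * m2 / n;	/* 5400000 kHz (5.4 GHz) */
	int dot = 2 * vco / (p1 * p2);	/* 1350000 kHz, the fast clock */

	return dot / 5;			/* 270000 kHz: a 2.7 GHz DP link */
}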
 
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5095,6 → 5538,9
}
}
 
if (INTEL_INFO(dev)->gen < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
 
intel_get_pipe_timings(crtc, pipe_config);
 
i9xx_get_pfit_config(crtc, pipe_config);
5127,6 → 5573,11
DPLL_PORTB_READY_MASK);
}
 
if (IS_VALLEYVIEW(dev))
vlv_crtc_clock_get(crtc, pipe_config);
else
i9xx_crtc_clock_get(crtc, pipe_config);
 
return true;
}
 
5499,9 → 5950,9
}
 
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
dev_priv->vbt.lvds_ssc_freq);
return dev_priv->vbt.lvds_ssc_freq * 1000;
return dev_priv->vbt.lvds_ssc_freq;
}
 
return 120000;
5615,14 → 6066,16
 
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
uint32_t val;
 
val = 0;
 
if (intel_crtc->config.dither)
if (IS_HASWELL(dev) && intel_crtc->config.dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5635,8 → 6088,35
 
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
 
if (IS_BROADWELL(dev)) {
val = 0;
 
switch (intel_crtc->config.pipe_bpp) {
case 18:
val |= PIPEMISC_DITHER_6_BPC;
break;
case 24:
val |= PIPEMISC_DITHER_8_BPC;
break;
case 30:
val |= PIPEMISC_DITHER_10_BPC;
break;
case 36:
val |= PIPEMISC_DITHER_12_BPC;
break;
default:
/* Case prevented by pipe_config_set_bpp. */
BUG();
}
 
if (intel_crtc->config.dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
 
I915_WRITE(PIPEMISC(pipe), val);
}
}
 
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
intel_clock_t *clock,
bool *has_reduced_clock,
5734,7 → 6214,7
factor = 21;
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100) ||
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (intel_crtc->config.sdvo_tv_clock)
5869,11 → 6349,6
else
intel_crtc->lowfreq_avail = false;
 
if (intel_crtc->config.has_pch_encoder) {
pll = intel_crtc_to_shared_dpll(intel_crtc);
 
}
 
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder) {
5889,27 → 6364,69
 
ret = intel_pipe_set_base(crtc, x, y, fb);
 
intel_update_watermarks(dev);
 
return ret;
}
 
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder transcoder = pipe_config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
 
pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
& ~TU_SIZE_MASK;
pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
 
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
enum transcoder transcoder,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = crtc->pipe;
 
if (INTEL_INFO(dev)->gen >= 5) {
m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
& ~TU_SIZE_MASK;
m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
} else {
m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
& ~TU_SIZE_MASK;
m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
}
 
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
if (crtc->config.has_pch_encoder)
intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
else
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->dp_m_n);
}
 
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n);
}
 
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
5996,6 → 6513,8
pipe_config->pixel_multiplier =
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
 
ironlake_pch_clock_get(crtc, pipe_config);
} else {
pipe_config->pixel_multiplier = 1;
}
6036,7 → 6555,7
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
val = I915_READ(DEIMR);
WARN((val & ~DE_PCH_EVENT_IVB) != val,
WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
"Unexpected DEIMR bits enabled: 0x%x\n", val);
val = I915_READ(SDEIMR);
WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
6052,7 → 6571,7
* register. Callers should take care of disabling all the display engine
* functions, doing the mode unset, fixing interrupts, etc.
*/
void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down)
{
uint32_t val;
6081,9 → 6600,12
 
val = I915_READ(D_COMP);
val |= D_COMP_COMP_DISABLE;
I915_WRITE(D_COMP, val);
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
DRM_ERROR("Failed to disable D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
POSTING_READ(D_COMP);
udelay(100);
delay(1);
 
if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
DRM_ERROR("D_COMP RCOMP still in progress\n");
6100,7 → 6622,7
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
* source.
*/
void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
6112,7 → 6634,7
 
/* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
gen6_gt_force_wake_get(dev_priv);
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
6123,7 → 6645,10
val = I915_READ(D_COMP);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
I915_WRITE(D_COMP, val);
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
DRM_ERROR("Failed to enable D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
POSTING_READ(D_COMP);
 
val = I915_READ(LCPLL_CTL);
6143,7 → 6668,7
DRM_ERROR("Switching back to LCPLL failed\n");
}
 
gen6_gt_force_wake_put(dev_priv);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
 
void hsw_enable_pc8_work(struct work_struct *__work)
6154,6 → 6679,8
struct drm_device *dev = dev_priv->dev;
uint32_t val;
 
WARN_ON(!HAS_PC8(dev));
 
if (dev_priv->pc8.enabled)
return;
 
6170,6 → 6697,8
lpt_disable_clkout_dp(dev);
hsw_pc8_disable_interrupts(dev);
hsw_disable_lcpll(dev_priv, true, true);
 
intel_runtime_pm_put(dev_priv);
}
 
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6199,6 → 6728,8
if (dev_priv->pc8.disable_count != 1)
return;
 
WARN_ON(!HAS_PC8(dev));
 
cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
if (!dev_priv->pc8.enabled)
return;
6205,6 → 6736,8
 
DRM_DEBUG_KMS("Disabling package C8+\n");
 
intel_runtime_pm_get(dev_priv);
 
hsw_restore_lcpll(dev_priv);
hsw_pc8_restore_interrupts(dev);
lpt_init_pch_refclk(dev);
6225,6 → 6758,9
 
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
 
mutex_lock(&dev_priv->pc8.lock);
__hsw_enable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
6232,6 → 6768,9
 
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
 
mutex_lock(&dev_priv->pc8.lock);
__hsw_disable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
6269,6 → 6808,9
struct drm_i915_private *dev_priv = dev->dev_private;
bool allow;
 
if (!HAS_PC8(dev_priv->dev))
return;
 
if (!i915_enable_pc8)
return;
 
6292,36 → 6834,103
 
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
 
mutex_lock(&dev_priv->pc8.lock);
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
hsw_enable_package_c8(dev_priv);
__hsw_enable_package_c8(dev_priv);
}
mutex_unlock(&dev_priv->pc8.lock);
}
 
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
 
mutex_lock(&dev_priv->pc8.lock);
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
hsw_disable_package_c8(dev_priv);
__hsw_disable_package_c8(dev_priv);
}
mutex_unlock(&dev_priv->pc8.lock);
}
 
static void haswell_modeset_global_resources(struct drm_device *dev)
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
 
static unsigned long get_pipe_power_domains(struct drm_device *dev,
enum pipe pipe, bool pfit_enabled)
{
bool enable = false;
unsigned long mask;
enum transcoder transcoder;
 
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (pfit_enabled)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 
return mask;
}
 
void intel_display_set_init_power(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->power_domains.init_power_on == enable)
return;
 
if (enable)
intel_display_power_get(dev, POWER_DOMAIN_INIT);
else
intel_display_power_put(dev, POWER_DOMAIN_INIT);
 
dev_priv->power_domains.init_power_on = enable;
}
 
static void modeset_update_power_wells(struct drm_device *dev)
{
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
 
/*
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;
 
if (!crtc->base.enabled)
continue;
 
if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
crtc->config.cpu_transcoder != TRANSCODER_EDP)
enable = true;
pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
crtc->pipe,
crtc->config.pch_pfit.enabled);
 
for_each_power_domain(domain, pipe_domains[crtc->pipe])
intel_display_power_get(dev, domain);
}
 
intel_set_power_well(dev, enable);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;
 
for_each_power_domain(domain, crtc->enabled_power_domains)
intel_display_power_put(dev, domain);
 
crtc->enabled_power_domains = pipe_domains[crtc->pipe];
}
 
intel_display_set_init_power(dev, false);
}
 
static void haswell_modeset_global_resources(struct drm_device *dev)
{
modeset_update_power_wells(dev);
hsw_update_package_c8(dev);
}
 
6335,8 → 6944,9
int plane = intel_crtc->plane;
int ret;
 
if (!intel_ddi_pll_mode_set(crtc))
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
intel_ddi_pll_enable(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
6360,8 → 6970,6
 
ret = intel_pipe_set_base(crtc, x, y, fb);
 
intel_update_watermarks(dev);
 
return ret;
}
 
6429,6 → 7037,7
if (intel_display_power_enabled(dev, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
 
if (IS_HASWELL(dev))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
 
6469,6 → 7078,44
return 0;
}
 
static struct {
int clock;
u32 config;
} hdmi_audio_clock[] = {
{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};
 
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
{
int i;
 
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
if (mode->clock == hdmi_audio_clock[i].clock)
break;
}
 
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
i = 1;
}
 
DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
hdmi_audio_clock[i].clock,
hdmi_audio_clock[i].config);
 
return hdmi_audio_clock[i].config;
}
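/*
 * Example of the lookup above: a 74250 kHz mode matches the
 * AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 entry directly, while the NTSC
 * 1/1.001 variant matches DIV_ROUND_UP(74250 * 1000, 1001) = 74176 kHz.
 * Any unlisted clock falls back to index 1, the 25.2 MHz bspec default.
 */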
 
static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda,
6499,7 → 7146,8
}
 
static void g4x_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
6539,7 → 7187,8
}
 
static void haswell_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
6592,8 → 7241,9
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
} else
I915_WRITE(aud_config, 0);
} else {
I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
}
 
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
6626,7 → 7276,8
}
 
static void ironlake_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
6644,6 → 7295,11
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(connector->dev)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
} else {
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
aud_config = CPT_AUD_CFG(pipe);
6653,8 → 7309,19
 
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
if (IS_VALLEYVIEW(connector->dev)) {
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
 
intel_encoder = intel_attached_encoder(connector);
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
i = intel_dig_port->port;
} else {
i = I915_READ(aud_cntl_st);
i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
i = (i >> 29) & DIP_PORT_SEL_MASK;
/* DIP_Port_Select, 0x1 = PortB */
}
 
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
6670,8 → 7337,9
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
} else
I915_WRITE(aud_config, 0);
} else {
I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
}
 
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
6721,53 → 7389,9
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 
if (dev_priv->display.write_eld)
dev_priv->display.write_eld(connector, crtc);
dev_priv->display.write_eld(connector, crtc, mode);
}
 
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
int palreg = PALETTE(pipe);
int i;
bool reenable_ips = false;
 
/* The clocks have to be on to load the palette. */
if (!crtc->enabled || !intel_crtc->active)
return;
 
if (!HAS_PCH_SPLIT(dev_priv->dev))
assert_pll_enabled(dev_priv, pipe);
 
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
palreg = LGC_PALETTE(pipe);
 
/* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
if (intel_crtc->config.ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
}
 
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
(intel_crtc->lut_r[i] << 16) |
(intel_crtc->lut_g[i] << 8) |
intel_crtc->lut_b[i]);
}
 
if (reenable_ips)
hsw_enable_ips(intel_crtc);
}
 
#if 0
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
6797,7 → 7421,6
 
intel_crtc->cursor_visible = visible;
}
#endif
 
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
6844,7 → 7467,7
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
if (IS_HASWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
cntl |= CURSOR_PIPE_CSC_ENABLE;
cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
}
6868,23 → 7491,20
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
u32 base, pos;
u32 base = 0, pos = 0;
bool visible;
 
pos = 0;
if (on)
base = intel_crtc->cursor_addr;
 
if (on && crtc->enabled && crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
if (x >= intel_crtc->config.pipe_src_w)
base = 0;
 
if (y > (int) crtc->fb->height)
if (y >= intel_crtc->config.pipe_src_h)
base = 0;
} else
base = 0;
 
if (x < 0) {
if (x + intel_crtc->cursor_width < 0)
if (x + intel_crtc->cursor_width <= 0)
base = 0;
 
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
6893,7 → 7513,7
pos |= x << CURSOR_X_SHIFT;
 
if (y < 0) {
if (y + intel_crtc->cursor_height < 0)
if (y + intel_crtc->cursor_height <= 0)
base = 0;
 
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
6905,7 → 7525,7
if (!visible && !intel_crtc->cursor_visible)
return;
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
I915_WRITE(CURPOS_IVB(pipe), pos);
ivb_update_cursor(crtc, base);
} else {
7035,8 → 7655,8
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
 
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7044,27 → 7664,6
return 0;
}
 
/** Sets the color ramps on behalf of RandR */
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
intel_crtc->lut_r[regno] = red >> 8;
intel_crtc->lut_g[regno] = green >> 8;
intel_crtc->lut_b[regno] = blue >> 8;
}
 
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
*red = intel_crtc->lut_r[regno] << 8;
*green = intel_crtc->lut_g[regno] << 8;
*blue = intel_crtc->lut_b[regno] << 8;
}
 
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
7086,7 → 7685,7
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
 
static struct drm_framebuffer *
struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
7100,16 → 7699,23
return ERR_PTR(-ENOMEM);
}
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto err;
 
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
mutex_unlock(&dev->struct_mutex);
if (ret)
goto err;
 
return &intel_fb->base;
err:
drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb);
 
return ERR_PTR(ret);
}
 
return &intel_fb->base;
}
 
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
7139,6 → 7745,7
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
7159,6 → 7766,9
return NULL;
 
return fb;
#else
return NULL;
#endif
}
 
bool intel_get_load_detect_pipe(struct drm_connector *connector,
7302,6 → 7912,22
mutex_unlock(&crtc->mutex);
}
 
static int i9xx_pll_refclk(struct drm_device *dev,
const struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll = pipe_config->dpll_hw_state.dpll;
 
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
return dev_priv->vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev))
return 120000;
else if (!IS_GEN2(dev))
return 96000;
else
return 48000;
}
 
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
7309,14 → 7935,15
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = pipe_config->cpu_transcoder;
u32 dpll = I915_READ(DPLL(pipe));
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
intel_clock_t clock;
int refclk = i9xx_pll_refclk(dev, pipe_config);
 
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
fp = I915_READ(FP0(pipe));
fp = pipe_config->dpll_hw_state.fp0;
else
fp = I915_READ(FP1(pipe));
fp = pipe_config->dpll_hw_state.fp1;
 
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev)) {
7347,28 → 7974,25
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
pipe_config->adjusted_mode.clock = 0;
return;
}
 
if (IS_PINEVIEW(dev))
pineview_clock(96000, &clock);
pineview_clock(refclk, &clock);
else
i9xx_clock(96000, &clock);
i9xx_clock(refclk, &clock);
} else {
bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
 
if (is_lvds) {
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
 
if (lvds & LVDS_CLKB_POWER_UP)
clock.p2 = 7;
else
clock.p2 = 14;
 
if ((dpll & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
/* XXX: might not be 66MHz */
i9xx_clock(66000, &clock);
} else
i9xx_clock(48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
7380,59 → 8004,55
clock.p2 = 4;
else
clock.p2 = 2;
}
 
i9xx_clock(48000, &clock);
i9xx_clock(refclk, &clock);
}
}
 
pipe_config->adjusted_mode.clock = clock.dot;
/*
* This value includes pixel_multiplier. We will use
* port_clock to compute adjusted_mode.crtc_clock in the
* encoder's get_config() function.
*/
pipe_config->port_clock = clock.dot;
}
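/*
 * Worked example of the i9xx clock equation quoted earlier in this
 * file, refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2, with
 * illustrative divisor values:
 */
static int i9xx_dot_clock_example(void)
{
	const int refclk = 96000;	/* kHz */
	const int m1 = 12, m2 = 9, n = 3, p1 = 2, p2 = 4;
	int m = 5 * (m1 + 2) + (m2 + 2);	/* 81 */
	int vco = refclk * m / (n + 2);		/* 1555200 kHz */

	return vco / p1 / p2;			/* 194400 kHz */
}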
 
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
int intel_dotclock_calculate(int link_freq,
const struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
int link_freq, repeat;
u64 clock;
u32 link_m, link_n;
 
repeat = pipe_config->pixel_multiplier;
 
/*
* The calculation for the data clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
* pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
* But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
* pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
* and the link clock is simpler:
* link_clock = (m * link_clock * repeat) / n
* link_clock = (m * link_clock) / n
*/
 
/*
* We need to get the FDI or DP link clock here to derive
* the M/N dividers.
*
* For FDI, we read it from the BIOS or use a fixed 2.7GHz.
* For DP, it's either 1.62GHz or 2.7GHz.
* We do our calculations in 10*MHz since we don't need much precision.
*/
if (pipe_config->has_pch_encoder)
link_freq = intel_fdi_link_freq(dev) * 10000;
else
link_freq = pipe_config->port_clock;
if (!m_n->link_n)
return 0;
 
link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}
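/*
 * Units sanity check, with an illustrative (made-up) m/n pair: an FDI
 * link at 2.7 GHz comes in as link_freq = 2700000 kHz, so hardware
 * programmed with link_m = 22 and link_n = 400 yields
 *   dotclock = 22 * 2700000 / 400 = 148500 kHz,
 * the standard 1080p60 pixel clock. The div_u64() keeps the
 * intermediate product from overflowing 32 bits.
 */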
 
if (!link_m || !link_n)
return;
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
 
clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
do_div(clock, link_n);
/* read out port_clock from the DPLL */
i9xx_crtc_clock_get(crtc, pipe_config);
 
pipe_config->adjusted_mode.clock = clock;
/*
* This value does not include pixel_multiplier.
* We will check that port_clock and adjusted_mode.crtc_clock
* agree once we know their relationship in the encoder's
* get_config() function.
*/
pipe_config->adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
&pipe_config->fdi_m_n);
}
 
/** Returns the currently programmed mode of the given pipe. */
7448,6 → 8068,7
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
int vsync = I915_READ(VSYNC(cpu_transcoder));
enum pipe pipe = intel_crtc->pipe;
 
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
7460,11 → 8081,14
* Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
* to use a real value here instead.
*/
pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
pipe_config.cpu_transcoder = (enum transcoder) pipe;
pipe_config.pixel_multiplier = 1;
pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
i9xx_crtc_clock_get(intel_crtc, &pipe_config);
 
mode->clock = pipe_config.adjusted_mode.clock;
mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
7570,6 → 8194,9
 
intel_decrease_pllclock(crtc);
}
 
if (dev_priv->info->gen >= 6)
gen6_rps_idle(dev->dev_private);
}
 
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
7757,7 → 8384,7
intel_ring_emit(ring, 0); /* aux display base address, unused */
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
7799,7 → 8426,7
intel_ring_emit(ring, MI_NOOP);
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
7848,7 → 8475,7
intel_ring_emit(ring, pf | pipesrc);
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
7893,7 → 8520,7
intel_ring_emit(ring, pf | pipesrc);
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
7961,7 → 8588,8
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
}
7972,7 → 8600,7
intel_ring_emit(ring, (MI_NOOP));
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
8017,7 → 8645,7
fb->pitches[0] != crtc->fb->pitches[0]))
return -EINVAL;
 
work = kzalloc(sizeof *work, GFP_KERNEL);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
 
8100,28 → 8728,6
.load_lut = intel_crtc_load_lut,
};
 
static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
struct drm_crtc *crtc)
{
struct drm_device *dev;
struct drm_crtc *tmp;
int crtc_mask = 1;
 
WARN(!crtc, "checking null crtc?\n");
 
dev = crtc->dev;
 
list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
if (tmp == crtc)
break;
crtc_mask <<= 1;
}
 
if (encoder->possible_crtcs & crtc_mask)
return true;
return false;
}
 
/**
* intel_modeset_update_staged_output_state
*
8253,6 → 8859,17
return bpp;
}
 
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
"type: 0x%x flags: 0x%x\n",
mode->crtc_clock,
mode->crtc_hdisplay, mode->crtc_hsync_start,
mode->crtc_hsync_end, mode->crtc_htotal,
mode->crtc_vdisplay, mode->crtc_vsync_start,
mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
 
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
const char *context)
8269,10 → 8886,19
pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
pipe_config->has_dp_encoder,
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
pipe_config->dp_m_n.tu);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
intel_dump_crtc_timings(&pipe_config->adjusted_mode);
DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
DRM_DEBUG_KMS("pipe src size: %dx%d\n",
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
8282,6 → 8908,7
pipe_config->pch_pfit.size,
pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
 
static bool check_encoder_cloning(struct drm_crtc *crtc)
8325,6 → 8952,7
 
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);
 
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8351,6 → 8979,18
if (plane_bpp < 0)
goto fail;
 
/*
* Determine the real pipe dimensions. Note that stereo modes can
* increase the actual pipe size due to the frame doubling and
* insertion of additional space for blanks between the frames. This
* is stored in the crtc timings. We use the requested mode to do this
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
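/*
 * Example (arithmetic per drm_mode_set_crtcinfo()): a 1920x1080
 * HDMI frame-packed 3D mode with vtotal 1125 ends up with
 * crtc_vdisplay = 1080 + 1125 = 2205 active lines and a doubled
 * crtc_clock, so pipe_src_h here becomes 2205 rather than 1080.
 */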
 
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
pipe_config->port_clock = 0;
8357,7 → 8997,7
pipe_config->pixel_multiplier = 1;
 
/* Fill in default crtc timings, allow encoders to overwrite them. */
drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
 
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
8378,7 → 9018,8
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
pipe_config->port_clock = pipe_config->adjusted_mode.clock;
pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
* pipe_config->pixel_multiplier;
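/*
 * E.g. SDVO doubles low dotclocks to stay inside its operating range:
 * a 65000 kHz panel mode with pixel_multiplier == 2 ends up with
 * port_clock = 130000 kHz while adjusted_mode.crtc_clock stays 65000.
 */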
 
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
if (ret < 0) {
8565,14 → 9206,10
 
}
 
static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
struct intel_crtc_config *new)
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int clock1, clock2, diff;
int diff;
 
clock1 = cur->adjusted_mode.clock;
clock2 = new->adjusted_mode.clock;
 
if (clock1 == clock2)
return true;
 
8625,6 → 9262,15
return false; \
}
 
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
DRM_ERROR("mismatch in " #name " " \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
return false; \
}
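/*
 * For illustration, PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock) expands to
 * roughly:
 *
 *	if (!intel_fuzzy_clock_check(current_config->port_clock,
 *				     pipe_config->port_clock)) {
 *		DRM_ERROR("mismatch in port_clock (expected %i, found %i)\n",
 *			  current_config->port_clock,
 *			  pipe_config->port_clock);
 *		return false;
 *	}
 */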
 
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
 
8638,6 → 9284,13
PIPE_CONF_CHECK_I(fdi_m_n.link_n);
PIPE_CONF_CHECK_I(fdi_m_n.tu);
 
PIPE_CONF_CHECK_I(has_dp_encoder);
PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
PIPE_CONF_CHECK_I(dp_m_n.link_m);
PIPE_CONF_CHECK_I(dp_m_n.link_n);
PIPE_CONF_CHECK_I(dp_m_n.tu);
 
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
8668,8 → 9321,8
DRM_MODE_FLAG_NVSYNC);
}
 
PIPE_CONF_CHECK_I(requested_mode.hdisplay);
PIPE_CONF_CHECK_I(requested_mode.vdisplay);
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
 
PIPE_CONF_CHECK_I(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
8682,8 → 9335,12
PIPE_CONF_CHECK_I(pch_pfit.size);
}
 
/* BDW+ don't expose a synchronous way to read the state */
if (IS_HASWELL(dev))
PIPE_CONF_CHECK_I(ips_enabled);
 
PIPE_CONF_CHECK_I(double_wide);
 
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8693,20 → 9350,17
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
 
if (!HAS_DDI(dev)) {
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
}
 
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
 
if (!IS_HASWELL(dev)) {
if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
current_config->adjusted_mode.clock,
pipe_config->adjusted_mode.clock);
return false;
}
}
 
return true;
}
 
8833,14 → 9487,10
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
if (encoder->get_config &&
encoder->get_hw_state(encoder, &pipe))
if (encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
 
if (dev_priv->display.get_clock)
dev_priv->display.get_clock(crtc, &pipe_config);
 
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
8915,6 → 9565,18
check_shared_dpll_state(dev);
}
 
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
int dotclock)
{
/*
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
pipe_config->adjusted_mode.crtc_clock, dotclock);
}
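/*
 * Typical caller, mirroring the intel_dp.c hunk later in this revision
 * (dotclock/port names as used there):
 *
 *	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
 *					    &pipe_config->dp_m_n);
 *	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
 *		ironlake_check_encoder_dotclock(pipe_config, dotclock);
 */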
 
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
8921,21 → 9583,19
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode, *saved_hwmode;
struct drm_display_mode *saved_mode;
struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
 
saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
saved_hwmode = saved_mode + 1;
 
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
 
*saved_hwmode = crtc->hwmode;
*saved_mode = crtc->mode;
 
/* Hack: Because we don't (yet) support global modeset on multiple
8955,6 → 9615,21
"[modeset]");
}
 
/*
* See if the config requires any additional preparation, e.g.
* to adjust global state with pipes off. We need to do this
* here so we can get the modeset_pipe updated config for the new
* mode set on this crtc. For other crtcs we need to use the
* adjusted_mode bits in the crtc directly.
*/
if (IS_VALLEYVIEW(dev)) {
valleyview_modeset_global_pipes(dev, &prepare_pipes,
modeset_pipes, pipe_config);
 
/* may have added more to prepare_pipes than we should */
prepare_pipes &= ~disable_pipes;
}
 
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
 
8971,6 → 9646,14
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
to_intel_crtc(crtc)->config = *pipe_config;
 
/*
* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc,
&pipe_config->adjusted_mode);
}
 
/* Only after disabling all output pipelines that will be changed can we
8994,23 → 9677,10
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
dev_priv->display.crtc_enable(&intel_crtc->base);
 
if (modeset_pipes) {
/* Store real post-adjustment hardware mode. */
crtc->hwmode = pipe_config->adjusted_mode;
 
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc);
}
 
/* FIXME: add subpixel order */
done:
if (ret && crtc->enabled) {
crtc->hwmode = *saved_hwmode;
if (ret && crtc->enabled)
crtc->mode = *saved_mode;
}
 
out:
kfree(pipe_config);
9232,7 → 9902,7
}
 
/* Make sure the new CRTC will work with the encoder */
if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
new_crtc)) {
return -EINVAL;
}
9247,17 → 9917,21
/* Check for any encoders that needs to be disabled. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
int num_connectors = 0;
list_for_each_entry(connector,
&dev->mode_config.connector_list,
base.head) {
if (connector->new_encoder == encoder) {
WARN_ON(!connector->new_encoder->new_crtc);
 
goto next_encoder;
num_connectors++;
}
}
 
if (num_connectors == 0)
encoder->new_crtc = NULL;
next_encoder:
else if (num_connectors > 1)
return -EINVAL;
 
/* Only now check for crtc changes so we don't miss encoders
* that will be disabled. */
if (&encoder->new_crtc->base != encoder->base.crtc) {
9328,6 → 10002,16
 
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
/*
* In the fastboot case this may be our only check of the
* state after boot. It would be better to only do it on
* the first update, but we don't have a nice way of doing that
* (and really, set_config isn't used much for high freq page
* flipping, so increasing its cost here shouldn't be a big
* deal).
*/
if (i915_fastboot && ret == 0)
intel_modeset_check_state(set->crtc->dev);
}
 
if (ret) {
9388,7 → 10072,7
struct intel_shared_dpll *pll)
{
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
ibx_assert_pch_refclk_enabled(dev_priv);
 
I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
 
9456,8 → 10140,6
dev_priv->num_shared_dpll = 0;
 
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
DRM_DEBUG_KMS("%i shared PLLs initialized\n",
dev_priv->num_shared_dpll);
}
 
static void intel_crtc_init(struct drm_device *dev, int pipe)
9466,7 → 10148,7
struct intel_crtc *intel_crtc;
int i;
 
intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
return;
 
9479,10 → 10161,13
intel_crtc->lut_b[i] = i;
}
 
/* Swap pipes & planes for FBC on pre-965 */
/*
* On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
* is hooked to plane B. Hence we want plane A feeding pipe B.
*/
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
}
9495,6 → 10180,18
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
}
 
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
struct drm_encoder *encoder = connector->base.encoder;
 
WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
 
if (!encoder)
return INVALID_PIPE;
 
return to_intel_crtc(encoder->crtc)->pipe;
}
 
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
9510,7 → 10207,7
 
if (!drmmode_obj) {
DRM_ERROR("no such CRTC id\n");
return -EINVAL;
return -ENOENT;
}
 
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
9559,6 → 10256,28
return true;
}
 
const char *intel_output_name(int output)
{
static const char *names[] = {
[INTEL_OUTPUT_UNUSED] = "Unused",
[INTEL_OUTPUT_ANALOG] = "Analog",
[INTEL_OUTPUT_DVO] = "DVO",
[INTEL_OUTPUT_SDVO] = "SDVO",
[INTEL_OUTPUT_LVDS] = "LVDS",
[INTEL_OUTPUT_TVOUT] = "TV",
[INTEL_OUTPUT_HDMI] = "HDMI",
[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
[INTEL_OUTPUT_EDP] = "eDP",
[INTEL_OUTPUT_DSI] = "DSI",
[INTEL_OUTPUT_UNKNOWN] = "Unknown",
};
 
if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
return "Invalid";
 
return names[output];
}
 
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
9591,7 → 10310,7
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
dpd_is_edp = intel_dpd_is_edp(dev);
dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
 
if (has_edp_a(dev))
intel_dp_init(dev, DP_A, PORT_A);
9617,21 → 10336,21
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
}
 
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
PORT_C);
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
}
 
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
}
intel_dsi_init(dev);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
 
9696,9 → 10415,12
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int aligned_height, tile_height;
int pitch_limit;
int ret;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
if (obj->tiling_mode == I915_TILING_Y) {
DRM_DEBUG("hardware does not support tiling Y\n");
return -EINVAL;
9787,8 → 10509,16
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
 
tile_height = IS_GEN2(dev) ? 16 : 8;
aligned_height = ALIGN(mode_cmd->height,
obj->tiling_mode ? tile_height : 1);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
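/*
 * Example: an X-tiled 1920x1050 XRGB8888 fb with pitch 7680 on gen4
 * rounds up to ALIGN(1050, 8) = 1056 lines, so the backing object
 * must be at least 1056 * 7680 = 8110080 bytes.
 */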
 
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
intel_fb->obj->framebuffer_references++;
 
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
9799,10 → 10529,15
return 0;
}
 
#ifndef CONFIG_DRM_I915_FBDEV
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
 
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = NULL /*intel_user_framebuffer_create*/,
.output_poll_changed = intel_fb_output_poll_changed,
.fb_create = NULL,
.output_poll_changed = intel_fbdev_output_poll_changed,
};
 
/* Set up chip specific display functions */
9828,7 → 10563,6
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_clock = ironlake_crtc_clock_get;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
9836,7 → 10570,6
dev_priv->display.update_plane = ironlake_update_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_clock = vlv_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
9844,7 → 10577,6
dev_priv->display.update_plane = i9xx_update_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
9894,7 → 10626,7
dev_priv->display.write_eld = ironlake_write_eld;
dev_priv->display.modeset_global_resources =
ivb_modeset_global_resources;
} else if (IS_HASWELL(dev)) {
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
dev_priv->display.modeset_global_resources =
9902,6 → 10634,10
}
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.modeset_global_resources =
valleyview_modeset_global_resources;
dev_priv->display.write_eld = ironlake_write_eld;
}
 
/* Default just returns -ENODEV to indicate unsupported */
9910,6 → 10646,7
 
 
 
intel_panel_init_backlight_funcs(dev);
}
 
/*
9946,17 → 10683,6
DRM_INFO("applying inverted panel brightness quirk\n");
}
 
/*
* Some machines (Dell XPS13) suffer broken backlight controls if
* BLM_PCH_PWM_ENABLE is set.
*/
static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
}
 
struct intel_quirk {
int device;
int subsystem_vendor;
10002,8 → 10728,7
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
/* 830/845 need to leave pipe A & dpll A up */
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* 830 needs to leave pipe A & dpll A up */
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
/* Lenovo U160 cannot use SSC on LVDS */
10026,11 → 10751,6
 
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
 
/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};
 
static void intel_init_quirks(struct drm_device *dev)
10058,9 → 10778,9
u32 vga_reg = i915_vgacntrl_reg(dev);
 
// vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
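/* SR01 is the VGA sequencer clocking-mode register; bit 5 is "screen off". */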
out8(SR01, VGA_SR_INDEX);
sr1 = in8(VGA_SR_DATA);
out8(sr1 | 1<<5, VGA_SR_DATA);
outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
outb(sr1 | 1<<5, VGA_SR_DATA);
// vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
 
10070,18 → 10790,11
 
void intel_modeset_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_init_power_well(dev);
 
intel_prepare_ddi(dev);
 
intel_init_clock_gating(dev);
 
/* Enable the CRI clock source so we can get at the display */
if (IS_VALLEYVIEW(dev))
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_INTEGRATED_CRI_CLK_VLV);
intel_reset_dpio(dev);
 
mutex_lock(&dev->struct_mutex);
intel_enable_gt_powersave(dev);
10143,6 → 10856,9
}
}
 
intel_init_dpio(dev);
intel_reset_dpio(dev);
 
intel_cpu_pll_init(dev);
intel_shared_dpll_init(dev);
 
10346,11 → 11062,11
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if (HAS_POWER_WELL(dev) &&
if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
return;
 
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
}
10373,6 → 11089,7
&crtc->config);
 
crtc->base.enabled = crtc->active;
crtc->primary_enabled = crtc->active;
 
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
10406,7 → 11123,6
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
if (encoder->get_config)
encoder->get_config(encoder, &crtc->config);
} else {
encoder->base.crtc = NULL;
10413,22 → 11129,13
}
 
encoder->connectors_active = false;
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
encoder->base.crtc ? "enabled" : "disabled",
pipe);
pipe_name(pipe));
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (!crtc->active)
continue;
if (dev_priv->display.get_clock)
dev_priv->display.get_clock(crtc,
&crtc->config);
}
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->get_hw_state(connector)) {
10453,7 → 11160,6
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
int i;
10500,7 → 11206,12
pll->on = false;
}
 
if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
 
if (force_restore) {
i915_redisable_vga(dev);
 
/*
* We need to use raw interfaces for restoring state to avoid
* checking (bogus) intermediate states.
10512,17 → 11223,11
__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
crtc->fb);
}
list_for_each_entry(plane, &dev->mode_config.plane_list, head)
intel_plane_restore(plane);
 
i915_redisable_vga(dev);
} else {
intel_modeset_update_staged_output_state(dev);
}
 
intel_modeset_check_state(dev);
 
drm_mode_config_reset(dev);
}
 
void intel_modeset_gem_init(struct drm_device *dev)
10532,6 → 11237,7
// intel_setup_overlay(dev);
 
mutex_lock(&dev->mode_config.mutex);
drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
mutex_unlock(&dev->mode_config.mutex);
}
10541,6 → 11247,7
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_connector *connector;
 
/*
* Interrupts and polling as the first thing to avoid creating havoc.
10553,11 → 11260,11
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
// drm_kms_helper_poll_fini(dev);
drm_kms_helper_poll_fini(dev);
 
mutex_lock(&dev->struct_mutex);
 
// intel_unregister_dsm_handler();
intel_unregister_dsm_handler();
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
10578,8 → 11285,11
/* flush any delayed tasks or pending work */
flush_scheduled_work();
 
/* destroy backlight, if any, before the connectors */
intel_panel_destroy_backlight(dev);
/* destroy the backlight and sysfs files before encoders/connectors */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
intel_panel_destroy_backlight(connector);
drm_sysfs_connector_remove(connector);
}
 
drm_mode_config_cleanup(dev);
#endif
10620,7 → 11330,6
}
 
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
 
struct intel_display_error_state {
 
10636,6 → 11345,7
} cursor[I915_MAX_PIPES];
 
struct intel_pipe_error_state {
bool power_domain_on;
u32 source;
} pipe[I915_MAX_PIPES];
 
10650,6 → 11360,7
} plane[I915_MAX_PIPES];
 
struct intel_transcoder_error_state {
bool power_domain_on;
enum transcoder cpu_transcoder;
 
u32 conf;
10679,14 → 11390,19
if (INTEL_INFO(dev)->num_pipes == 0)
return NULL;
 
error = kmalloc(sizeof(*error), GFP_ATOMIC);
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
 
if (HAS_POWER_WELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
for_each_pipe(i) {
error->pipe[i].power_domain_on =
intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
 
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
10720,6 → 11436,12
for (i = 0; i < error->num_transcoders; i++) {
enum transcoder cpu_transcoder = transcoders[i];
 
error->transcoder[i].power_domain_on =
intel_display_power_enabled_sw(dev,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
 
error->transcoder[i].cpu_transcoder = cpu_transcoder;
 
error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
10731,12 → 11453,6
error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
 
/* In the code above we read the registers without checking if the power
* well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
* prevent the next I915_WRITE from detecting it and printing an error
* message. */
intel_uncore_clear_errors(dev);
 
return error;
}
 
10753,11 → 11469,13
return;
 
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (HAS_POWER_WELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
err_printf(m, "Pipe [%d]:\n", i);
err_printf(m, " Power: %s\n",
error->pipe[i].power_domain_on ? "on" : "off");
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
 
err_printf(m, "Plane [%d]:\n", i);
10783,6 → 11501,8
for (i = 0; i < error->num_transcoders; i++) {
err_printf(m, " CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",
error->transcoder[i].power_domain_on ? "on" : "off");
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
/drivers/video/drm/i915/intel_dp.c
38,6 → 38,32
 
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
 
struct dp_link_dpll {
int link_bw;
struct dpll dpll;
};
 
static const struct dp_link_dpll gen4_dpll[] = {
{ DP_LINK_BW_1_62,
{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
{ DP_LINK_BW_2_7,
{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
 
static const struct dp_link_dpll pch_dpll[] = {
{ DP_LINK_BW_1_62,
{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
{ DP_LINK_BW_2_7,
{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
 
static const struct dp_link_dpll vlv_dpll[] = {
{ DP_LINK_BW_1_62,
{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
{ DP_LINK_BW_2_7,
{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
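/*
 * Cross-check of the PCH 2.7 GHz entry, assuming the shared i9xx
 * divisor formulas and the 120000 kHz PCH refclk (see
 * i9xx_pll_refclk() in intel_display.c above):
 *   m   = 5 * (14 + 2) + (8 + 2) = 90
 *   vco = 120000 * 90 / (2 + 2)  = 2700000 kHz
 *   dot = 2700000 / (1 * 10)     = 270000 kHz = 2.7 GHz.
 */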
 
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
116,7 → 142,7
return (max_link_clock * max_lanes * 8) / 10;
}
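/*
 * Worked budget check against this 8b/10b limit (illustrative mode):
 * a 2.7 GHz link with 4 lanes allows 270000 * 4 * 8 / 10 = 864000,
 * while 1080p60 at 24 bpp needs about 148500 * 24 / 10 = 356400 in
 * the same units, so the mode fits with ample headroom.
 */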
 
static int
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
211,14 → 237,69
}
}
 
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *out);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *out);
 
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
enum pipe pipe;
 
/* modeset should have pipe */
if (crtc)
return to_intel_crtc(crtc)->pipe;
 
/* init time, try to find a pipe with this port selected */
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
return pipe;
if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
return pipe;
}
 
/* shrug */
return PIPE_A;
}
 
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
if (HAS_PCH_SPLIT(dev))
return PCH_PP_CONTROL;
else
return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
 
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
if (HAS_PCH_SPLIT(dev))
return PCH_PP_STATUS;
else
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
 
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg;
 
pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
return (I915_READ(pp_stat_reg) & PP_ON) != 0;
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
 
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
225,10 → 306,8
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_ctrl_reg;
 
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}
 
static void
236,19 → 315,15
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg, pp_ctrl_reg;
 
if (!is_edp(intel_dp))
return;
 
pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
 
if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
I915_READ(_pp_stat_reg(intel_dp)),
I915_READ(_pp_ctrl_reg(intel_dp)));
}
}
 
329,7 → 404,8
int i, ret, recv_bytes;
uint32_t status;
int try, precharge, clock = 0;
bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
bool has_aux_irq = true;
uint32_t timeout;
 
/* dp aux is extremely sensitive to irq latency, hence request the
* lowest possible wakeup latency and so prevent the cpu from going into
344,6 → 420,11
else
precharge = 5;
 
if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
 
intel_aux_display_runtime_get(dev_priv);
 
/* Try to wait for any previous AUX channel activity */
361,6 → 442,12
goto out;
}
 
/* Only 5 data registers! */
if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
ret = -E2BIG;
goto out;
}
 
while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
373,7 → 460,7
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_400us |
timeout |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
451,10 → 538,11
int msg_bytes;
uint8_t ack;
 
if (WARN_ON(send_bytes > 16))
return -E2BIG;
 
intel_dp_check_edp(intel_dp);
if (send_bytes > 16)
return -1;
msg[0] = AUX_NATIVE_WRITE << 4;
msg[0] = DP_AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = send_bytes - 1;
464,9 → 552,10
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
ack >>= 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
break;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
494,8 → 583,11
uint8_t ack;
int ret;
 
if (WARN_ON(recv_bytes > 19))
return -E2BIG;
 
intel_dp_check_edp(intel_dp);
msg[0] = AUX_NATIVE_READ << 4;
msg[0] = DP_AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = recv_bytes - 1;
510,12 → 602,12
return -EPROTO;
if (ret < 0)
return ret;
ack = reply[0];
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
ack = reply[0] >> 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
memcpy(recv, reply + 1, ret - 1);
return ret - 1;
}
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
538,15 → 630,16
int reply_bytes;
int ret;
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[0] = AUX_I2C_READ << 4;
msg[0] = DP_AUX_I2C_READ << 4;
else
msg[0] = AUX_I2C_WRITE << 4;
msg[0] = DP_AUX_I2C_WRITE << 4;
 
if (!(mode & MODE_I2C_STOP))
msg[0] |= AUX_I2C_MOT << 4;
msg[0] |= DP_AUX_I2C_MOT << 4;
 
msg[1] = address >> 8;
msg[2] = address;
569,54 → 662,75
break;
}
 
for (retry = 0; retry < 5; retry++) {
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
* required to retry at least seven times upon receiving AUX_DEFER
* before giving up the AUX transaction.
*/
for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
goto out;
}
 
switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
case AUX_NATIVE_REPLY_ACK:
switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
case AUX_NATIVE_REPLY_NACK:
case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
udelay(500);
ret = -EREMOTEIO;
goto out;
case DP_AUX_NATIVE_REPLY_DEFER:
/*
* For now, just give more slack to branch devices. We
* could check the DPCD for I2C bit rate capabilities,
* and if available, adjust the interval. We could also
* be more careful with DP-to-Legacy adapters where a
* long legacy cable may force very low I2C bit rates.
*/
udelay(400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
reply[0]);
return -EREMOTEIO;
ret = -EREMOTEIO;
goto out;
}
 
switch (reply[0] & AUX_I2C_REPLY_MASK) {
case AUX_I2C_REPLY_ACK:
switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
}
return reply_bytes - 1;
case AUX_I2C_REPLY_NACK:
ret = reply_bytes - 1;
goto out;
case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
ret = -EREMOTEIO;
goto out;
case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
return -EREMOTEIO;
ret = -EREMOTEIO;
goto out;
}
}
 
DRM_ERROR("too many retries, giving up\n");
return -EREMOTEIO;
ret = -EREMOTEIO;
 
out:
ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
 
static int
636,11 → 750,9
strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
intel_dp->adapter.dev.parent = intel_connector->base.kdev;
 
ironlake_edp_panel_vdd_on(intel_dp);
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
 
649,43 → 761,32
struct intel_crtc_config *pipe_config, int link_bw)
{
struct drm_device *dev = encoder->base.dev;
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;
 
if (IS_G4X(dev)) {
if (link_bw == DP_LINK_BW_1_62) {
pipe_config->dpll.p1 = 2;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.n = 2;
pipe_config->dpll.m1 = 23;
pipe_config->dpll.m2 = 8;
} else {
pipe_config->dpll.p1 = 1;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.n = 1;
pipe_config->dpll.m1 = 14;
pipe_config->dpll.m2 = 2;
}
pipe_config->clock_set = true;
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
} else if (IS_HASWELL(dev)) {
/* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
if (link_bw == DP_LINK_BW_1_62) {
pipe_config->dpll.n = 1;
pipe_config->dpll.p1 = 2;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.m1 = 12;
pipe_config->dpll.m2 = 9;
} else {
pipe_config->dpll.n = 2;
pipe_config->dpll.p1 = 1;
pipe_config->dpll.p2 = 10;
pipe_config->dpll.m1 = 14;
pipe_config->dpll.m2 = 8;
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
} else if (IS_VALLEYVIEW(dev)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
}
 
if (divisor && count) {
for (i = 0; i < count; i++) {
if (link_bw == divisor[i].link_bw) {
pipe_config->dpll = divisor[i].dpll;
pipe_config->clock_set = true;
} else if (IS_VALLEYVIEW(dev)) {
/* FIXME: Need to figure out optimized DP clocks for vlv. */
break;
}
}
}
}
 
bool
intel_dp_compute_config(struct intel_encoder *encoder,
726,19 → 827,22
 
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock], adjusted_mode->clock);
max_lane_count, bws[max_clock],
adjusted_mode->crtc_clock);
 
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = pipe_config->pipe_bpp;
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
dev_priv->vbt.edp_bpp < bpp) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
dev_priv->vbt.edp_bpp);
bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
bpp = dev_priv->vbt.edp_bpp;
}
 
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
 
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
783,7 → 887,8
mode_rate, link_avail);
 
intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->clock, pipe_config->port_clock,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n);
 
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
791,21 → 896,6
return true;
}
 
void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
}
}
 
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
878,8 → 968,6
intel_write_eld(&encoder->base, adjusted_mode);
}
 
intel_dp_init_link_config(intel_dp);
 
/* Split out the IBX/CPU vs CPT settings */
 
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
889,7 → 977,7
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
intel_dp->DP |= crtc->pipe << 29;
903,7 → 991,7
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF;
 
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
if (crtc->pipe == 1)
933,8 → 1021,8
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg, pp_ctrl_reg;
 
pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
mask, value,
946,6 → 1034,8
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
}
 
DRM_DEBUG_KMS("Wait complete\n");
}
 
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
976,11 → 1066,8
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 control;
u32 pp_ctrl_reg;
 
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
control = I915_READ(pp_ctrl_reg);
 
control = I915_READ(_pp_ctrl_reg(intel_dp));
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
return control;
995,7 → 1082,6
 
if (!is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP VDD on\n");
 
WARN(intel_dp->want_panel_vdd,
"eDP VDD already requested on\n");
1002,11 → 1088,13
 
intel_dp->want_panel_vdd = true;
 
if (ironlake_edp_have_panel_vdd(intel_dp)) {
DRM_DEBUG_KMS("eDP VDD already on\n");
if (ironlake_edp_have_panel_vdd(intel_dp))
return;
}
 
intel_runtime_pm_get(dev_priv);
 
DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
if (!ironlake_edp_have_panel_power(intel_dp))
ironlake_wait_panel_power_cycle(intel_dp);
 
1013,8 → 1101,8
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
 
pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
1039,11 → 1127,13
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
DRM_DEBUG_KMS("Turning eDP VDD off\n");
 
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
 
pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
1051,7 → 1141,11
/* Make sure sequencer is idle before allowing subsequent activity */
DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
msleep(intel_dp->panel_power_down_delay);
 
if ((pp & POWER_TARGET_ON) == 0)
msleep(intel_dp->panel_power_cycle_delay);
 
intel_runtime_pm_put(dev_priv);
}
}
 
1071,7 → 1165,6
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
 
intel_dp->want_panel_vdd = false;
1108,12 → 1201,13
 
ironlake_wait_panel_power_cycle(intel_dp);
 
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
}
 
pp |= POWER_TARGET_ON;
1120,8 → 1214,6
if (!IS_GEN5(dev))
pp |= PANEL_POWER_RESET;
 
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
1129,8 → 1221,8
 
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
}
}
 
1146,20 → 1238,16
 
DRM_DEBUG_KMS("Turn eDP power off\n");
 
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
intel_dp->want_panel_vdd = false;
 
ironlake_wait_panel_off(intel_dp);
}
 
1168,7 → 1256,6
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
u32 pp;
u32 pp_ctrl_reg;
 
1186,12 → 1273,12
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
 
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
intel_panel_enable_backlight(dev, pipe);
intel_panel_enable_backlight(intel_dp->attached_connector);
}
 
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1204,13 → 1291,13
if (!is_edp(intel_dp))
return;
 
intel_panel_disable_backlight(dev);
intel_panel_disable_backlight(intel_dp->attached_connector);
 
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
 
pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
1357,6 → 1444,7
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int dotclock;
 
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
tmp = I915_READ(intel_dp->output_reg);
1384,7 → 1472,11
 
pipe_config->adjusted_mode.flags |= flags;
 
if (dp_to_dig_port(intel_dp)->port == PORT_A) {
pipe_config->has_dp_encoder = true;
 
intel_dp_get_m_n(crtc, pipe_config);
 
if (port == PORT_A) {
if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
pipe_config->port_clock = 162000;
else
1391,6 → 1483,14
pipe_config->port_clock = 270000;
}
 
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
 
if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
 
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
/*
1412,10 → 1512,11
}
}
 
static bool is_edp_psr(struct intel_dp *intel_dp)
static bool is_edp_psr(struct drm_device *dev)
{
return is_edp(intel_dp) &&
intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
struct drm_i915_private *dev_priv = dev->dev_private;
 
return dev_priv->psr.sink_support;
}
 
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1422,10 → 1523,10
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!IS_HASWELL(dev))
if (!HAS_PSR(dev))
return false;
 
return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}
 
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
1475,7 → 1576,7
intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
 
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
intel_dp->psr_setup_done = true;
1500,9 → 1601,9
DP_PSR_MAIN_LINK_ACTIVE);
 
/* Setup AUX registers */
I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
I915_WRITE(EDP_PSR_AUX_CTL,
I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
I915_WRITE(EDP_PSR_AUX_CTL(dev),
DP_AUX_CH_CTL_TIME_OUT_400us |
(msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1516,6 → 1617,7
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
val |= EDP_PSR_LINK_STANDBY;
1525,8 → 1627,8
} else
val |= EDP_PSR_LINK_DISABLE;
 
I915_WRITE(EDP_PSR_CTL, val |
EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
I915_WRITE(EDP_PSR_CTL(dev), val |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
1542,9 → 1644,10
struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
if (!IS_HASWELL(dev)) {
dev_priv->psr.source_ok = false;
 
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
dev_priv->no_psr_reason = PSR_NO_SOURCE;
return false;
}
 
1551,19 → 1654,11
if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
(dig_port->port != PORT_A)) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
return false;
}
 
if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
dev_priv->no_psr_reason = PSR_NO_SINK;
return false;
}
 
if (!i915_enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
dev_priv->no_psr_reason = PSR_MODULE_PARAM;
return false;
}
 
1570,14 → 1665,12
crtc = dig_port->base.base.crtc;
if (crtc == NULL) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
return false;
}
 
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
if (!intel_crtc_active(crtc)) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
return false;
}
 
1585,13 → 1678,11
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
dev_priv->no_psr_reason = PSR_NOT_TILED;
return false;
}
 
if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
return false;
}
 
1598,16 → 1689,15
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
dev_priv->no_psr_reason = PSR_S3D_ENABLED;
return false;
}
 
if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
return false;
}
 
dev_priv->psr.source_ok = true;
return true;
}
 
1646,10 → 1736,11
if (!intel_edp_is_psr_enabled(dev))
return;
 
I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
1663,7 → 1754,7
if (encoder->type == INTEL_OUTPUT_EDP) {
intel_dp = enc_to_intel_dp(&encoder->base);
 
if (!is_edp_psr(intel_dp))
if (!is_edp_psr(dev))
return;
 
if (!intel_edp_psr_match_conditions(intel_dp))
1682,9 → 1773,8
 
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
 
/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
1722,14 → 1812,24
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
 
static void g4x_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
intel_enable_dp(encoder);
ironlake_edp_backlight_on(intel_dp);
}
 
static void vlv_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
ironlake_edp_backlight_on(intel_dp);
}
 
static void intel_pre_enable_dp(struct intel_encoder *encoder)
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1745,13 → 1845,14
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
struct edp_power_seq power_seq;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
 
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
1758,33 → 1859,38
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 
mutex_unlock(&dev_priv->dpio_lock);
 
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
 
intel_enable_dp(encoder);
 
vlv_wait_port_ready(dev_priv, port);
vlv_wait_port_ready(dev_priv, dport);
}
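
This hunk replaces flat DPIO register names with per-pipe accessors and computed per-channel offsets (VLV_PCS_DW8(port) and friends). A sketch of the indexing idea; the bank layout and offsets below are deliberately made-up, only the shape matters:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: each PHY channel owns a bank of 32-bit
 * registers, and DWn is the n-th dword within that bank. */
#define CH_BANK_BASE(ch)        (0x8200u + (uint32_t)(ch) * 0x200u)
#define CH_PCS_DW(ch, n)        (CH_BANK_BASE(ch) + 4u * (uint32_t)(n))

static void dpio_write(int pipe, uint32_t reg, uint32_t val)
{
        /* stand-in for vlv_dpio_write(): pipe selects the PHY instance */
        printf("pipe %d: [0x%04x] <= 0x%08x\n",
               pipe, (unsigned)reg, (unsigned)val);
}

int main(void)
{
        int pipe = 0, ch = 1;

        dpio_write(pipe, CH_PCS_DW(ch, 8), 0x001000c4);   /* lane config */
        dpio_write(pipe, CH_PCS_DW(ch, 14), 0x00760018);  /* clock buffer */
        dpio_write(pipe, CH_PCS_DW(ch, 23), 0x00400888);
        return 0;
}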
 
static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int port = vlv_dport_to_channel(dport);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
 
if (!IS_VALLEYVIEW(dev))
return;
 
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1791,9 → 1897,9
DPIO_PCS_CLK_SOFT_RESET);
 
/* Fix up inter-pair skew failure */
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
mutex_unlock(&dev_priv->dpio_lock);
}
 
1835,18 → 1941,6
DP_LINK_STATUS_SIZE);
}
 
#if 0
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
"pattern 1", "pattern 2", "idle", "off"
};
#endif
 
/*
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1858,7 → 1952,7
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
1874,9 → 1968,20
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (HAS_DDI(dev)) {
if (IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else if (IS_HASWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
1928,10 → 2033,13
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct intel_crtc *intel_crtc =
to_intel_crtc(dport->base.base.crtc);
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
int port = vlv_dport_to_channel(dport);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
 
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
2007,14 → 2115,14
}
 
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
uniqtranscale_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
mutex_unlock(&dev_priv->dpio_lock);
 
return 0;
2021,7 → 2129,8
}
 
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
2182,6 → 2291,41
}
}
 
static uint32_t
intel_bdw_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
 
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
 
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
 
case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
 
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
}
}
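
The switch above is a flat encoding of a two-dimensional table: the voltage swing picks the row, the pre-emphasis the column, and the result is one of the Sel0..Sel8 buffer translations. The same data as an explicit lookup table (swing/pre-emphasis indices simplified to 0..3; unsupported pairs fall back to Sel0 exactly as the default case does):

#include <stdio.h>

/* Selector indices from the Sel0..Sel8 comments above, keyed as
 * [voltage swing][pre-emphasis]; -1 marks unsupported combinations. */
static const int bdw_sel[4][4] = {
        /* 400mV  */ {  0,  1,  2, -1 },
        /* 600mV  */ {  3,  4,  5, -1 },
        /* 800mV  */ {  6,  7, -1, -1 },
        /* 1200mV */ {  8, -1, -1, -1 },
};

static int bdw_buf_trans_select(unsigned vswing, unsigned preemph)
{
        int sel = (vswing < 4 && preemph < 4) ? bdw_sel[vswing][preemph] : -1;

        if (sel < 0) {
                fprintf(stderr, "unsupported swing/pre-emphasis combo\n");
                return 0;       /* mirror the default: Sel0, 400mV/0dB */
        }
        return sel;
}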
 
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2192,7 → 2336,10
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
 
if (HAS_DDI(dev)) {
if (IS_BROADWELL(dev)) {
signal_levels = intel_bdw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_HASWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_VALLEYVIEW(dev)) {
2216,7 → 2363,7
 
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint32_t *DP,
uint8_t dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2223,7 → 2370,8
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
int ret;
uint8_t buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
 
if (HAS_DDI(dev)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
2252,62 → 2400,93
I915_WRITE(DP_TP_CTL(port), temp);
 
} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
*DP |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
*DP |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
 
} else {
dp_reg_value &= ~DP_LINK_TRAIN_MASK;
*DP &= ~DP_LINK_TRAIN_MASK;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
dp_reg_value |= DP_LINK_TRAIN_OFF;
*DP |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
dp_reg_value |= DP_LINK_TRAIN_PAT_1;
*DP |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
*DP |= DP_LINK_TRAIN_PAT_2;
break;
}
}
 
I915_WRITE(intel_dp->output_reg, dp_reg_value);
I915_WRITE(intel_dp->output_reg, *DP);
POSTING_READ(intel_dp->output_reg);
 
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
buf[0] = dp_train_pat;
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
DP_TRAINING_PATTERN_DISABLE) {
/* don't write DP_TRAINING_LANEx_SET on disable */
len = 1;
} else {
/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
len = intel_dp->lane_count + 1;
}
 
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
DP_TRAINING_PATTERN_DISABLE) {
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET,
ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
buf, len);
 
return ret == len;
}
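
The rewrite above merges what used to be two AUX writes into one: byte 0 carries DP_TRAINING_PATTERN_SET and bytes 1..lane_count carry DP_TRAINING_LANEx_SET, so the sink sees the pattern and the drive settings change together (and only the pattern byte is written on disable). A self-contained sketch of the buffer construction, with the AUX channel stubbed out:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define TRAINING_PATTERN_MASK           0x3
#define TRAINING_PATTERN_DISABLE        0x0
#define MAX_LANES                       4

static int aux_write(unsigned addr, const uint8_t *buf, int len)
{
        (void)addr; (void)buf;          /* stub: pretend it all went out */
        return len;
}

static bool set_link_train(uint8_t pattern, const uint8_t *train_set,
                           int lane_count)
{
        uint8_t buf[MAX_LANES + 1];
        int len;

        buf[0] = pattern;
        if ((pattern & TRAINING_PATTERN_MASK) == TRAINING_PATTERN_DISABLE) {
                len = 1;        /* don't write the lane settings on disable */
        } else {
                /* lane settings immediately follow the pattern byte */
                memcpy(buf + 1, train_set, lane_count);
                len = lane_count + 1;
        }
        return aux_write(0x102 /* DP_TRAINING_PATTERN_SET */, buf, len) == len;
}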
 
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
uint8_t dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, DP);
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
 
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
intel_get_adjust_train(intel_dp, link_status);
intel_dp_set_signal_levels(intel_dp, DP);
 
I915_WRITE(intel_dp->output_reg, *DP);
POSTING_READ(intel_dp->output_reg);
 
ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
intel_dp->train_set,
intel_dp->lane_count);
if (ret != intel_dp->lane_count)
return false;
}
 
return true;
return ret == intel_dp->lane_count;
}
 
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
2351,33 → 2530,38
uint8_t voltage;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
uint8_t link_config[2];
 
if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
 
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
DP_LINK_CONFIGURATION_SIZE);
link_config[0] = intel_dp->link_bw;
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
 
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
 
DP |= DP_PORT_EN;
 
memset(intel_dp->train_set, 0, 4);
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
 
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
 
intel_dp_set_signal_levels(intel_dp, &DP);
 
/* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
 
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
2396,10 → 2580,12
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
DRM_ERROR("too many full retries, give up\n");
break;
}
memset(intel_dp->train_set, 0, 4);
intel_dp_reset_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
voltage_tries = 0;
continue;
}
2408,7 → 2594,7
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
if (voltage_tries == 5) {
DRM_DEBUG_KMS("too many voltage retries, give up\n");
DRM_ERROR("too many voltage retries, give up\n");
break;
}
} else
2415,9 → 2601,12
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
/* Update training set as requested by target */
if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
DRM_ERROR("failed to update link training\n");
break;
}
}
 
intel_dp->DP = DP;
}
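
The clock-recovery loop above is bounded by two independent counters: voltage_tries caps retries at an unchanged voltage level, while loop_tries caps full restarts from zeroed drive settings once every lane has hit maximum swing (the channel-equalization loop that follows uses the same shape with tries/cr_tries). A bare skeleton of that control flow, with the hardware and AUX accessors stubbed:

#include <stdbool.h>
#include <stdint.h>

#define MAX_VOLTAGE_TRIES       5
#define MAX_LOOP_TRIES          5

static bool clock_recovery_ok(void)      { return true; }  /* stubs */
static bool all_lanes_at_max_swing(void) { return false; }
static uint8_t current_voltage(void)     { return 0; }
static void reset_train_set(void)        { }
static void update_train_set(void)       { }

static bool clock_recovery(void)
{
        int voltage_tries = 0, loop_tries = 0;
        uint8_t voltage = 0xff;

        for (;;) {
                if (clock_recovery_ok())
                        return true;

                if (all_lanes_at_max_swing()) {
                        /* restart from scratch, at most five times */
                        if (++loop_tries == MAX_LOOP_TRIES)
                                return false;
                        reset_train_set();
                        voltage_tries = 0;
                        continue;
                }

                /* give up after five tries at the same voltage level */
                if (current_voltage() == voltage) {
                        if (++voltage_tries == MAX_VOLTAGE_TRIES)
                                return false;
                } else {
                        voltage_tries = 0;
                }
                voltage = current_voltage();

                update_train_set();     /* adopt the sink's requested levels */
        }
}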
2430,6 → 2619,13
uint32_t DP = intel_dp->DP;
 
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_2 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
}
 
tries = 0;
cr_tries = 0;
channel_eq = false;
2438,25 → 2634,21
 
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
intel_dp_link_down(intel_dp);
break;
}
 
intel_dp_set_signal_levels(intel_dp, &DP);
 
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_2 |
DP_LINK_SCRAMBLING_DISABLE))
break;
 
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status))
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
 
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_2 |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
}
2470,13 → 2662,19
if (tries > 5) {
intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_2 |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
continue;
}
 
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
/* Update training set as requested by target */
if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
DRM_ERROR("failed to update link training\n");
break;
}
++tries;
}
 
2491,7 → 2689,7
 
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
intel_dp_set_link_train(intel_dp, intel_dp->DP,
intel_dp_set_link_train(intel_dp, &intel_dp->DP,
DP_TRAINING_PATTERN_DISABLE);
}
 
2578,6 → 2776,10
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
2593,11 → 2795,16
 
/* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
if (is_edp(intel_dp)) {
intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
if (is_edp_psr(intel_dp))
if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
dev_priv->psr.sink_support = true;
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
}
}
 
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
2679,13 → 2886,11
 
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp, link_status)) {
intel_dp_link_down(intel_dp);
return;
}
 
/* Now read the DPCD to see if it's actually running */
if (!intel_dp_get_dpcd(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
 
2717,7 → 2922,6
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
uint8_t *dpcd = intel_dp->dpcd;
bool hpd;
uint8_t type;
 
if (!intel_dp_get_dpcd(intel_dp))
2728,8 → 2932,8
return connector_status_connected;
 
/* If we're HPD-aware, SINK_COUNT changes dynamically */
hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
if (hpd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
uint8_t reg;
if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
&reg, 1))
2743,9 → 2947,18
return connector_status_connected;
 
/* Well we tried, say unknown for unreliable port types */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
if (type == DP_DS_PORT_TYPE_VGA ||
type == DP_DS_PORT_TYPE_NON_EDID)
return connector_status_unknown;
} else {
type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_TYPE_MASK;
if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
type == DP_DWN_STRM_PORT_TYPE_OTHER)
return connector_status_unknown;
}
 
/* Anything else is out of spec, warn and ignore */
DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2792,19 → 3005,35
return status;
}
 
if (IS_VALLEYVIEW(dev)) {
switch (intel_dig_port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS;
bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
break;
case PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS;
bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
break;
case PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS;
bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
break;
default:
return connector_status_unknown;
}
} else {
switch (intel_dig_port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
break;
case PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
break;
case PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
break;
default:
return connector_status_unknown;
}
}
 
if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
return connector_status_disconnected;
2819,19 → 3048,11
 
/* use cached edid if we have one */
if (intel_connector->edid) {
struct edid *edid;
int size;
 
/* invalid edid */
if (IS_ERR(intel_connector->edid))
return NULL;
 
size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
if (!edid)
return NULL;
 
return edid;
return drm_edid_duplicate(intel_connector->edid);
}
 
return drm_get_edid(connector, adapter);
2862,9 → 3083,12
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
struct edid *edid = NULL;
 
intel_runtime_pm_get(dev_priv);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
 
2876,7 → 3100,7
status = g4x_dp_detect(intel_dp);
 
if (status != connector_status_connected)
return status;
goto out;
 
intel_dp_probe_oui(intel_dp);
 
2892,7 → 3116,11
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
return connector_status_connected;
status = connector_status_connected;
 
out:
intel_runtime_pm_put(dev_priv);
return status;
}
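
The detect path above now brackets its whole body with a runtime-PM reference, which is why the early returns had to become goto out: a plain return would leak the reference. The pattern in isolation, with the PM and probe calls stubbed:

#include <stdio.h>

static void runtime_pm_get(void) { puts("pm get"); }
static void runtime_pm_put(void) { puts("pm put"); }
static int probe_hardware(void)  { return -1; }  /* stub: disconnected */

static int detect(void)
{
        int status;

        runtime_pm_get();

        status = probe_hardware();
        if (status != 0)
                goto out;       /* a plain return here would leak the ref */

        /* ... EDID fetch, audio detection, etc. ... */

out:
        runtime_pm_put();       /* single exit: reference always dropped */
        return status;
}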
 
static int intel_dp_get_modes(struct drm_connector *connector)
3039,7 → 3267,6
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_fini(&intel_connector->panel);
 
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
3107,12 → 3334,20
}
 
/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dpd_is_edp(struct drm_device *dev)
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct child_device_config *p_child;
union child_device_config *p_child;
int i;
static const short port_mapping[] = {
[PORT_B] = PORT_IDPB,
[PORT_C] = PORT_IDPC,
[PORT_D] = PORT_IDPD,
};
 
if (port == PORT_A)
return true;
 
if (!dev_priv->vbt.child_dev_num)
return false;
 
3119,8 → 3354,9
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
 
if (p_child->dvo_port == PORT_IDPD &&
p_child->device_type == DEVICE_TYPE_eDP)
if (p_child->common.dvo_port == port_mapping[port] &&
(p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true;
}
return false;
3153,24 → 3389,26
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec, final;
u32 pp_on, pp_off, pp_div, pp;
int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
if (HAS_PCH_SPLIT(dev)) {
pp_control_reg = PCH_PP_CONTROL;
pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
} else {
pp_control_reg = PIPEA_PP_CONTROL;
pp_on_reg = PIPEA_PP_ON_DELAYS;
pp_off_reg = PIPEA_PP_OFF_DELAYS;
pp_div_reg = PIPEA_PP_DIVISOR;
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
 
pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
 
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp = ironlake_get_pp_control(intel_dp);
I915_WRITE(pp_control_reg, pp);
I915_WRITE(pp_ctrl_reg, pp);
 
pp_on = I915_READ(pp_on_reg);
pp_off = I915_READ(pp_off_reg);
3258,9 → 3496,11
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
} else {
pp_on_reg = PIPEA_PP_ON_DELAYS;
pp_off_reg = PIPEA_PP_OFF_DELAYS;
pp_div_reg = PIPEA_PP_DIVISOR;
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
 
pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
 
/* And finally store the new values in the power sequencer. */
3277,12 → 3517,15
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (IS_VALLEYVIEW(dev)) {
port_sel = I915_READ(pp_on_reg) & 0xc0000000;
if (dp_to_dig_port(intel_dp)->port == PORT_B)
port_sel = PANEL_PORT_SELECT_DPB_VLV;
else
port_sel = PANEL_PORT_SELECT_DPC_VLV;
} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (dp_to_dig_port(intel_dp)->port == PORT_A)
port_sel = PANEL_POWER_PORT_DP_A;
port_sel = PANEL_PORT_SELECT_DPA;
else
port_sel = PANEL_POWER_PORT_DP_D;
port_sel = PANEL_PORT_SELECT_DPD;
}
 
pp_on |= port_sel;
3335,7 → 3578,6
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
 
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
3367,8 → 3609,6
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
 
ironlake_edp_panel_vdd_off(intel_dp, false);
 
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
 
3392,26 → 3632,10
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
 
if (intel_dp_is_edp(dev, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
/*
* FIXME : We need to initialize built-in panels before external panels.
* For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
*/
switch (port) {
case PORT_A:
type = DRM_MODE_CONNECTOR_eDP;
break;
case PORT_C:
if (IS_VALLEYVIEW(dev))
type = DRM_MODE_CONNECTOR_eDP;
break;
case PORT_D:
if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
type = DRM_MODE_CONNECTOR_eDP;
break;
default: /* silence GCC warning */
break;
}
 
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
3525,11 → 3749,11
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
 
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
3548,12 → 3772,12
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
} else {
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->enable = intel_enable_dp;
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
}
 
intel_dig_port->port = port;
/drivers/video/drm/i915/intel_drv.h
70,8 → 70,8
#define wait_for_atomic_us(COND, US) _wait_for((COND), \
DIV_ROUND_UP((US), 1000), 0)
 
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
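
The added parentheses are the classic macro-hygiene fix: without them the argument binds to the multiplication term by term. A two-assert demonstration of the difference:

#include <assert.h>

#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))

int main(void)
{
        int a = 2, b = 3;

        /* the old form, (1000*x), would expand KHz(a + b) to
         * (1000*a + b) == 2003 instead of 5000 */
        assert(KHz(a + b) == 5000);
        assert(MHz(1) == 1000000);
        return 0;
}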
 
/*
* Display related stuff
82,7 → 82,6
/* the i915, i945 have a single sDVO i2c bus - which is different */
#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
#define INTELFB_CONN_LIMIT 4
 
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
98,7 → 97,8
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_UNKNOWN 9
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10
 
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
105,6 → 105,9
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
 
#define INTEL_DSI_COMMAND_MODE 0
#define INTEL_DSI_VIDEO_MODE 1
 
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_i915_gem_object *obj;
157,7 → 160,19
 
struct intel_panel {
struct drm_display_mode *fixed_mode;
struct drm_display_mode *downclock_mode;
int fitting_mode;
 
/* backlight */
struct {
bool present;
u32 level;
u32 max;
bool enabled;
bool combination_mode; /* gen 2/4 only */
bool active_low_pwm;
struct backlight_device *device;
} backlight;
};
 
struct intel_connector {
212,8 → 227,21
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
 
/* User requested mode, only valid as a starting point to
* compute adjusted_mode, except in the case of (S)DVO where
* it's also for the output timings of the (S)DVO chip.
* adjusted_mode will then correspond to the (S)DVO chip's
* preferred input timings. */
struct drm_display_mode requested_mode;
/* Actual pipe timings, i.e. what we program into the pipe timing
* registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
struct drm_display_mode adjusted_mode;
 
/* Pipe source size (i.e. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
int pipe_src_w, pipe_src_h;
 
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
267,7 → 295,8
 
/*
* Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode.
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
* already multiplied by pixel_multiplier.
*/
int port_clock;
 
293,8 → 322,16
struct intel_link_m_n fdi_m_n;
 
bool ips_enabled;
 
bool double_wide;
};
 
struct intel_pipe_wm {
struct intel_wm_level wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
};
 
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
306,8 → 343,9
* some outputs connected to this crtc.
*/
bool active;
unsigned long enabled_power_domains;
bool eld_vld;
bool primary_disabled; /* is the crtc obscured by a plane? */
bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
335,6 → 373,12
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
 
/* per-pipe watermark state */
struct {
/* watermarks currently being used */
struct intel_pipe_wm active;
} wm;
};
 
struct intel_plane_wm_parameters {
416,19 → 460,17
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len);
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
 
#define DP_MAX_DOWNSTREAM_PORTS 0x10
#define DP_LINK_CONFIGURATION_SIZE 9
 
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
465,9 → 507,9
{
switch (dport->port) {
case PORT_B:
return 0;
return DPIO_CH0;
case PORT_C:
return 1;
return DPIO_CH1;
default:
BUG();
}
500,80 → 542,6
bool enable_stall_check;
};
 
int intel_pch_rawclk(struct drm_device *dev);
 
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 
extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int hdmi_reg, enum port port);
extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
extern void intel_mark_idle(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
extern bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
 
/* intel_panel.c */
extern int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
extern void intel_panel_fini(struct intel_panel *panel);
 
extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
extern void intel_panel_set_backlight(struct drm_device *dev,
u32 level, u32 max);
extern int intel_panel_setup_backlight(struct drm_connector *connector);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
 
struct intel_set_config {
struct drm_encoder **save_connector_encoders;
struct drm_crtc **save_encoder_crtcs;
582,18 → 550,14
bool mode_changed;
};
 
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_connector_dpms(struct drm_connector *, int mode);
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
extern void intel_modeset_check_state(struct drm_device *dev);
extern void intel_plane_restore(struct drm_plane *plane);
extern void intel_plane_disable(struct drm_plane *plane);
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
bool load_detect_temp;
int dpms_mode;
};
 
 
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
static inline struct intel_encoder *
intel_attached_encoder(struct drm_connector *connector)
{
return to_intel_connector(connector)->encoder;
}
621,73 → 585,99
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
 
 
/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void hsw_pc8_disable_interrupts(struct drm_device *dev);
void hsw_pc8_restore_interrupts(struct drm_device *dev);
 
 
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
 
 
/* intel_ddi.c */
void intel_prepare_ddi(struct drm_device *dev);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
void intel_ddi_pll_init(struct drm_device *dev);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
bool intel_ddi_pll_select(struct intel_crtc *crtc);
void intel_ddi_pll_enable(struct intel_crtc *crtc);
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
 
 
/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
void intel_connector_dpms(struct drm_connector *, int mode);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_modeset_check_state(struct drm_device *dev);
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
 
extern void intel_connector_attach_encoder(struct intel_connector *connector,
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
 
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern enum transcoder
intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
 
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
bool load_detect_temp;
int dpms_mode;
};
extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
void intel_wait_for_vblank(struct drm_device *dev, int pipe);
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport);
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
extern void intel_release_load_detect_pipe(struct drm_connector *connector,
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
 
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
 
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
 
extern int intel_framebuffer_init(struct drm_device *dev,
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
extern int intel_overlay_switch_off(struct intel_overlay *overlay);
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
 
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 
void intel_framebuffer_fini(struct intel_framebuffer *fb);
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
701,104 → 691,209
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool state);
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 
extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_suspend_hw(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
 
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
 
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
void intel_display_handle_reset(struct drm_device *dev);
void hsw_enable_pc8_work(struct work_struct *__work);
void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
int dotclock);
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_device *dev, bool enable);
int valleyview_get_vco(struct drm_i915_private *dev_priv);
 
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
void ironlake_edp_panel_on(struct intel_dp *intel_dp);
void ironlake_edp_panel_off(struct intel_dp *intel_dp);
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_edp_psr_update(struct drm_device *dev);
 
 
/* intel_dsi.c */
bool intel_dsi_init(struct drm_device *dev);
 
 
/* intel_dvo.c */
void intel_dvo_init(struct drm_device *dev);
 
 
/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_I915_FBDEV
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
extern void intel_fbdev_restore_mode(struct drm_device *dev);
#else
static inline int intel_fbdev_init(struct drm_device *dev)
{
return 0;
}
 
static inline void intel_fbdev_initial_config(struct drm_device *dev)
{
}
 
static inline void intel_fbdev_fini(struct drm_device *dev)
{
}
 
static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
{
}
 
static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
#endif
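
The #ifdef block above is the usual compile-out pattern: when the fbdev emulation is configured out, callers link against empty static inline stubs instead of sprinkling #ifdefs at every call site. The shape in miniature (FEATURE_FBDEV is a hypothetical stand-in for CONFIG_DRM_I915_FBDEV):

#ifdef FEATURE_FBDEV
int fbdev_init(void);           /* real implementations live elsewhere */
void fbdev_fini(void);
#else
/* no-op stubs: call sites compile unchanged either way */
static inline int fbdev_init(void) { return 0; }
static inline void fbdev_fini(void) { }
#endif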
 
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
 
 
/* intel_lvds.c */
void intel_lvds_init(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
 
 
/* intel_modes.c */
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
 
/* intel_overlay.c */
void intel_setup_overlay(struct drm_device *dev);
void intel_cleanup_overlay(struct drm_device *dev);
int intel_overlay_switch_off(struct intel_overlay *overlay);
int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
/* Power-related functions, located in intel_pm.c */
extern void intel_init_pm(struct drm_device *dev);
/* FBC */
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_update_fbc(struct drm_device *dev);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
 
/* Power well */
extern int i915_init_power_well(struct drm_device *dev);
extern void i915_remove_power_well(struct drm_device *dev);
/* intel_panel.c */
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
void intel_panel_init_backlight_funcs(struct drm_device *dev);
enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
 
extern bool intel_display_power_enabled(struct drm_device *dev,
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
int intel_power_domains_init(struct drm_device *dev);
void intel_power_domains_remove(struct drm_device *dev);
bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void ironlake_teardown_rc6(struct drm_device *dev);
bool intel_display_power_enabled_sw(struct drm_device *dev,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_device *dev,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_device *dev,
enum intel_display_power_domain domain);
void intel_power_domains_init_hw(struct drm_device *dev);
void intel_set_power_well(struct drm_device *dev, bool enable);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
 
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
extern void intel_ddi_pll_init(struct drm_device *dev);
extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
extern bool
intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
extern void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
 
extern void intel_display_handle_reset(struct drm_device *dev);
extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable);
extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
 
extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
extern void intel_edp_psr_update(struct drm_device *dev);
extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down);
extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
uint32_t mask);
extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
uint32_t mask);
extern void hsw_enable_pc8_work(struct work_struct *__work);
extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
 
/* intel_sprite.c */
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane);
void intel_plane_restore(struct drm_plane *plane);
void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
 
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
 
#endif /* __INTEL_DRV_H__ */
/drivers/video/drm/i915/intel_dsi.c
0,0 → 1,637
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jani Nikula <jani.nikula@intel.com>
*/
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <linux/slab.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"
 
/* the sub-encoders aka panel drivers */
static const struct intel_dsi_device intel_dsi_devices[] = {
};
 
static void band_gap_reset(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->dpio_lock);
 
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
udelay(150);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_dsi, base);
}
 
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
}
 
static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
}
 
static void intel_dsi_hot_plug(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
}
 
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
struct drm_display_mode *mode = &config->requested_mode;
 
DRM_DEBUG_KMS("\n");
 
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
if (intel_dsi->dev.dev_ops->mode_fixup)
return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
mode, adjusted_mode);
 
return true;
}
 
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
 
vlv_enable_dsi_pll(encoder);
}
 
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int pipe = intel_crtc->pipe;
u32 val;
 
DRM_DEBUG_KMS("\n");
 
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
usleep_range(2000, 2500);
}

static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
DRM_DEBUG_KMS("\n");
 
if (intel_dsi->dev.dev_ops->panel_reset)
intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
 
/* put device in ready state */
intel_dsi_device_ready(encoder);
 
if (intel_dsi->dev.dev_ops->send_otp_cmds)
intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
}
 
static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
u32 temp;
 
DRM_DEBUG_KMS("\n");
 
if (is_cmd_mode(intel_dsi)) {
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
} else {
msleep(20); /* XXX */
dpi_send_cmd(intel_dsi, TURN_ON);
msleep(100);
 
/* assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
temp = temp | intel_dsi->port_bits;
I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
}
 
if (intel_dsi->dev.dev_ops->enable)
intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
}
 
static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
u32 temp;
 
DRM_DEBUG_KMS("\n");
 
if (is_vid_mode(intel_dsi)) {
dpi_send_cmd(intel_dsi, SHUTDOWN);
msleep(10);
 
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
 
msleep(2);
}
 
/* If disable packets are sent before the shutdown packet, then in some
 * subsequent enable sequence a "send turn on packet" error is observed. */
if (intel_dsi->dev.dev_ops->disable)
intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
}
 
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int pipe = intel_crtc->pipe;
u32 val;
 
DRM_DEBUG_KMS("\n");
 
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
usleep_range(2000, 2500);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
usleep_range(2000, 2500);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
usleep_range(2000, 2500);
 
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
 
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
 
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
usleep_range(2000, 2500);
 
vlv_disable_dsi_pll(encoder);
}

static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
DRM_DEBUG_KMS("\n");
 
intel_dsi_clear_device_ready(encoder);
 
if (intel_dsi->dev.dev_ops->disable_panel_power)
intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
}
 
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 port, func;
enum pipe p;
 
DRM_DEBUG_KMS("\n");
 
/* XXX: this only works for one DSI output */
for (p = PIPE_A; p <= PIPE_B; p++) {
port = I915_READ(MIPI_PORT_CTRL(p));
func = I915_READ(MIPI_DSI_FUNC_PRG(p));
 
if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
*pipe = p;
return true;
}
}
}
 
return false;
}
 
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
DRM_DEBUG_KMS("\n");
 
/* XXX: read flags, set to adjusted_mode */
}
 
static enum drm_mode_status
intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
 
DRM_DEBUG_KMS("\n");
 
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
return MODE_NO_DBLESCAN;
}
 
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
 
return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
}
 
/* return txclkesc cycles in terms of divider and duration in us */
static u16 txclkesc(u32 divider, unsigned int us)
{
switch (divider) {
case ESCAPE_CLOCK_DIVIDER_1:
default:
return 20 * us;
case ESCAPE_CLOCK_DIVIDER_2:
return 10 * us;
case ESCAPE_CLOCK_DIVIDER_4:
return 5 * us;
}
}
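/* For example, with ESCAPE_CLOCK_DIVIDER_1 the escape clock runs at 20 MHz,
 * i.e. 20 cycles per microsecond, so txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100)
 * yields 2000 cycles -- the value later programmed into MIPI_INIT_COUNT. */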
 
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
{
return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
}
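/* Worked example: 1920 active pixels at 24 bpp over 4 lanes is
 * DIV_ROUND_UP(DIV_ROUND_UP(1920 * 24, 8), 4) = DIV_ROUND_UP(5760, 4)
 * = 1440 txbyteclkhs cycles. */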
 
static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pipe = intel_crtc->pipe;
unsigned int bpp = intel_crtc->config.pipe_bpp;
unsigned int lane_count = intel_dsi->lane_count;
 
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
 
hactive = mode->hdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
 
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
 
/* horizontal values are in terms of high speed byte clock */
hactive = txbyteclkhs(hactive, bpp, lane_count);
hfp = txbyteclkhs(hfp, bpp, lane_count);
hsync = txbyteclkhs(hsync, bpp, lane_count);
hbp = txbyteclkhs(hbp, bpp, lane_count);
 
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
 
/* only meaningful for video mode with non-burst sync pulses; can be zero
 * for non-burst sync events and burst modes */
I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
 
/* vertical values are in terms of lines */
I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
}
 
static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
int pipe = intel_crtc->pipe;
unsigned int bpp = intel_crtc->config.pipe_bpp;
u32 val, tmp;
 
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
/* XXX: Location of the call */
band_gap_reset(dev_priv);
 
/* escape clock divider, 20MHz, shared for A and C. device ready must be
* off when doing this! txclkesc? */
tmp = I915_READ(MIPI_CTRL(0));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
 
/* read request priority is per pipe */
tmp = I915_READ(MIPI_CTRL(pipe));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
 
/* XXX: why here, why like this? handling in irq handler?! */
I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
 
I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
 
I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
 
set_dsi_timings(encoder, adjusted_mode);
 
val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
if (is_cmd_mode(intel_dsi)) {
val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
} else {
val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
 
/* XXX: cross-check bpp vs. pixel format? */
val |= intel_dsi->pixel_format;
}
I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
 
/* timeouts for recovery. one frame IIUC. if counter expires, EOT and
 * stop state. */

/*
 * In burst mode, the value should exceed one DPI line time in byte
 * clocks (txbyteclkhs); programming one more than that value is
 * recommended so the timer can actually expire.
 *
 * In non-burst mode, the value should exceed one DPI frame time in
 * byte clocks (txbyteclkhs), again programmed as that value plus one.
 *
 * In DBI-only mode, the value should exceed one DBI frame time in
 * byte clocks (txbyteclkhs), again programmed as that value plus one.
 */
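/* Hypothetical example: for a 1080p mode (htotal 2200) at 24 bpp over 4
 * lanes, one DPI line is txbyteclkhs(2200, 24, 4) = 1650 byte clocks, so
 * burst mode would program 1650 + 1 = 1651 here. */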
 
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->htotal, bpp,
intel_dsi->lane_count) + 1);
} else {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->vtotal *
adjusted_mode->htotal,
bpp, intel_dsi->lane_count) + 1);
}
I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
 
/* dphy stuff */
 
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
 
/* recovery disables */
I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
 
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
intel_dsi->hs_to_lp_count);
 
/* XXX: low power clock equivalence in terms of byte clock. the number
* of byte clocks occupied in one low power clock. based on txbyteclkhs
* and txclkesc. txclkesc time / txbyteclk time * (105 +
* MIPI_STOP_STATE_STALL) / 105.???
*/
I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
 
/* The bandwidth needed to transmit 16 long packets, each containing 252
 * bytes of a DCS write memory command, is programmed in this register in
 * terms of byte clocks. The time taken to transmit those 16 long packets
 * in a DSI stream varies with the DSI transfer rate and the number of
 * lanes configured. */
I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
 
I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
 
if (is_vid_mode(intel_dsi))
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
intel_dsi->video_frmt_cfg_bits |
intel_dsi->video_mode_format);
}
 
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
DRM_DEBUG_KMS("\n");
return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
}
 
static int intel_dsi_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *mode;
 
DRM_DEBUG_KMS("\n");
 
if (!intel_connector->panel.fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
return 0;
}
 
mode = drm_mode_duplicate(connector->dev,
intel_connector->panel.fixed_mode);
if (!mode) {
DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
return 0;
}
 
drm_mode_probed_add(connector, mode);
return 1;
}
 
static void intel_dsi_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
 
DRM_DEBUG_KMS("\n");
intel_panel_fini(&intel_connector->panel);
drm_connector_cleanup(connector);
kfree(connector);
}
 
static const struct drm_encoder_funcs intel_dsi_funcs = {
.destroy = intel_encoder_destroy,
};
 
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
.mode_valid = intel_dsi_mode_valid,
.best_encoder = intel_best_encoder,
};
 
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dsi_detect,
.destroy = intel_dsi_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
};
 
bool intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *fixed_mode = NULL;
const struct intel_dsi_device *dsi;
unsigned int i;
 
DRM_DEBUG_KMS("\n");
 
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
return false;
 
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dsi);
return false;
}
 
intel_encoder = &intel_dsi->base;
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;
 
connector = &intel_connector->base;
 
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
 
/* XXX: very likely not all of these are needed */
intel_encoder->hot_plug = intel_dsi_hot_plug;
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
intel_encoder->enable = intel_dsi_enable;
intel_encoder->mode_set = intel_dsi_mode_set;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
intel_encoder->get_config = intel_dsi_get_config;
 
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
dsi = &intel_dsi_devices[i];
intel_dsi->dev = *dsi;
 
if (dsi->dev_ops->init(&intel_dsi->dev))
break;
}
 
if (i == ARRAY_SIZE(intel_dsi_devices)) {
DRM_DEBUG_KMS("no device found\n");
goto err;
}
 
intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->crtc_mask = (1 << 0); /* XXX */
 
intel_encoder->cloneable = false;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
 
drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
 
connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
 
drm_sysfs_connector_add(connector);
 
fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
if (!fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
goto err;
}
 
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode);
 
return true;
 
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
 
return false;
}
/drivers/video/drm/i915/intel_dsi.h
0,0 → 1,123
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#ifndef _INTEL_DSI_H
#define _INTEL_DSI_H
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
 
struct intel_dsi_device {
unsigned int panel_id;
const char *name;
int type;
const struct intel_dsi_dev_ops *dev_ops;
void *dev_priv;
};
 
struct intel_dsi_dev_ops {
bool (*init)(struct intel_dsi_device *dsi);
 
void (*panel_reset)(struct intel_dsi_device *dsi);
 
void (*disable_panel_power)(struct intel_dsi_device *dsi);
 
/* one time programmable commands if needed */
void (*send_otp_cmds)(struct intel_dsi_device *dsi);
 
/* This callback must be able to assume DSI commands can be sent */
void (*enable)(struct intel_dsi_device *dsi);
 
/* This callback must be able to assume DSI commands can be sent */
void (*disable)(struct intel_dsi_device *dsi);
 
int (*mode_valid)(struct intel_dsi_device *dsi,
struct drm_display_mode *mode);
 
bool (*mode_fixup)(struct intel_dsi_device *dsi,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
void (*mode_set)(struct intel_dsi_device *dsi,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
 
bool (*get_hw_state)(struct intel_dsi_device *dev);
 
struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
 
void (*destroy) (struct intel_dsi_device *dsi);
};
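/*
 * Minimal sketch of a panel sub-driver implementing these ops; every name
 * below is hypothetical and not part of this driver. A real sub-driver
 * would also need an entry in intel_dsi_devices[] in intel_dsi.c. Note
 * that init() receives a pointer to the copy embedded in struct intel_dsi,
 * so container_of() is valid:
 *
 *	static bool example_panel_init(struct intel_dsi_device *dsi)
 *	{
 *		struct intel_dsi *intel_dsi =
 *			container_of(dsi, struct intel_dsi, dev);
 *
 *		intel_dsi->lane_count = 4;
 *		intel_dsi->pixel_format = VID_MODE_FORMAT_RGB888;
 *		return true;
 *	}
 *
 *	static const struct intel_dsi_dev_ops example_panel_ops = {
 *		.init = example_panel_init,
 *		.mode_valid = example_panel_mode_valid,
 *		.get_modes = example_panel_get_modes,
 *		.detect = example_panel_detect,
 *	};
 */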
 
struct intel_dsi {
struct intel_encoder base;
 
struct intel_dsi_device dev;
 
struct intel_connector *attached_connector;
 
/* if true, use HS mode, otherwise LP */
bool hs;
 
/* virtual channel */
int channel;
 
/* number of DSI lanes */
unsigned int lane_count;
 
/* video mode pixel format for MIPI_DSI_FUNC_PRG register */
u32 pixel_format;
 
/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
u32 video_mode_format;
 
/* eot for MIPI_EOT_DISABLE register */
u32 eot_disable;
 
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
u32 video_frmt_cfg_bits;
u16 lp_byte_clk;
 
/* timeouts in byte clocks */
u16 lp_rx_timeout;
u16 turn_arnd_val;
u16 rst_timer_val;
u16 hs_to_lp_count;
u16 clk_lp_to_hs_count;
u16 clk_hs_to_lp_count;
};
 
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);
}
 
extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
 
#endif /* _INTEL_DSI_H */
/drivers/video/drm/i915/intel_dsi_cmd.c
0,0 → 1,427
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jani Nikula <jani.nikula@intel.com>
*/
 
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <linux/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"
 
/*
* XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
* MIPI_COMMAND_ADDRESS registers.
*
* Apparently these registers provide a MIPI adapter level way to send (lots of)
* commands and data to the receiver, without having to write the commands and
* data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
*
* Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
* MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
* framebuffer in command mode displays) these are just an optimization that can
* come later.
*
* For memory writes, these should probably be used for performance.
*/
 
static void print_stat(struct intel_dsi *intel_dsi)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
val = I915_READ(MIPI_INTR_STAT(pipe));
 
#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
"\n", pipe, val,
STAT_BIT(val, TEARING_EFFECT),
STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
STAT_BIT(val, GEN_READ_DATA_AVAIL),
STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
STAT_BIT(val, RX_PROT_VIOLATION),
STAT_BIT(val, RX_INVALID_TX_LENGTH),
STAT_BIT(val, ACK_WITH_NO_ERROR),
STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
STAT_BIT(val, LP_RX_TIMEOUT),
STAT_BIT(val, HS_TX_TIMEOUT),
STAT_BIT(val, DPI_FIFO_UNDERRUN),
STAT_BIT(val, LOW_CONTENTION),
STAT_BIT(val, HIGH_CONTENTION),
STAT_BIT(val, TXDSI_VC_ID_INVALID),
STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
STAT_BIT(val, TXCHECKSUM_ERROR),
STAT_BIT(val, TXECC_MULTIBIT_ERROR),
STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
STAT_BIT(val, TXFALSE_CONTROL_ERROR),
STAT_BIT(val, RXDSI_VC_ID_INVALID),
STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
STAT_BIT(val, RXCHECKSUM_ERROR),
STAT_BIT(val, RXECC_MULTIBIT_ERROR),
STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
STAT_BIT(val, RXFALSE_CONTROL_ERROR),
STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
STAT_BIT(val, RXEOT_SYNC_ERROR),
STAT_BIT(val, RXSOT_SYNC_ERROR),
STAT_BIT(val, RXSOT_ERROR));
#undef STAT_BIT
}
 
enum dsi_type {
DSI_DCS,
DSI_GENERIC,
};
 
/* enable or disable command mode hs transmissions */
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 temp;
u32 mask = DBI_FIFO_EMPTY;
 
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
 
temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
temp &= DBI_HS_LP_MODE_MASK;
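/* XXX: temp is read and masked above but otherwise unused; the write
 * below programs the DBI mode bits directly. */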
I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
 
intel_dsi->hs = enable;
}
 
static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
u8 data_type, u16 data)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 ctrl_reg;
u32 ctrl;
u32 mask;
 
DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
channel, data_type, data);
 
if (intel_dsi->hs) {
ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
mask = HS_CTRL_FIFO_FULL;
} else {
ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
mask = LP_CTRL_FIFO_FULL;
}
 
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
print_stat(intel_dsi);
}
 
/*
* Note: This function is also used for long packets, with length passed
* as data, since SHORT_PACKET_PARAM_SHIFT ==
* LONG_PACKET_WORD_COUNT_SHIFT.
*/
ctrl = data << SHORT_PACKET_PARAM_SHIFT |
channel << VIRTUAL_CHANNEL_SHIFT |
data_type << DATA_TYPE_SHIFT;
 
I915_WRITE(ctrl_reg, ctrl);
 
return 0;
}
 
static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
u8 data_type, const u8 *data, int len)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 data_reg;
int i, j, n;
u32 mask;
 
DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
channel, data_type, len);
 
if (intel_dsi->hs) {
data_reg = MIPI_HS_GEN_DATA(pipe);
mask = HS_DATA_FIFO_FULL;
} else {
data_reg = MIPI_LP_GEN_DATA(pipe);
mask = LP_DATA_FIFO_FULL;
}
 
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
 
for (i = 0; i < len; i += n) {
u32 val = 0;
n = min_t(int, len - i, 4);
 
for (j = 0; j < n; j++)
val |= *data++ << 8 * j;
 
I915_WRITE(data_reg, val);
/* XXX: check for data fifo full, once that is set, write 4
* dwords, then wait for not set, then continue. */
}
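/* e.g. a 5-byte payload { 0x01, 0x02, 0x03, 0x04, 0x05 } is packed
 * little-endian into two register writes, 0x04030201 and 0x00000005;
 * the total length then goes out in the short packet header below. */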
 
return dsi_vc_send_short(intel_dsi, channel, data_type, len);
}
 
static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
int channel, const u8 *data, int len,
enum dsi_type type)
{
int ret;
 
if (len == 0) {
BUG_ON(type == DSI_GENERIC);
ret = dsi_vc_send_short(intel_dsi, channel,
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
0);
} else if (len == 1) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
MIPI_DSI_DCS_SHORT_WRITE, data[0]);
} else if (len == 2) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
MIPI_DSI_DCS_SHORT_WRITE_PARAM,
(data[1] << 8) | data[0]);
} else {
ret = dsi_vc_send_long(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_LONG_WRITE :
MIPI_DSI_DCS_LONG_WRITE, data, len);
}
 
return ret;
}
 
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len)
{
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
}
 
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len)
{
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
}
 
static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd)
{
return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
dcs_cmd);
}
 
static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
int channel, u8 *reqdata,
int reqlen)
{
u16 data;
u8 data_type;
 
switch (reqlen) {
case 0:
data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
data = 0;
break;
case 1:
data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
data = reqdata[0];
break;
case 2:
data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
data = (reqdata[1] << 8) | reqdata[0];
break;
default:
BUG();
}
 
return dsi_vc_send_short(intel_dsi, channel, data_type, data);
}
 
static int dsi_read_data_return(struct intel_dsi *intel_dsi,
u8 *buf, int buflen)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
int i, len = 0;
u32 data_reg, val;
 
if (intel_dsi->hs) {
data_reg = MIPI_HS_GEN_DATA(pipe);
} else {
data_reg = MIPI_LP_GEN_DATA(pipe);
}
 
while (len < buflen) {
val = I915_READ(data_reg);
for (i = 0; i < 4 && len < buflen; i++, len++)
buf[len] = val >> 8 * i;
}
 
return len;
}
 
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
u8 *buf, int buflen)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
 
/*
* XXX: should issue multiple read requests and reads if request is
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
 
I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
 
ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
if (ret)
return ret;
 
mask = GEN_READ_DATA_AVAIL;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
 
ret = dsi_read_data_return(intel_dsi, buf, buflen);
if (ret < 0)
return ret;
 
if (ret != buflen)
return -EIO;
 
return 0;
}
 
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
 
/*
* XXX: should issue multiple read requests and reads if request is
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
 
I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
 
ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
reqlen);
if (ret)
return ret;
 
mask = GEN_READ_DATA_AVAIL;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
 
ret = dsi_read_data_return(intel_dsi, buf, buflen);
if (ret < 0)
return ret;
 
if (ret != buflen)
return -EIO;
 
return 0;
}
 
/*
* send a video mode command
*
* XXX: commands with data in MIPI_DPI_DATA?
*/
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
 
/* XXX: pipe, hs */
if (intel_dsi->hs)
cmd &= ~DPI_LP_MODE;
else
cmd |= DPI_LP_MODE;
 
/* DPI virtual channel?! */
 
mask = DPI_FIFO_EMPTY;
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
 
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
 
/* XXX: old code skips write if control unchanged */
if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
 
I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
 
mask = SPL_PKT_SENT_INTERRUPT;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
 
return 0;
}
/drivers/video/drm/i915/intel_dsi_cmd.h
0,0 → 1,109
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jani Nikula <jani.nikula@intel.com>
*/
 
#ifndef _INTEL_DSI_DSI_H
#define _INTEL_DSI_DSI_H
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <linux/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
 
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
 
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len);
 
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len);
 
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
u8 *buf, int buflen);
 
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen);
 
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
 
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd)
{
return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
}
 
static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd, u8 param)
{
u8 buf[2] = { dcs_cmd, param };
return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
}
 
static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
int channel)
{
return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
}
 
static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
int channel, u8 param)
{
return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
}
 
static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2)
{
u8 buf[2] = { param1, param2 };
return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
}
 
/* XXX: questionable read helpers */
static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
int channel, u8 *buf, int buflen)
{
return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
}
 
static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
int channel, u8 param, u8 *buf,
int buflen)
{
return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
}
 
static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2,
u8 *buf, int buflen)
{
u8 req[2] = { param1, param2 };
 
return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
}
 
 
#endif /* _INTEL_DSI_DSI_H */
/drivers/video/drm/i915/intel_dsi_pll.c
0,0 → 1,300
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Shobhit Kumar <shobhit.kumar@intel.com>
* Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
*/
 
#include <linux/kernel.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_dsi.h"
 
#define DSI_HSS_PACKET_SIZE 4
#define DSI_HSE_PACKET_SIZE 4
#define DSI_HSA_PACKET_EXTRA_SIZE 6
#define DSI_HBP_PACKET_EXTRA_SIZE 6
#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
#define DSI_HFP_PACKET_EXTRA_SIZE 6
#define DSI_EOTP_PACKET_SIZE 4
 
struct dsi_mnp {
u32 dsi_pll_ctrl;
u32 dsi_pll_div;
};
 
static const u32 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
71, 35 /* 91 - 92 */
};
 
#ifdef DSI_CLK_FROM_RR
 
static u32 dsi_rr_formula(const struct drm_display_mode *mode,
int pixel_format, int video_mode_format,
int lane_count, bool eotp)
{
u32 bpp;
u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
u32 bytes_per_line, bytes_per_frame;
u32 num_frames;
u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
u32 dsi_bit_clock_hz;
u32 dsi_clk;
 
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
 
hactive = mode->hdisplay;
vactive = mode->vdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
 
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
 
hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
 
bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
 
/*
* XXX: Need to accurately calculate LP to HS transition timeout and add
* it to bytes_per_line/bytes_per_frame.
*/
 
if (eotp && video_mode_format == VIDEO_MODE_BURST)
bytes_per_line += DSI_EOTP_PACKET_SIZE;
 
bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
vactive * bytes_per_line + vfp * bytes_per_line;
 
if (eotp &&
(video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
bytes_per_frame += DSI_EOTP_PACKET_SIZE;
 
num_frames = drm_mode_vrefresh(mode);
bytes_per_x_frames = num_frames * bytes_per_frame;
 
bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
 
/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
dsi_clk = dsi_bit_clock_hz / 1000;
 
if (eotp && video_mode_format == VIDEO_MODE_BURST)
dsi_clk *= 2;
 
return dsi_clk;
}
 
#else
 
/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
int pixel_format, int lane_count)
{
u32 dsi_clk_khz;
u32 bpp;
 
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
 
/* DSI data rate = pixel clock * bits per pixel / lane count. mode->clock
 * is in kHz, so the result is in kHz as well. */
dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
 
return dsi_clk_khz;
}
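/* Sample calculation: a 148500 kHz pixel clock (1080p60) at 24 bpp over
 * 4 lanes gives DIV_ROUND_CLOSEST(148500 * 24, 4) = 891000 kHz, i.e. an
 * 891 MHz per-lane DSI data rate. */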
 
#endif
 
static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
{
u32 m, n, p;
u32 ref_clk;
u32 error;
u32 tmp_error;
int target_dsi_clk;
int calc_dsi_clk;
u32 calc_m;
u32 calc_p;
u32 m_seed;
 
/* dsi_clk is expected in kHz */
if (dsi_clk < 300000 || dsi_clk > 1150000) {
DRM_ERROR("DSI CLK Out of Range\n");
return -ECHRNG;
}
 
ref_clk = 25000;
target_dsi_clk = dsi_clk;
error = 0xFFFFFFFF;
tmp_error = 0xFFFFFFFF;
calc_m = 0;
calc_p = 0;
 
for (m = 62; m <= 92; m++) {
for (p = 2; p <= 6; p++) {
/* Find the optimal m and p divisors
with minimal error +/- the required clock */
calc_dsi_clk = (m * ref_clk) / p;
if (calc_dsi_clk == target_dsi_clk) {
calc_m = m;
calc_p = p;
error = 0;
break;
} else {
tmp_error = abs(target_dsi_clk - calc_dsi_clk);
}
 
if (tmp_error < error) {
error = tmp_error;
calc_m = m;
calc_p = p;
}
}
 
if (error == 0)
break;
}
 
m_seed = lfsr_converts[calc_m - 62];
n = 1;
dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
m_seed << DSI_PLL_M1_DIV_SHIFT;
 
return 0;
}
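/* Hypothetical example: for dsi_clk = 891000 kHz with the 25 MHz reference,
 * the search settles on m = 71, p = 2 (71 * 25000 / 2 = 887500 kHz, error
 * 3500 kHz), and m_seed = lfsr_converts[71 - 62] = 461. */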
 
/*
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
*/
static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
struct dsi_mnp dsi_mnp;
u32 dsi_clk;
 
dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
intel_dsi->lane_count);
 
ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
if (ret) {
DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
return;
}
 
dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
 
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
 
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
}
 
void vlv_enable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp;
 
DRM_DEBUG_KMS("\n");
 
mutex_lock(&dev_priv->dpio_lock);
 
vlv_configure_dsi_pll(encoder);
 
/* wait at least 0.5 us after ungating before enabling VCO */
usleep_range(1, 10);
 
tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
tmp |= DSI_PLL_VCO_EN;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
 
mutex_unlock(&dev_priv->dpio_lock);
 
if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
DRM_ERROR("DSI PLL lock failed\n");
return;
}
 
DRM_DEBUG_KMS("DSI PLL locked\n");
}
 
void vlv_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp;
 
DRM_DEBUG_KMS("\n");
 
mutex_lock(&dev_priv->dpio_lock);
 
tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
tmp &= ~DSI_PLL_VCO_EN;
tmp |= DSI_PLL_LDO_GATE;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
 
mutex_unlock(&dev_priv->dpio_lock);
}
/drivers/video/drm/i915/intel_dvo.c
153,6 → 153,8
flags |= DRM_MODE_FLAG_NVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
 
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
 
static void intel_disable_dvo(struct intel_encoder *encoder)
232,7 → 234,8
intel_modeset_check_state(connector->dev);
}
 
static int intel_dvo_mode_valid(struct drm_connector *connector,
static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
279,11 → 282,6
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
 
if (intel_dvo->dev.dev_ops->mode_fixup)
return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
&pipe_config->requested_mode,
adjusted_mode);
 
return true;
}
 
378,7 → 376,6
 
static void intel_dvo_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
459,11 → 456,11
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
 
intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
if (!intel_dvo)
return;
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dvo);
return;
/drivers/video/drm/i915/intel_fbdev.c
0,0 → 1,292
/*
* Copyright © 2007 David Airlie
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* David Airlie
*/
 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
//#include <linux/mm.h>
//#include <linux/tty.h>
#include <linux/sysrq.h>
//#include <linux/delay.h>
#include <linux/fb.h>
//#include <linux/init.h>
//#include <linux/vga_switcheroo.h>
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
 
struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
{
#define BYTES_PER_LONG (BITS_PER_LONG/8)
#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
int fb_info_size = sizeof(struct fb_info);
struct fb_info *info;
char *p;
 
if (size)
fb_info_size += PADDING;
 
p = kzalloc(fb_info_size + size, GFP_KERNEL);
 
if (!p)
return NULL;
 
info = (struct fb_info *) p;
 
if (size)
info->par = p + fb_info_size;
 
return info;
#undef PADDING
#undef BYTES_PER_LONG
}
 
 
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
// .fb_fillrect = cfb_fillrect,
// .fb_copyarea = cfb_copyarea,
// .fb_imageblit = cfb_imageblit,
// .fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
// .fb_setcmap = drm_fb_helper_setcmap,
// .fb_debug_enter = drm_fb_helper_debug_enter,
// .fb_debug_leave = drm_fb_helper_debug_leave,
};
 
static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_device *dev = helper->dev;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size, ret;
 
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
sizes->surface_bpp = 32;
 
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
 
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
8), 512);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
 
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = main_fb_obj;
if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
obj->has_global_gtt_mapping = 0;
obj->stride = mode_cmd.pitches[0];
 
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
}
 
ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
goto out_unpin;
 
return 0;
 
out_unpin:
i915_gem_object_unpin(obj);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
return ret;
}
 
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct intel_framebuffer *intel_fb = &ifbdev->ifb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
int size, ret;
 
mutex_lock(&dev->struct_mutex);
 
if (!intel_fb->obj) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
goto out_unlock;
} else {
DRM_DEBUG_KMS("re-using BIOS fb\n");
sizes->fb_width = intel_fb->base.width;
sizes->fb_height = intel_fb->base.height;
}
 
obj = intel_fb->obj;
size = obj->base.size;
 
info = framebuffer_alloc(0, &dev->pdev->dev);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
}
 
info->par = helper;
 
fb = &ifbdev->ifb.base;
 
ifbdev->helper.fb = fb;
ifbdev->helper.fbdev = info;
 
strcpy(info->fix.id, "inteldrmfb");
 
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
 
/* setup aperture base/size for vesafb takeover */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
 
info->screen_base = (void*) 0xFE000000;
info->screen_size = size;
 
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;
 
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
i915_gem_obj_ggtt_offset(obj), obj);
 
mutex_unlock(&dev->struct_mutex);
return 0;
 
out_unpin:
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
/** Sets the color ramps on behalf of RandR */
static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
intel_crtc->lut_r[regno] = red >> 8;
intel_crtc->lut_g[regno] = green >> 8;
intel_crtc->lut_b[regno] = blue >> 8;
}
 
static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
*red = intel_crtc->lut_r[regno] << 8;
*green = intel_crtc->lut_g[regno] << 8;
*blue = intel_crtc->lut_b[regno] << 8;
}
 
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
.fb_probe = intelfb_create,
};
 
 
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return -ENOMEM;
 
dev_priv->fbdev = ifbdev;
ifbdev->helper.funcs = &intel_fb_helper_funcs;
 
ret = drm_fb_helper_init(dev, &ifbdev->helper,
INTEL_INFO(dev)->num_pipes,
4);
if (ret) {
kfree(ifbdev);
return ret;
}
 
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
 
return 0;
}
 
void intel_fbdev_initial_config(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Due to the peculiar init order wrt hpd handling, this is separate. */
drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
}
 
 
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
/drivers/video/drm/i915/intel_hdmi.c
130,9 → 130,9
 
static void g4x_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
const void *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(VIDEO_DIP_CTL);
167,9 → 167,9
 
static void ibx_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
const void *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
205,9 → 205,9
 
static void cpt_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
const void *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
246,9 → 246,9
 
static void vlv_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
const void *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
284,9 → 284,9
 
static void hsw_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const uint8_t *frame, ssize_t len)
const void *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
713,6 → 713,7
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp, flags = 0;
int dotclock;
 
tmp = I915_READ(intel_hdmi->hdmi_reg);
 
727,6 → 728,16
flags |= DRM_MODE_FLAG_NVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
 
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
dotclock = pipe_config->port_clock * 2 / 3;
else
dotclock = pipe_config->port_clock;
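/* At 12 bpc each pixel takes 1.5x the link bandwidth of 8 bpc, so the
 * dotclock is the port clock scaled by 2/3; e.g. a 225000 kHz port
 * clock carries a 150000 kHz dotclock. */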
 
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
}
 
static void intel_enable_hdmi(struct intel_encoder *encoder)
836,13 → 847,14
 
if (IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev))
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
else
return 225000;
}
 
static int intel_hdmi_mode_valid(struct drm_connector *connector,
static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
862,7 → 874,7
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int desired_bpp;
 
904,7 → 916,7
pipe_config->pipe_bpp = desired_bpp;
}
 
if (adjusted_mode->clock > portclock_limit) {
if (adjusted_mode->crtc_clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
1063,7 → 1075,7
return 0;
}
 
static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
1070,7 → 1082,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
int port = vlv_dport_to_channel(dport);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
 
1079,7 → 1091,7
 
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
1086,42 → 1098,37
else
val &= ~(1<<21);
val |= 0x001000c4;
vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
 
/* HDMI 1.0V-2dB */
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
0x2b245f5f);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
0x5578b83a);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
0x0c782040);
vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
0x2b247878);
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
0x00002000);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
 
/* Program lane clock */
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
 
intel_enable_hdmi(encoder);
 
vlv_wait_port_ready(dev_priv, port);
vlv_wait_port_ready(dev_priv, dport);
}
 
static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int port = vlv_dport_to_channel(dport);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
 
if (!IS_VALLEYVIEW(dev))
return;
1128,10 → 1135,10
 
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1138,33 → 1145,33
DPIO_PCS_CLK_SOFT_RESET);
 
/* Fix up inter-pair skew failure */
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
 
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
0x00002000);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void intel_hdmi_post_disable(struct intel_encoder *encoder)
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int port = vlv_dport_to_channel(dport);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
 
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void intel_hdmi_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
1211,6 → 1218,7
 
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
 
switch (port) {
case PORT_B:
1275,11 → 1283,11
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
 
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
1296,10 → 1304,10
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = intel_hdmi_post_disable;
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->enable = intel_enable_hdmi;
}
/drivers/video/drm/i915/intel_i2c.c
34,6 → 34,11
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
enum disp_clk {
CDCLK,
CZCLK
};
 
struct gmbus_port {
const char *name;
int reg;
58,10 → 63,60
return container_of(i2c, struct intel_gmbus, adapter);
}
 
static int get_disp_clk_div(struct drm_i915_private *dev_priv,
enum disp_clk clk)
{
u32 reg_val;
int clk_ratio;
 
reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
 
if (clk == CDCLK)
clk_ratio =
((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
else
clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
 
return clk_ratio;
}
 
static void gmbus_set_freq(struct drm_i915_private *dev_priv)
{
int vco, gmbus_freq = 0, cdclk_div;
 
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
vco = valleyview_get_vco(dev_priv);
 
/* Get the CDCLK divide ratio */
cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
 
/*
* Program the gmbus_freq based on the cdclk frequency.
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
if (cdclk_div)
gmbus_freq = (vco << 1) / cdclk_div;
 
if (WARN_ON(gmbus_freq == 0))
return;
 
I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
}
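
/*
 * For illustration, the computation above with the pieces inlined: the
 * CZCLK_CDCLK_FREQ_RATIO field stores (divide ratio - 1), so the
 * decoded divider is field + 1, and the programmed value works out to
 * 2 * vco / cdclk_div. The helper name is illustrative.
 */
static u32 gmbus_freq_sketch(u32 vco, u32 cdclk_ratio_field)
{
	u32 cdclk_div = cdclk_ratio_field + 1; /* field encodes ratio - 1 */

	return (vco << 1) / cdclk_div;
}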
 
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* In a BIOS-less system, program the correct gmbus frequency
* before reading the EDID.
*/
if (IS_VALLEYVIEW(dev))
gmbus_set_freq(dev_priv);
 
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
/drivers/video/drm/i915/intel_lvds.c
92,6 → 92,7
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp, flags = 0;
int dotclock;
 
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
116,6 → 117,13
 
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
 
dotclock = pipe_config->port_clock;
 
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
}
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
198,7 → 206,8
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg;
 
217,7 → 226,7
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
 
intel_panel_enable_backlight(dev, intel_crtc->pipe);
intel_panel_enable_backlight(intel_connector);
}
 
static void intel_disable_lvds(struct intel_encoder *encoder)
224,6 → 233,8
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg;
 
235,7 → 246,7
stat_reg = PP_STATUS;
}
 
intel_panel_disable_backlight(dev);
intel_panel_disable_backlight(intel_connector);
 
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
245,7 → 256,8
POSTING_READ(lvds_encoder->reg);
}
 
static int intel_lvds_mode_valid(struct drm_connector *connector,
static enum drm_mode_status
intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
436,9 → 448,19
if (dev_priv->modeset_restore == MODESET_DONE)
goto exit;
 
/*
* Some old platforms' BIOSes love to wreak havoc while the lid is closed.
* We try to detect this here and undo any damage. The split for PCH
* platforms is rather conservative and a bit arbitrary, except that on
* those platforms VGA disabling requires actual legacy VGA I/O access,
* and as part of the cleanup in the hw state restore we also re-disable
* the VGA plane.
*/
if (!HAS_PCH_SPLIT(dev)) {
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
}
 
dev_priv->modeset_restore = MODESET_DONE;
 
466,7 → 488,6
 
intel_panel_fini(&lvds_connector->base.panel);
 
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
734,58 → 755,7
{ } /* terminating entry */
};
 
/**
* intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
* @dev: drm device
* @connector: LVDS connector
*
* Find the reduced downclock for LVDS in EDID.
*/
static void intel_find_lvds_downclock(struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *scan;
int temp_downclock;
 
temp_downclock = fixed_mode->clock;
list_for_each_entry(scan, &connector->probed_modes, head) {
/*
* If one mode has the same resolution with the fixed_panel
* mode while they have the different refresh rate, it means
* that the reduced downclock is found for the LVDS. In such
* case we can set the different FPx0/1 to dynamically select
* between low and high frequency.
*/
if (scan->hdisplay == fixed_mode->hdisplay &&
scan->hsync_start == fixed_mode->hsync_start &&
scan->hsync_end == fixed_mode->hsync_end &&
scan->htotal == fixed_mode->htotal &&
scan->vdisplay == fixed_mode->vdisplay &&
scan->vsync_start == fixed_mode->vsync_start &&
scan->vsync_end == fixed_mode->vsync_end &&
scan->vtotal == fixed_mode->vtotal) {
if (scan->clock < temp_downclock) {
/*
* The downclock is already found. But we
* expect to find the lower downclock.
*/
temp_downclock = scan->clock;
}
}
}
if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
"Normal clock %dKhz, downclock %dKhz\n",
fixed_mode->clock, temp_downclock);
}
}
 
/*
* Enumerate the child dev array parsed from VBT to check whether
* the LVDS is present.
* If it is present, return 1.
802,7 → 772,8
return true;
 
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
struct child_device_config *child = dev_priv->vbt.child_dev + i;
union child_device_config *uchild = dev_priv->vbt.child_dev + i;
struct old_child_dev_config *child = &uchild->old;
 
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
956,11 → 927,11
}
}
 
lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return;
 
lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
if (!lvds_connector) {
kfree(lvds_encoder);
return;
1061,8 → 1032,22
 
fixed_mode = drm_mode_duplicate(dev, scan);
if (fixed_mode) {
intel_find_lvds_downclock(dev, fixed_mode,
connector);
intel_connector->panel.downclock_mode =
intel_find_panel_downclock(dev,
fixed_mode, connector);
if (intel_connector->panel.downclock_mode !=
NULL && i915_lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = true;
dev_priv->lvds_downclock =
intel_connector->panel.
downclock_mode->clock;
DRM_DEBUG_KMS("LVDS downclock is found"
" in EDID. Normal clock %dKhz, "
"downclock %dKhz\n",
fixed_mode->clock,
dev_priv->lvds_downclock);
}
goto out;
}
}
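
/*
 * For reference, a hedged sketch of what a panel-downclock finder does,
 * modeled on the intel_find_lvds_downclock body removed above: scan the
 * probed modes for one with identical timings but a lower pixel clock.
 * This is an illustration, not the actual intel_find_panel_downclock.
 */
static struct drm_display_mode *
find_panel_downclock_sketch(struct drm_display_mode *fixed_mode,
			    struct drm_connector *connector)
{
	struct drm_display_mode *scan, *best = NULL;

	list_for_each_entry(scan, &connector->probed_modes, head) {
		if (scan->hdisplay == fixed_mode->hdisplay &&
		    scan->hsync_start == fixed_mode->hsync_start &&
		    scan->hsync_end == fixed_mode->hsync_end &&
		    scan->htotal == fixed_mode->htotal &&
		    scan->vdisplay == fixed_mode->vdisplay &&
		    scan->vsync_start == fixed_mode->vsync_start &&
		    scan->vsync_end == fixed_mode->vsync_end &&
		    scan->vtotal == fixed_mode->vtotal &&
		    scan->clock < fixed_mode->clock &&
		    (!best || scan->clock < best->clock))
			best = scan; /* keep the lowest matching clock */
	}

	return best;
}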
/drivers/video/drm/i915/intel_opregion.c
40,6 → 40,9
 
#define PCI_ASLE 0xe4
#define PCI_ASLS 0xfc
#define PCI_SWSCI 0xe8
#define PCI_SWSCI_SCISEL (1 << 15)
#define PCI_SWSCI_GSSCIE (1 << 0)
 
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
63,7 → 66,7
u8 driver_ver[16];
u32 mboxes;
u8 reserved[164];
} __attribute__((packed));
} __packed;
 
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
85,7 → 88,7
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
u8 rsvd2[60];
} __attribute__((packed));
} __packed;
 
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
93,7 → 96,7
u32 parm; /* command parameters */
u32 dslp; /* driver sleep time-out */
u8 rsvd[244];
} __attribute__((packed));
} __packed;
 
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
109,26 → 112,39
u32 epfm; /* enabled panel fitting modes */
u8 plut[74]; /* panel LUT and identifier */
u32 pfmb; /* PWM freq and min brightness */
u8 rsvd[102];
} __attribute__((packed));
u32 cddv; /* color correction default values */
u32 pcft; /* power conservation features */
u32 srot; /* supported rotation angles */
u32 iuer; /* IUER events */
u8 rsvd[86];
} __packed;
 
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
#define ASLE_ARDY_NOT_READY (0 << 0)
 
/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM (1 << 0)
#define ASLE_SET_BACKLIGHT (1 << 1)
#define ASLE_SET_PFIT (1 << 2)
#define ASLE_SET_PWM_FREQ (1 << 3)
#define ASLE_REQ_MSK 0xf
/* ASLE Interrupt Command (ASLC) bits */
#define ASLC_SET_ALS_ILLUM (1 << 0)
#define ASLC_SET_BACKLIGHT (1 << 1)
#define ASLC_SET_PFIT (1 << 2)
#define ASLC_SET_PWM_FREQ (1 << 3)
#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
#define ASLC_BUTTON_ARRAY (1 << 5)
#define ASLC_CONVERTIBLE_INDICATOR (1 << 6)
#define ASLC_DOCKING_INDICATOR (1 << 7)
#define ASLC_ISCT_STATE_CHANGE (1 << 8)
#define ASLC_REQ_MSK 0x1ff
/* response bits */
#define ASLC_ALS_ILLUM_FAILED (1 << 10)
#define ASLC_BACKLIGHT_FAILED (1 << 12)
#define ASLC_PFIT_FAILED (1 << 14)
#define ASLC_PWM_FREQ_FAILED (1 << 16)
#define ASLC_ROTATION_ANGLES_FAILED (1 << 18)
#define ASLC_BUTTON_ARRAY_FAILED (1 << 20)
#define ASLC_CONVERTIBLE_FAILED (1 << 22)
#define ASLC_DOCKING_FAILED (1 << 24)
#define ASLC_ISCT_STATE_FAILED (1 << 26)
 
/* response bits of ASLE irq request */
#define ASLE_ALS_ILLUM_FAILED (1<<10)
#define ASLE_BACKLIGHT_FAILED (1<<12)
#define ASLE_PFIT_FAILED (1<<14)
#define ASLE_PWM_FREQ_FAILED (1<<16)
 
/* Technology enabled indicator */
#define ASLE_TCHE_ALS_EN (1 << 0)
#define ASLE_TCHE_BLC_EN (1 << 1)
153,6 → 169,60
 
#define ASLE_CBLV_VALID (1<<31)
 
/* IUER */
#define ASLE_IUER_DOCKING (1 << 7)
#define ASLE_IUER_CONVERTIBLE (1 << 6)
#define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4)
#define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3)
#define ASLE_IUER_VOLUME_UP_BTN (1 << 2)
#define ASLE_IUER_WINDOWS_BTN (1 << 1)
#define ASLE_IUER_POWER_BTN (1 << 0)
 
/* Software System Control Interrupt (SWSCI) */
#define SWSCI_SCIC_INDICATOR (1 << 0)
#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
#define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1)
#define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8
#define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8)
#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8
#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
#define SWSCI_SCIC_EXIT_STATUS_SHIFT 5
#define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5)
#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
 
#define SWSCI_FUNCTION_CODE(main, sub) \
((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
(sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
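
/*
 * Worked example of the packing above: the main function lands in bits
 * 4:1 and the sub-function in bits 15:8, so SWSCI_GBDA_PANEL_DETAILS is
 * SWSCI_FUNCTION_CODE(4, 5) = (4 << 1) | (5 << 8) = 0x508.
 */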
 
/* SWSCI: Get BIOS Data (GBDA) */
#define SWSCI_GBDA 4
#define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
#define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
#define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
#define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
#define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
#define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
 
/* SWSCI: System BIOS Callbacks (SBCB) */
#define SWSCI_SBCB 6
#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
#define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
#define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
#define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
#define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
#define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
#define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
#define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
#define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
 
#define ACPI_OTHER_OUTPUT (0<<8)
#define ACPI_VGA_OUTPUT (1<<8)
#define ACPI_TV_OUTPUT (2<<8)
160,8 → 230,615
#define ACPI_LVDS_OUTPUT (4<<8)
 
#ifdef CONFIG_ACPI
#endif
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
u32 main_function, sub_function, scic;
u16 pci_swsci;
u32 dslp;
 
if (!swsci)
return -ENODEV;
 
main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
SWSCI_SCIC_SUB_FUNCTION_SHIFT;
 
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
if ((dev_priv->opregion.swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
if ((dev_priv->opregion.swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
 
/* Driver sleep timeout in ms. */
dslp = ioread32(&swsci->dslp);
if (!dslp) {
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
dslp = 50;
} else if (dslp > 500) {
/* Hey bios, trust must be earned. */
WARN_ONCE(1, "excessive driver sleep timeout (DSLP) %u\n", dslp);
dslp = 500;
}
 
/* The spec tells us to do this, but we are the only user... */
scic = ioread32(&swsci->scic);
if (scic & SWSCI_SCIC_INDICATOR) {
DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
return -EBUSY;
}
 
scic = function | SWSCI_SCIC_INDICATOR;
 
iowrite32(parm, &swsci->parm);
iowrite32(scic, &swsci->scic);
 
/* Ensure SCI event is selected and event trigger is cleared. */
pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
pci_swsci |= PCI_SWSCI_SCISEL;
pci_swsci &= ~PCI_SWSCI_GSSCIE;
pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
}
 
/* Use event trigger to tell bios to check the mail. */
pci_swsci |= PCI_SWSCI_GSSCIE;
pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
 
/* Poll for the result. */
#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
if (wait_for(C, dslp)) {
DRM_DEBUG_DRIVER("SWSCI request timed out\n");
return -ETIMEDOUT;
}
 
scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
SWSCI_SCIC_EXIT_STATUS_SHIFT;
 
/* Note: scic == 0 is an error! */
if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
return -EIO;
}
 
if (parm_out)
*parm_out = ioread32(&swsci->parm);
 
return 0;
 
#undef C
}
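
/*
 * An illustrative caller, mirroring how swsci_setup() below uses this
 * routine: ask GBDA sub-function 0 which GBDA calls the BIOS supports
 * and treat the output parameter as a bitmask.
 */
static void swsci_query_sketch(struct drm_device *dev)
{
	u32 supported;

	if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &supported) == 0)
		DRM_DEBUG_DRIVER("GBDA supported calls: 0x%08x\n", supported);
}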
 
#define DISPLAY_TYPE_CRT 0
#define DISPLAY_TYPE_TV 1
#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
 
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable)
{
struct drm_device *dev = intel_encoder->base.dev;
u32 parm = 0;
u32 type = 0;
u32 port;
 
/* don't care about old stuff for now */
if (!HAS_DDI(dev))
return 0;
 
port = intel_ddi_get_encoder_port(intel_encoder);
if (port == PORT_E) {
port = 0;
} else {
parm |= 1 << port;
port++;
}
 
if (!enable)
parm |= 4 << 8;
 
switch (intel_encoder->type) {
case INTEL_OUTPUT_ANALOG:
type = DISPLAY_TYPE_CRT;
break;
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
break;
case INTEL_OUTPUT_EDP:
type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
break;
default:
WARN_ONCE(1, "unsupported intel_encoder type %d\n",
intel_encoder->type);
return -EINVAL;
}
 
parm |= type << (16 + port * 3);
 
return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
}
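
/*
 * Worked example of the parm packing above, with illustrative values:
 * enabling HDMI on DDI port B (PORT_B == 1) sets port bit 1, bumps the
 * port index to 2, and places the display type (external flat
 * panel == 2) at bit 16 + 2 * 3 = 22:
 *
 *	parm = (1 << 1) | (2 << 22) = 0x00800002
 *
 * A disable request would additionally OR in (4 << 8).
 */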
 
static const struct {
pci_power_t pci_power_state;
u32 parm;
} power_state_map[] = {
{ PCI_D0, 0x00 },
{ PCI_D1, 0x01 },
{ PCI_D2, 0x02 },
{ PCI_D3hot, 0x04 },
{ PCI_D3cold, 0x04 },
};
 
int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
int i;
 
if (!HAS_DDI(dev))
return 0;
 
for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
if (state == power_state_map[i].pci_power_state)
return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
power_state_map[i].parm, NULL);
}
 
return -EINVAL;
}
 
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
 
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
if (!(bclp & ASLE_BCLP_VALID))
return ASLC_BACKLIGHT_FAILED;
 
bclp &= ASLE_BCLP_MSK;
if (bclp > 255)
return ASLC_BACKLIGHT_FAILED;
 
mutex_lock(&dev->mode_config.mutex);
 
/*
* Update backlight on all connectors that support backlight (usually
* only one).
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
intel_panel_set_backlight(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
mutex_unlock(&dev->mode_config.mutex);
 
 
return 0;
}
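
/*
 * Worked example of the CBLV write above: a requested bclp of 128
 * (out of 255) reports back as DIV_ROUND_UP(128 * 100, 255) = 51
 * percent, OR'd with the valid bit:
 *
 *	cblv = 51 | ASLE_CBLV_VALID = 0x80000033
 */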
 
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below the sensor
range, 0xffff above it; 1-0xfffe are valid readings */
DRM_DEBUG_DRIVER("Illum is not supported\n");
return ASLC_ALS_ILLUM_FAILED;
}
 
static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
{
DRM_DEBUG_DRIVER("PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
 
static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
DRM_DEBUG_DRIVER("Pfit is not supported\n");
return ASLC_PFIT_FAILED;
}
 
static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
{
DRM_DEBUG_DRIVER("SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
 
static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
{
if (!iuer)
DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
if (iuer & ASLE_IUER_VOLUME_UP_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
if (iuer & ASLE_IUER_WINDOWS_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
if (iuer & ASLE_IUER_POWER_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
 
return ASLC_BUTTON_ARRAY_FAILED;
}
 
static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
else
DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
 
return ASLC_CONVERTIBLE_FAILED;
}
 
static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
else
DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
 
return ASLC_DOCKING_FAILED;
}
 
static u32 asle_isct_state(struct drm_device *dev)
{
DRM_DEBUG_DRIVER("ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
}
 
static void asle_work(struct work_struct *work)
{
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
struct drm_i915_private *dev_priv =
container_of(opregion, struct drm_i915_private, opregion);
struct drm_device *dev = dev_priv->dev;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;
 
if (!asle)
return;
 
aslc_req = ioread32(&asle->aslc);
 
if (!(aslc_req & ASLC_REQ_MSK)) {
DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
aslc_req);
return;
}
 
if (aslc_req & ASLC_SET_ALS_ILLUM)
aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
 
if (aslc_req & ASLC_SET_BACKLIGHT)
aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
 
if (aslc_req & ASLC_SET_PFIT)
aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
 
if (aslc_req & ASLC_SET_PWM_FREQ)
aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
 
if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
aslc_stat |= asle_set_supported_rotation_angles(dev,
ioread32(&asle->srot));
 
if (aslc_req & ASLC_BUTTON_ARRAY)
aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
 
if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
 
if (aslc_req & ASLC_DOCKING_INDICATOR)
aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
 
if (aslc_req & ASLC_ISCT_STATE_CHANGE)
aslc_stat |= asle_isct_state(dev);
 
iowrite32(aslc_stat, &asle->aslc);
}
 
void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
}
 
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
 
static struct intel_opregion *system_opregion;
 
static int intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
{
/* The only video events relevant to opregion are 0x80. These indicate
either a docking event, lid switch or display switch request. In
Linux, these are handled by the dock, button and video drivers.
*/
 
struct opregion_acpi __iomem *acpi;
struct acpi_bus_event *event = data;
int ret = NOTIFY_OK;
 
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
 
if (!system_opregion)
return NOTIFY_DONE;
 
acpi = system_opregion->acpi;
 
if (event->type == 0x80 &&
(ioread32(&acpi->cevt) & 1) == 0)
ret = NOTIFY_BAD;
 
iowrite32(0, &acpi->csts);
 
return ret;
}
 
static struct notifier_block intel_opregion_notifier = {
.notifier_call = intel_opregion_video_event,
};
 
/*
* Initialise the DIDL field in opregion. This passes a list of devices to
* the firmware. Values are defined by section B.4.2 of the ACPI specification
* (version 3)
*/
 
static void intel_didl_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
struct drm_connector *connector;
acpi_handle handle;
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
u32 temp;
int i = 0;
 
handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
 
if (acpi_is_video_device(handle))
acpi_video_bus = acpi_dev;
else {
list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
if (acpi_is_video_device(acpi_cdev->handle)) {
acpi_video_bus = acpi_cdev;
break;
}
}
}
 
if (!acpi_video_bus) {
pr_warn("No ACPI video bus found\n");
return;
}
 
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
if (i >= 8) {
dev_dbg(&dev->pdev->dev,
"More than 8 outputs detected via ACPI\n");
return;
}
status =
acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
NULL, &device_id);
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
iowrite32((u32)(device_id & 0x0f0f),
&opregion->acpi->didl[i]);
i++;
}
}
 
end:
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
iowrite32(0, &opregion->acpi->didl[i]);
return;
 
blind_set:
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
dev_dbg(&dev->pdev->dev,
"More than 8 outputs in connector list\n");
return;
}
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
output_type = ACPI_VGA_OUTPUT;
break;
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_9PinDIN:
output_type = ACPI_TV_OUTPUT;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
output_type = ACPI_DIGITAL_OUTPUT;
break;
case DRM_MODE_CONNECTOR_LVDS:
output_type = ACPI_LVDS_OUTPUT;
break;
}
temp = ioread32(&opregion->acpi->didl[i]);
iowrite32(temp | (1<<31) | output_type | i,
&opregion->acpi->didl[i]);
i++;
}
goto end;
}
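
/*
 * Worked example of the blind-set DIDL encoding above: the first
 * connector (i == 0) reported as LVDS packs to
 *
 *	(1 << 31) | ACPI_LVDS_OUTPUT | 0 = 0x80000400
 *
 * with ACPI_LVDS_OUTPUT defined earlier in this file as (4 << 8).
 */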
 
static void intel_setup_cadls(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
int i = 0;
u32 disp_id;
 
/* Initialize the CADL field by duplicating the DIDL values.
* Technically, this is not always correct as display outputs may exist
* but not be active. This initialization is necessary for some Clevo
* laptops that check this field before processing the brightness and
* display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
* there are fewer than eight devices. */
do {
disp_id = ioread32(&opregion->acpi->didl[i]);
iowrite32(disp_id, &opregion->acpi->cadl[i]);
} while (++i < 8 && disp_id != 0);
}
 
void intel_opregion_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
 
if (!opregion->header)
return;
 
if (opregion->acpi) {
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_didl_outputs(dev);
intel_setup_cadls(dev);
}
 
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
iowrite32(0, &opregion->acpi->csts);
iowrite32(1, &opregion->acpi->drdy);
 
system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
}
 
if (opregion->asle) {
iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche);
iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy);
}
}
 
void intel_opregion_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
 
if (!opregion->header)
return;
 
if (opregion->asle)
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
 
cancel_work_sync(&dev_priv->opregion.asle_work);
 
if (opregion->acpi) {
iowrite32(0, &opregion->acpi->drdy);
 
system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
}
 
/* just clear all opregion memory pointers now */
iounmap(opregion->header);
opregion->header = NULL;
opregion->acpi = NULL;
opregion->swsci = NULL;
opregion->asle = NULL;
opregion->vbt = NULL;
opregion->lid_state = NULL;
}
 
static void swsci_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
bool requested_callbacks = false;
u32 tmp;
 
/* Sub-function code 0 is okay in both cases, let's allow it. */
opregion->swsci_gbda_sub_functions = 1;
opregion->swsci_sbcb_sub_functions = 1;
 
/* We use GBDA to ask for supported GBDA calls. */
if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
tmp <<= 1;
opregion->swsci_gbda_sub_functions |= tmp;
}
 
/*
* We also use GBDA to ask for _requested_ SBCB callbacks. The driver
* must not call interfaces that are not specifically requested by the
* bios.
*/
if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
/* here, the bits already match sub-function codes */
opregion->swsci_sbcb_sub_functions |= tmp;
requested_callbacks = true;
}
 
/*
* We use SBCB itself to ask for _supported_ SBCB calls. Supported does
* not mean _requested_, and we still must not call interfaces that have
* not been requested.
*/
if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
u32 low = tmp & 0x7ff;
u32 high = tmp & ~0xfff; /* bit 11 is reserved */
tmp = (high << 4) | (low << 1) | 1;
 
/* best guess what to do with supported wrt requested */
if (requested_callbacks) {
u32 req = opregion->swsci_sbcb_sub_functions;
if ((req & tmp) != req)
DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
/* XXX: for now, trust the requested callbacks */
/* opregion->swsci_sbcb_sub_functions &= tmp; */
} else {
opregion->swsci_sbcb_sub_functions |= tmp;
}
}
 
DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
opregion->swsci_gbda_sub_functions,
opregion->swsci_sbcb_sub_functions);
}
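
/*
 * Worked example of the SBCB bit remap above: bit 11 of the response is
 * reserved, so bits 0-10 map to sub-functions 1-11 (shift left by one)
 * and bits 12 and up map to sub-functions 16 and up (shift left by
 * four). A response with bits 0 and 12 set therefore becomes
 * (1 << 16) | (1 << 1) | 1 = 0x10003, with sub-function 0 always
 * allowed.
 */
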
#else /* CONFIG_ACPI */
static inline void swsci_setup(struct drm_device *dev) {}
#endif /* CONFIG_ACPI */
 
int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
203,6 → 880,7
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
swsci_setup(dev);
}
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
/drivers/video/drm/i915/intel_panel.c
50,23 → 50,22
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
struct drm_display_mode *mode, *adjusted_mode;
struct drm_display_mode *adjusted_mode;
int x, y, width, height;
 
mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
 
x = y = width = height = 0;
 
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->vdisplay == pipe_config->pipe_src_h)
goto done;
 
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
width = mode->hdisplay;
height = mode->vdisplay;
width = pipe_config->pipe_src_w;
height = pipe_config->pipe_src_h;
x = (adjusted_mode->hdisplay - width + 1)/2;
y = (adjusted_mode->vdisplay - height + 1)/2;
break;
74,10 → 73,12
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
{
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
u32 scaled_width = adjusted_mode->hdisplay
* pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w
* adjusted_mode->vdisplay;
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / mode->vdisplay;
width = scaled_height / pipe_config->pipe_src_h;
if (width & 1)
width++;
x = (adjusted_mode->hdisplay - width + 1) / 2;
84,7 → 85,7
y = 0;
height = adjusted_mode->vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
height = scaled_width / mode->hdisplay;
height = scaled_width / pipe_config->pipe_src_w;
if (height & 1)
height++;
y = (adjusted_mode->vdisplay - height + 1) / 2;
171,54 → 172,37
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
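
/*
 * The return statement above is the classic integer round-to-nearest
 * idiom: adding FACTOR/2 before dividing by FACTOR rounds instead of
 * truncating. A minimal sketch, with an assumed scale:
 */
#define EXAMPLE_FACTOR (1 << 12)	/* illustrative scale only */

static u32 round_to_nearest_sketch(u32 scaled)
{
	return (scaled + EXAMPLE_FACTOR / 2) / EXAMPLE_FACTOR;
}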
 
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
u32 *pfit_control)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *mode, *adjusted_mode;
 
mode = &pipe_config->requested_mode;
adjusted_mode = &pipe_config->adjusted_mode;
 
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
 
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
centre_horizontally(adjusted_mode, mode->hdisplay);
centre_vertically(adjusted_mode, mode->vdisplay);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_INFO(dev)->gen >= 4) {
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
mode->vdisplay;
u32 scaled_height = mode->hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
adjusted_mode->vdisplay;
 
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
pfit_control |= PFIT_ENABLE |
*pfit_control |= PFIT_ENABLE |
PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
pfit_control |= PFIT_ENABLE |
*pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
else if (adjusted_mode->hdisplay != mode->hdisplay)
pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
} else {
else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}
 
static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
mode->vdisplay;
u32 scaled_height = mode->hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
adjusted_mode->vdisplay;
u32 bits;
 
/*
* For earlier chips we have to calculate the scaling
* ratio by hand and program it into the
227,14 → 211,16
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode,
scaled_height /
mode->vdisplay);
pipe_config->pipe_src_h);
 
border = LVDS_BORDER_ENABLE;
if (mode->vdisplay != adjusted_mode->vdisplay) {
u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
*border = LVDS_BORDER_ENABLE;
if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
bits = panel_fitter_scaling(pipe_config->pipe_src_h,
adjusted_mode->vdisplay);
 
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
*pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
241,33 → 227,68
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
scaled_width /
mode->hdisplay);
pipe_config->pipe_src_w);
 
border = LVDS_BORDER_ENABLE;
if (mode->hdisplay != adjusted_mode->hdisplay) {
u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
*border = LVDS_BORDER_ENABLE;
if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
bits = panel_fitter_scaling(pipe_config->pipe_src_w,
adjusted_mode->hdisplay);
 
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
pfit_control |= (PFIT_ENABLE |
*pfit_control |= (PFIT_ENABLE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
} else {
/* Aspects match, let hw scale both directions */
pfit_control |= (PFIT_ENABLE |
*pfit_control |= (PFIT_ENABLE |
VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
}
}
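
/*
 * The pillar/letter decision above compares panel and source aspect
 * ratios without any division, by cross-multiplying. Worked example
 * (illustrative numbers): a 1024x768 source on a 1920x1080 panel gives
 * scaled_width = 1920 * 768 = 1474560 and
 * scaled_height = 1024 * 1080 = 1105920; scaled_width is larger, so the
 * panel is wider than the source and the image is pillarboxed.
 */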
 
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode;
 
adjusted_mode = &pipe_config->adjusted_mode;
 
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->vdisplay == pipe_config->pipe_src_h)
goto out;
 
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_INFO(dev)->gen >= 4)
i965_scale_aspect(pipe_config, &pfit_control);
else
i9xx_scale_aspect(pipe_config, &pfit_control,
&pfit_pgm_ratios, &border);
break;
case DRM_MODE_SCALE_FULLSCREEN:
/*
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
if (mode->vdisplay != adjusted_mode->vdisplay ||
mode->hdisplay != adjusted_mode->hdisplay) {
if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
304,192 → 325,263
pipe_config->gmch_pfit.lvds_border_bits = border;
}
 
static int is_backlight_combination_mode(struct drm_device *dev)
static int i915_panel_invert_brightness;
MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
 
if (INTEL_INFO(dev)->gen >= 4)
return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
WARN_ON(panel->backlight.max == 0);
 
if (IS_GEN2(dev))
return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
if (i915_panel_invert_brightness < 0)
return val;
 
return 0;
if (i915_panel_invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val;
}
 
/* XXX: query mode clock or hardware clock and program max PWM appropriately
* when it's 0.
*/
static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
return val;
}
 
static u32 bdw_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
// WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
 
/* Restore the CTL value if it was lost, e.g. after a GPU reset */
static u32 pch_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_SPLIT(dev_priv->dev)) {
val = I915_READ(BLC_PWM_PCH_CTL2);
if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
dev_priv->regfile.saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
val = dev_priv->regfile.saveBLC_PWM_CTL2;
I915_WRITE(BLC_PWM_PCH_CTL2, val);
return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
} else {
val = I915_READ(BLC_PWM_CTL);
if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
dev_priv->regfile.saveBLC_PWM_CTL = val;
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->regfile.saveBLC_PWM_CTL2 =
I915_READ(BLC_PWM_CTL2);
} else if (val == 0) {
val = dev_priv->regfile.saveBLC_PWM_CTL;
I915_WRITE(BLC_PWM_CTL, val);
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(BLC_PWM_CTL2,
dev_priv->regfile.saveBLC_PWM_CTL2);
}
}
 
return val;
}
 
static u32 intel_panel_get_max_backlight(struct drm_device *dev)
static u32 i9xx_get_backlight(struct intel_connector *connector)
{
u32 max;
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u32 val;
 
max = i915_read_blc_pwm_ctl(dev);
 
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
else
max >>= 16;
val >>= 1;
 
if (is_backlight_combination_mode(dev))
max *= 0xff;
if (panel->backlight.combination_mode) {
u8 lbpc;
 
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
 
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
 
return max;
return val;
}
 
static int i915_panel_invert_brightness;
MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (i915_panel_invert_brightness < 0)
return val;
 
if (i915_panel_invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
u32 max = intel_panel_get_max_backlight(dev);
if (max)
return max - val;
return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
 
return val;
static u32 vlv_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
enum pipe pipe = intel_get_pipe_from_connector(connector);
 
return _vlv_get_backlight(dev, pipe);
}
 
static u32 intel_panel_get_backlight(struct drm_device *dev)
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
if (HAS_PCH_SPLIT(dev)) {
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
val = dev_priv->display.get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
 
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
}
 
val = intel_panel_compute_brightness(dev, val);
 
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
 
static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
static void bdw_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CPU_CTL, val | level);
u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
}
 
static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
static void pch_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
 
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(dev, level);
tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
}
 
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
 
if (is_backlight_combination_mode(dev)) {
u32 max = intel_panel_get_max_backlight(dev);
WARN_ON(panel->backlight.max == 0);
 
if (panel->backlight.combination_mode) {
u8 lbpc;
 
/* we're screwed, but keep behaviour backwards compatible */
if (!max)
max = 1;
 
lbpc = level * 0xfe / max + 1;
lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc;
pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
}
 
tmp = I915_READ(BLC_PWM_CTL);
if (INTEL_INFO(dev)->gen < 4)
if (IS_GEN4(dev)) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
}
 
tmp = I915_READ(BLC_PWM_CTL) & ~mask;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
 
static void vlv_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
 
tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
 
static void
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
 
level = intel_panel_compute_brightness(connector, level);
dev_priv->display.set_backlight(connector, level);
}
 
/* set backlight brightness to level in range [0..max] */
void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
u32 max)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 freq;
unsigned long flags;
 
dev_priv->backlight.level = level;
// if (dev_priv->backlight.device)
// dev_priv->backlight.device->props.brightness = level;
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
 
// if (dev_priv->backlight.enabled)
// intel_panel_actually_set_backlight(dev, level);
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
WARN_ON(panel->backlight.max == 0);
 
/* scale to hardware max, but be careful to not overflow */
freq = panel->backlight.max;
if (freq < max)
level = level * freq / max;
else
level = freq / max * level;
 
panel->backlight.level = level;
// if (panel->backlight.device)
// panel->backlight.device->props.brightness = level;
 
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, level);
 
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
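
/*
 * Worked example of the overflow-careful scaling above (illustrative
 * numbers): for user level 128 of max 255 and a hardware max of 7812,
 * freq >= max, so the code computes (7812 / 255) * 128 = 30 * 128 =
 * 3840 rather than the more precise 128 * 7812 / 255 = 3921, trading a
 * little accuracy for not overflowing the 32-bit multiply when the
 * hardware maximum is large.
 */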
 
void intel_panel_disable_backlight(struct drm_device *dev)
static void pch_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
 
intel_panel_actually_set_backlight(connector, 0);
 
tmp = I915_READ(BLC_PWM_CPU_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
 
tmp = I915_READ(BLC_PWM_PCH_CTL1);
I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
 
static void i9xx_disable_backlight(struct intel_connector *connector)
{
intel_panel_actually_set_backlight(connector, 0);
}
 
static void i965_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
 
intel_panel_actually_set_backlight(connector, 0);
 
tmp = I915_READ(BLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
 
static void vlv_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
 
intel_panel_actually_set_backlight(connector, 0);
 
tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
}
 
void intel_panel_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
unsigned long flags;
 
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
 
/*
* Do not disable backlight on the vgaswitcheroo path. When switching
* away from i915, the other client may depend on i915 to handle the
501,102 → 593,217
return;
}
 
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
dev_priv->backlight.enabled = false;
intel_panel_actually_set_backlight(dev, 0);
panel->backlight.enabled = false;
dev_priv->display.disable_backlight(connector);
 
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
 
reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
static void bdw_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2;
 
I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
 
if (HAS_PCH_SPLIT(dev)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp &= ~BLM_PCH_PWM_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
DRM_DEBUG_KMS("pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
}
}
 
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
pch_ctl2 = panel->backlight.max << 16;
I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
 
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
 
/* BDW always uses the pch pwm controls. */
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
 
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
POSTING_READ(BLC_PWM_PCH_CTL1);
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
 
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
}
 
void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe)
static void pch_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
unsigned long flags;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
 
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
WARN(1, "cpu backlight already enabled\n");
cpu_ctl2 &= ~BLM_PWM_ENABLE;
I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
}
 
if (dev_priv->backlight.level == 0) {
dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
// if (dev_priv->backlight.device)
// dev_priv->backlight.device->props.brightness =
// dev_priv->backlight.level;
pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
DRM_DEBUG_KMS("pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
}
 
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
if (cpu_transcoder == TRANSCODER_EDP)
cpu_ctl2 = BLM_TRANSCODER_EDP;
else
cpu_ctl2 = BLM_PIPE(cpu_transcoder);
I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
POSTING_READ(BLC_PWM_CPU_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
 
reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
pch_ctl2 = panel->backlight.max << 16;
I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
 
tmp = I915_READ(reg);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
 
/* Note that this can also get called through dpms changes. And
* we don't track the backlight dpms state, hence check whether
* we have to do anything first. */
if (tmp & BLM_PWM_ENABLE)
goto set_level;
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
POSTING_READ(BLC_PWM_PCH_CTL1);
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
}
 
if (INTEL_INFO(dev)->num_pipes == 3)
tmp &= ~BLM_PIPE_SELECT_IVB;
else
tmp &= ~BLM_PIPE_SELECT;
static void i9xx_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
 
if (cpu_transcoder == TRANSCODER_EDP)
tmp |= BLM_TRANSCODER_EDP;
else
tmp |= BLM_PIPE(cpu_transcoder);
tmp &= ~BLM_PWM_ENABLE;
ctl = I915_READ(BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
WARN(1, "backlight already enabled\n");
I915_WRITE(BLC_PWM_CTL, 0);
}
 
I915_WRITE(reg, tmp);
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
freq = panel->backlight.max;
if (panel->backlight.combination_mode)
freq /= 0xff;
 
if (HAS_PCH_SPLIT(dev) &&
!(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
ctl = freq << 17;
if (IS_GEN2(dev) && panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
 
I915_WRITE(BLC_PWM_CTL, ctl);
POSTING_READ(BLC_PWM_CTL);
 
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
}
 
static void i965_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2, freq;
 
ctl2 = I915_READ(BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
WARN(1, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
I915_WRITE(BLC_PWM_CTL2, ctl2);
}
 
set_level:
/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
* BLC_PWM_CPU_CTL may be cleared to zero automatically when these
* registers are set.
*/
dev_priv->backlight.enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
freq = panel->backlight.max;
if (panel->backlight.combination_mode)
freq /= 0xff;
 
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
ctl = freq << 16;
I915_WRITE(BLC_PWM_CTL, ctl);
 
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
I915_WRITE(BLC_PWM_CTL2, ctl2);
POSTING_READ(BLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
}
 
static void intel_panel_init_backlight(struct drm_device *dev)
static void vlv_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2;
 
dev_priv->backlight.level = intel_panel_get_backlight(dev);
dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
WARN(1, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
}
 
ctl = panel->backlight.max << 16;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
 
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
ctl2 = 0;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
 
void intel_panel_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
unsigned long flags;
 
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
 
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
WARN_ON(panel->backlight.max == 0);
 
if (panel->backlight.level == 0) {
panel->backlight.level = panel->backlight.max;
// if (panel->backlight.device)
// panel->backlight.device->props.brightness =
// panel->backlight.level;
}
 
dev_priv->display.enable_backlight(connector);
panel->backlight.enabled = true;
 
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
 
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
619,85 → 826,378
}
}
 
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
static int intel_panel_update_status(struct backlight_device *bd)
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
intel_panel_set_backlight(dev, bd->props.brightness,
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
 
mutex_lock(&dev->mode_config.mutex);
DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector, bd->props.brightness,
bd->props.max_brightness);
mutex_unlock(&dev->mode_config.mutex);
return 0;
}
 
static int intel_panel_get_brightness(struct backlight_device *bd)
static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
return intel_panel_get_backlight(dev);
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev->mode_config.mutex);
ret = intel_panel_get_backlight(connector);
mutex_unlock(&dev->mode_config.mutex);
intel_runtime_pm_put(dev_priv);
 
return ret;
}
 
static const struct backlight_ops intel_panel_bl_ops = {
.update_status = intel_panel_update_status,
.get_brightness = intel_panel_get_brightness,
static const struct backlight_ops intel_backlight_device_ops = {
.update_status = intel_backlight_device_update_status,
.get_brightness = intel_backlight_device_get_brightness,
};
 
int intel_panel_setup_backlight(struct drm_connector *connector)
static int intel_backlight_device_register(struct intel_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
unsigned long flags;
 
intel_panel_init_backlight(dev);
 
if (WARN_ON(dev_priv->backlight.device))
if (WARN_ON(panel->backlight.device))
return -ENODEV;
 
BUG_ON(panel->backlight.max == 0);
 
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.brightness = dev_priv->backlight.level;
props.brightness = panel->backlight.level;
props.max_brightness = panel->backlight.max;
 
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
props.max_brightness = intel_panel_get_max_backlight(dev);
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
/*
* Note: using the same name independent of the connector prevents
* registration of multiple backlight devices in the driver.
*/
panel->backlight.device =
backlight_device_register("intel_backlight",
connector->base.kdev,
connector,
&intel_backlight_device_ops, &props);
 
if (props.max_brightness == 0) {
DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
if (IS_ERR(panel->backlight.device)) {
DRM_ERROR("Failed to register backlight: %ld\n",
PTR_ERR(panel->backlight.device));
panel->backlight.device = NULL;
return -ENODEV;
}
dev_priv->backlight.device =
backlight_device_register("intel_backlight",
&connector->kdev, dev,
&intel_panel_bl_ops, &props);
return 0;
}
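Because the device is registered under the fixed name "intel_backlight", it surfaces in sysfs as /sys/class/backlight/intel_backlight/. A minimal userspace sketch of driving it through that interface (path derived from the name above; helper name hypothetical):

#include <stdio.h>

/* Hypothetical helper: write a brightness level through the sysfs file
 * that the backlight class exposes for the device registered above. */
static int set_intel_backlight(int level)
{
	FILE *f = fopen("/sys/class/backlight/intel_backlight/brightness", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", level);
	return fclose(f);
}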
 
if (IS_ERR(dev_priv->backlight.device)) {
DRM_ERROR("Failed to register backlight: %ld\n",
PTR_ERR(dev_priv->backlight.device));
dev_priv->backlight.device = NULL;
static void intel_backlight_device_unregister(struct intel_connector *connector)
{
struct intel_panel *panel = &connector->panel;
 
if (panel->backlight.device) {
backlight_device_unregister(panel->backlight.device);
panel->backlight.device = NULL;
}
}
#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
static int intel_backlight_device_register(struct intel_connector *connector)
{
return 0;
}
static void intel_backlight_device_unregister(struct intel_connector *connector)
{
}
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
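The stub pair above is the usual idiom for optional kernel features: with the Kconfig option disabled, callers still link against no-op versions and need no #ifdefs of their own. A generic, hedged sketch of the same idiom (all names hypothetical; IS_ENABLED simplified to a plain macro so the snippet is self-contained):

#define IS_ENABLED(option) (option)	/* simplified stand-in */
#define CONFIG_EXAMPLE_FEATURE 1	/* hypothetical option */

struct example_ctx { int id; };

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
static int example_register(struct example_ctx *ctx)
{
	return ctx->id;			/* real work would happen here */
}
#else
static int example_register(struct example_ctx *ctx)
{
	(void)ctx;			/* compiled out: succeed silently */
	return 0;
}
#endif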
 
/*
* Note: The setup hooks can't assume pipe is set!
*
* XXX: Query mode clock or hardware clock and program PWM modulation frequency
* appropriately when it's 0. Use VBT and/or sane defaults.
*/
static int bdw_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, val;
 
pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
 
pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
if (!panel->backlight.max)
return -ENODEV;
 
val = bdw_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
panel->backlight.level != 0;
 
return 0;
}
 
static int pch_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
 
pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
 
pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
if (!panel->backlight.max)
return -ENODEV;
 
val = pch_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
 
return 0;
}
 
void intel_panel_destroy_backlight(struct drm_device *dev)
static int i9xx_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->backlight.device) {
backlight_device_unregister(dev_priv->backlight.device);
dev_priv->backlight.device = NULL;
struct intel_panel *panel = &connector->panel;
u32 ctl, val;
 
ctl = I915_READ(BLC_PWM_CTL);
 
if (IS_GEN2(dev))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
 
if (IS_PINEVIEW(dev))
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
 
panel->backlight.max = ctl >> 17;
if (panel->backlight.combination_mode)
panel->backlight.max *= 0xff;
 
if (!panel->backlight.max)
return -ENODEV;
 
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
panel->backlight.enabled = panel->backlight.level != 0;
 
return 0;
}
 
static int i965_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val;
 
ctl2 = I915_READ(BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
 
ctl = I915_READ(BLC_PWM_CTL);
panel->backlight.max = ctl >> 16;
if (panel->backlight.combination_mode)
panel->backlight.max *= 0xff;
 
if (!panel->backlight.max)
return -ENODEV;
 
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
panel->backlight.level != 0;
 
return 0;
}
#else
 
static int vlv_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe;
u32 ctl, ctl2, val;
 
for_each_pipe(pipe) {
u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
 
/* Skip if the modulation freq is already set */
if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
continue;
 
cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
cur_val);
}
 
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
 
ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
return -ENODEV;
 
val = _vlv_get_backlight(dev, PIPE_A);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
panel->backlight.level != 0;
 
return 0;
}
 
int intel_panel_setup_backlight(struct drm_connector *connector)
{
intel_panel_init_backlight(connector->dev);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
unsigned long flags;
int ret;
 
/* set level and max in panel struct */
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
ret = dev_priv->display.setup_backlight(intel_connector);
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 
if (ret) {
DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
drm_get_connector_name(connector));
return ret;
}
 
intel_backlight_device_register(intel_connector);
 
panel->backlight.present = true;
 
DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
"sysfs interface %sregistered\n",
panel->backlight.enabled ? "enabled" : "disabled",
panel->backlight.level, panel->backlight.max,
panel->backlight.device ? "" : "not ");
 
return 0;
}
 
void intel_panel_destroy_backlight(struct drm_device *dev)
void intel_panel_destroy_backlight(struct drm_connector *connector)
{
return;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
 
panel->backlight.present = false;
intel_backlight_device_unregister(intel_connector);
}
#endif
 
/**
* intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
* @dev: drm device
* @fixed_mode: panel native mode
* @connector: LVDS/eDP connector
*
* Find the reduced downclock for LVDS/eDP in EDID.
* Returns the downclock mode if one is found, otherwise NULL.
*/
struct drm_display_mode *
intel_find_panel_downclock(struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector)
{
struct drm_display_mode *scan, *tmp_mode;
int temp_downclock;
 
temp_downclock = fixed_mode->clock;
tmp_mode = NULL;
 
list_for_each_entry(scan, &connector->probed_modes, head) {
/*
* If one mode has the same resolution as the fixed_panel
* mode but a different refresh rate, then the reduced
* downclock has been found. In that case we can program
* different FPx0/1 values to dynamically select between
* the low and high frequency.
*/
if (scan->hdisplay == fixed_mode->hdisplay &&
scan->hsync_start == fixed_mode->hsync_start &&
scan->hsync_end == fixed_mode->hsync_end &&
scan->htotal == fixed_mode->htotal &&
scan->vdisplay == fixed_mode->vdisplay &&
scan->vsync_start == fixed_mode->vsync_start &&
scan->vsync_end == fixed_mode->vsync_end &&
scan->vtotal == fixed_mode->vtotal) {
if (scan->clock < temp_downclock) {
/*
* A downclock has already been found, but
* keep looking for an even lower one.
*/
temp_downclock = scan->clock;
tmp_mode = scan;
}
}
}
 
if (temp_downclock < fixed_mode->clock)
return drm_mode_duplicate(dev, tmp_mode);
else
return NULL;
}
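A hedged sketch of how a panel init path might consume this helper; the downclock_mode storage matches the destroy path shown later in this diff, while the surrounding init flow is assumed:

	/* Hypothetical caller, e.g. from an LVDS/eDP panel init path. */
	panel->downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
							   connector);
	if (panel->downclock_mode)
		DRM_DEBUG_KMS("downclock found: %d kHz (native %d kHz)\n",
			      panel->downclock_mode->clock,
			      fixed_mode->clock);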
 
/* Set up chip specific backlight functions */
void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_BROADWELL(dev)) {
dev_priv->display.setup_backlight = bdw_setup_backlight;
dev_priv->display.enable_backlight = bdw_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
dev_priv->display.set_backlight = bdw_set_backlight;
dev_priv->display.get_backlight = bdw_get_backlight;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.setup_backlight = pch_setup_backlight;
dev_priv->display.enable_backlight = pch_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
dev_priv->display.set_backlight = pch_set_backlight;
dev_priv->display.get_backlight = pch_get_backlight;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.setup_backlight = vlv_setup_backlight;
dev_priv->display.enable_backlight = vlv_enable_backlight;
dev_priv->display.disable_backlight = vlv_disable_backlight;
dev_priv->display.set_backlight = vlv_set_backlight;
dev_priv->display.get_backlight = vlv_get_backlight;
} else if (IS_GEN4(dev)) {
dev_priv->display.setup_backlight = i965_setup_backlight;
dev_priv->display.enable_backlight = i965_enable_backlight;
dev_priv->display.disable_backlight = i965_disable_backlight;
dev_priv->display.set_backlight = i9xx_set_backlight;
dev_priv->display.get_backlight = i9xx_get_backlight;
} else {
dev_priv->display.setup_backlight = i9xx_setup_backlight;
dev_priv->display.enable_backlight = i9xx_enable_backlight;
dev_priv->display.disable_backlight = i9xx_disable_backlight;
dev_priv->display.set_backlight = i9xx_set_backlight;
dev_priv->display.get_backlight = i9xx_get_backlight;
}
}
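Everything else in the driver then reaches the backlight exclusively through this table, so the generic paths stay platform-agnostic. A minimal sketch of the dispatch (hook signatures assumed from the calls elsewhere in this diff):

	/* Hypothetical round-trip through the hooks selected above. */
	u32 level = dev_priv->display.get_backlight(connector);

	dev_priv->display.set_backlight(connector, level);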
 
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode)
{
713,4 → 1213,8
 
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
 
if (panel->downclock_mode)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
}
/drivers/video/drm/i915/intel_pm.c
32,6 → 32,8
//#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
 
#include <drm/i915_powerwell.h>
 
#define FORCEWAKE_ACK_TIMEOUT_MS 2
 
#define assert_spin_locked(x)
38,8 → 40,39
 
void getrawmonotonic(struct timespec *ts);
 
static inline void outb(u8 v, u16 port)
{
asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
static inline u8 inb(u16 port)
{
u8 v;
asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
return v;
}
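These helpers issue raw x86 port I/O. A small sketch of typical use, writing to port 0x80 (the conventional PC POST/debug port, chosen here purely for illustration):

static inline void io_delay(void)
{
	/* A write to port 0x80 is the traditional ~1us bus delay on PCs. */
	outb(0, 0x80);
}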
 
 
/**
* RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while at this stage. This
* stage is entered automatically when the GPU is idle when RC6 support is
* enabled, and as soon as a new workload arises the GPU wakes up
* automatically as well.
*
* There are different RC6 modes available in Intel GPUs, which differ from
* each other in the latency required to enter and leave RC6 and in the
* voltage consumed by the GPU in the different states.
*
* The combination of the following flags defines which states the GPU is
* allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6 and
* RC6pp is the deepest RC6. Their support by hardware varies according to
* the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
* the one which brings the most power savings; deeper states save more
* power, but require higher latency to switch to and wake up.
*/
#define INTEL_RC6_ENABLE (1<<0)
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
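The flags are meant to be OR-ed into a mask describing which RC6 states may be entered. A hedged sketch of composing such a mask (helper name and depth parameter hypothetical):

/* Deeper RC6 states only make sense on top of the shallower ones, so the
 * mask is built up cumulatively. */
static int example_rc6_mask(int max_depth)
{
	int mask = 0;

	if (max_depth >= 1)
		mask |= INTEL_RC6_ENABLE;
	if (max_depth >= 2)
		mask |= INTEL_RC6p_ENABLE;
	if (max_depth >= 3)
		mask |= INTEL_RC6pp_ENABLE;
	return mask;
}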
 
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
* during in-memory transfers and, therefore, reduce power consumption.
51,14 → 84,6
* i915.i915_enable_fbc parameter
*/
 
static bool intel_crtc_active(struct drm_crtc *crtc)
{
/* Be paranoid as we can arrive here with only partial
* state retrieved from the hardware during setup.
*/
return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
}
 
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
81,7 → 106,7
DRM_DEBUG_KMS("disabled FBC\n");
}
 
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
91,13 → 116,16
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int plane, i;
u32 fbc_ctl, fbc_ctl2;
u32 fbc_ctl;
 
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
 
/* FBC_CTL wants 64B units */
/* FBC_CTL wants 32B or 64B units */
if (IS_GEN2(dev))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
105,18 → 133,23
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
 
if (IS_GEN4(dev)) {
u32 fbc_ctl2;
 
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= plane;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
}
 
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
131,7 → 164,7
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
 
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
140,7 → 173,6
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
 
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
147,9 → 179,6
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 
/* enable it... */
186,7 → 215,11
u32 blt_ecoskpd;
 
/* Make sure blitter notifies FBC of writes */
gen6_gt_force_wake_get(dev_priv);
 
/* The blitter is part of the Media power well on VLV. This parameter
* has no impact on other platforms for now. */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
 
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
197,10 → 230,11
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
gen6_gt_force_wake_put(dev_priv);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
 
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
209,7 → 243,6
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
 
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
217,12 → 250,11
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
/* Set persistent mode for front-buffer rendering, ala X. */
dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
249,18 → 281,6
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
if (IS_IVYBRIDGE(dev))
/* WaFbcDisableDpfcClockGating:ivb */
I915_WRITE(ILK_DSPCLK_GATE_D,
I915_READ(ILK_DSPCLK_GATE_D) &
~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
 
if (IS_HASWELL(dev))
/* WaFbcDisableDpfcClockGating:hsw */
I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
~HSW_DPFC_GATING_DISABLE);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
272,7 → 292,7
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
290,18 → 310,10
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
/* WaFbcDisableDpfcClockGating:ivb */
I915_WRITE(ILK_DSPCLK_GATE_D,
I915_READ(ILK_DSPCLK_GATE_D) |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw */
I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
HSW_BYPASS_FBC_QUEUE);
/* WaFbcDisableDpfcClockGating:hsw */
I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
HSW_DPFC_GATING_DISABLE);
}
 
I915_WRITE(SNB_DPFC_CTL_SA,
310,7 → 322,7
 
sandybridge_blit_fbc_update(dev);
 
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
 
bool intel_fbc_enabled(struct drm_device *dev)
337,8 → 349,7
* the prior work.
*/
if (work->crtc->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc,
work->interval);
dev_priv->display.enable_fbc(work->crtc);
 
dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
dev_priv->fbc.fb_id = work->crtc->fb->base.id;
375,7 → 386,7
dev_priv->fbc.fbc_work = NULL;
}
 
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
static void intel_enable_fbc(struct drm_crtc *crtc)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
386,16 → 397,15
 
intel_cancel_fbc_work(dev_priv);
 
work = kzalloc(sizeof *work, GFP_KERNEL);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc, interval);
dev_priv->display.enable_fbc(crtc);
return;
}
 
work->crtc = crtc;
work->fb = crtc->fb;
work->interval = interval;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
dev_priv->fbc.fbc_work = work;
466,9 → 476,10
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
unsigned int max_hdisplay, max_vdisplay;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
 
if (!I915_HAS_FBC(dev)) {
if (!HAS_FBC(dev)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
return;
}
490,7 → 501,7
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
if (intel_crtc_active(tmp_crtc) &&
!to_intel_crtc(tmp_crtc)->primary_disabled) {
to_intel_crtc(tmp_crtc)->primary_enabled) {
if (crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
510,6 → 521,7
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
adjusted_mode = &intel_crtc->config.adjusted_mode;
 
if (i915_enable_fbc < 0 &&
INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
522,8 → 534,8
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
531,22 → 543,22
}
 
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_hdisplay = 4096;
max_vdisplay = 2048;
max_width = 4096;
max_height = 2048;
} else {
max_hdisplay = 2048;
max_vdisplay = 1536;
max_width = 2048;
max_height = 1536;
}
if ((crtc->mode.hdisplay > max_hdisplay) ||
(crtc->mode.vdisplay > max_vdisplay)) {
if (intel_crtc->config.pipe_src_w > max_width ||
intel_crtc->config.pipe_src_h > max_height) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
intel_crtc->plane != 0) {
if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
DRM_DEBUG_KMS("plane not A, disabling compression\n");
goto out_disable;
}
 
608,7 → 620,7
intel_disable_fbc(dev);
}
 
intel_enable_fbc(crtc, 500);
intel_enable_fbc(crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
 
830,7 → 842,7
return size;
}
 
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
863,21 → 875,6
return size;
}
 
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
 
size = dsparb & 0x7f;
size >>= 1; /* Convert to cachelines */
 
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
 
return size;
}
 
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
956,7 → 953,7
2,
I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
static const struct intel_watermark_params i830_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
963,7 → 960,7
2,
I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
static const struct intel_watermark_params i845_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
971,65 → 968,6
I830_FIFO_LINE_SIZE
};
 
static const struct intel_watermark_params ironlake_display_wm_info = {
ILK_DISPLAY_FIFO,
ILK_DISPLAY_MAXWM,
ILK_DISPLAY_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
ILK_CURSOR_FIFO,
ILK_CURSOR_MAXWM,
ILK_CURSOR_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
ILK_DISPLAY_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_CURSOR_SR_FIFO,
ILK_CURSOR_MAX_SRWM,
ILK_CURSOR_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
 
static const struct intel_watermark_params sandybridge_display_wm_info = {
SNB_DISPLAY_FIFO,
SNB_DISPLAY_MAXWM,
SNB_DISPLAY_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
SNB_CURSOR_FIFO,
SNB_CURSOR_MAXWM,
SNB_CURSOR_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
SNB_DISPLAY_SR_FIFO,
SNB_DISPLAY_MAX_SRWM,
SNB_DISPLAY_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
SNB_CURSOR_SR_FIFO,
SNB_CURSOR_MAX_SRWM,
SNB_CURSOR_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
 
 
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
1095,8 → 1033,9
return enabled;
}
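The intel_calculate_wm() kerneldoc above is truncated by the diff, but the method it names is the classic latency one: convert the memory latency into the number of FIFO bytes drained at the pixel rate, and whatever remains of the FIFO is the watermark. A hedged sketch of that arithmetic (parameter names follow the listing; the exact clamping done by intel_calculate_wm is not reproduced):

#include <stdint.h>

static long example_calc_wm(unsigned long clock_in_khz, int fifo_size,
			    int pixel_size, unsigned long latency_ns,
			    int cacheline_size, int guard_size)
{
	/* bytes drained while waiting out the latency:
	 * pixels/us * bytes per pixel * latency in us */
	unsigned long bytes =
		((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000;
	unsigned long lines =
		(bytes + cacheline_size - 1) / cacheline_size;

	return fifo_size - ((long)lines + guard_size);
}

With assumed numbers: 148500 kHz at 4 bytes/pixel and 2000 ns of latency drains 148 * 4 * 2 = 1184 bytes, i.e. 19 cachelines of 64 bytes, so a 96-entry FIFO with a guard of 2 yields a watermark of 75.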
 
static void pineview_update_wm(struct drm_device *dev)
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
const struct cxsr_latency *latency;
1113,9 → 1052,13
 
crtc = single_enabled_crtc(dev);
if (crtc) {
int clock = crtc->mode.clock;
const struct drm_display_mode *adjusted_mode;
int pixel_size = crtc->fb->bits_per_pixel / 8;
int clock;
 
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
 
/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
pineview_display_wm.fifo_size,
1174,6 → 1117,7
int *cursor_wm)
{
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
int htotal, hdisplay, clock, pixel_size;
int line_time_us, line_count;
int entries, tlb_miss;
1185,9 → 1129,10
return false;
}
 
htotal = crtc->mode.htotal;
hdisplay = crtc->mode.hdisplay;
clock = crtc->mode.clock;
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
 
/* Use the small buffer method to calculate plane watermark */
1258,6 → 1203,7
int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
int hdisplay, htotal, pixel_size, clock;
unsigned long line_time_us;
int line_count, line_size;
1270,9 → 1216,10
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
hdisplay = crtc->mode.hdisplay;
htotal = crtc->mode.htotal;
clock = crtc->mode.clock;
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
 
line_time_us = (htotal * 1000) / clock;
1311,7 → 1258,7
if (!intel_crtc_active(crtc))
return false;
 
clock = crtc->mode.clock; /* VESA DOT Clock */
clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
 
entries = (clock / 1000) * pixel_size;
1373,8 → 1320,9
 
#define single_plane_enabled(mask) is_power_of_2(mask)
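The is_power_of_2() test works because the mask carries one bit per enabled pipe, so exactly one set bit means exactly one active plane. For instance (hypothetical values):

/* enabled = 1 << PIPE_A                   -> single_plane_enabled() true
 * enabled = (1 << PIPE_A) | (1 << PIPE_B) -> false: self-refresh skipped
 */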
 
static void valleyview_update_wm(struct drm_device *dev)
static void valleyview_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1432,8 → 1380,9
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
 
static void g4x_update_wm(struct drm_device *dev)
static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1484,8 → 1433,9
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
 
static void i965_update_wm(struct drm_device *dev)
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
int srwm = 1;
1496,9 → 1446,11
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
int clock = crtc->mode.clock;
int htotal = crtc->mode.htotal;
int hdisplay = crtc->mode.hdisplay;
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(crtc)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
1549,8 → 1501,9
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
 
static void i9xx_update_wm(struct drm_device *dev)
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
1565,16 → 1518,18
else if (!IS_GEN2(dev))
wm_info = &i915_wm_info;
else
wm_info = &i855_wm_info;
wm_info = &i830_wm_info;
 
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
 
planea_wm = intel_calculate_wm(crtc->mode.clock,
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
latency_ns);
enabled = crtc;
1584,11 → 1539,13
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
 
planeb_wm = intel_calculate_wm(crtc->mode.clock,
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
latency_ns);
if (enabled == NULL)
1609,15 → 1566,17
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
 
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
int clock = enabled->mode.clock;
int htotal = enabled->mode.htotal;
int hdisplay = enabled->mode.hdisplay;
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(enabled)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
1659,7 → 1618,7
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
DRM_DEBUG_KMS("memory self refresh enabled\n");
} else
DRM_DEBUG_KMS("memory self refresh disabled\n");
1666,10 → 1625,12
}
}
 
static void i830_update_wm(struct drm_device *dev)
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
int planea_wm;
 
1677,7 → 1638,9
if (crtc == NULL)
return;
 
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1688,418 → 1651,6
I915_WRITE(FW_BLC, fwater_lo);
}
 
/*
* Check the wm result.
*
* If any calculated watermark value is larger than the maximum value that
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
static bool ironlake_check_srwm(struct drm_device *dev, int level,
int fbc_wm, int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
" cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
 
if (fbc_wm > SNB_FBC_MAX_SRWM) {
DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
fbc_wm, SNB_FBC_MAX_SRWM, level);
 
/* fbc has its own way to disable the FBC WM */
I915_WRITE(DISP_ARB_CTL,
I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
return false;
} else if (INTEL_INFO(dev)->gen >= 6) {
/* enable FBC WM (except on ILK, where it must remain off) */
I915_WRITE(DISP_ARB_CTL,
I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
}
 
if (display_wm > display->max_wm) {
DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
display_wm, SNB_DISPLAY_MAX_SRWM, level);
return false;
}
 
if (cursor_wm > cursor->max_wm) {
DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
cursor_wm, SNB_CURSOR_MAX_SRWM, level);
return false;
}
 
if (!(fbc_wm || display_wm || cursor_wm)) {
DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
return false;
}
 
return true;
}
 
/*
* Compute the watermark values for WM[1-3].
*/
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *fbc_wm, int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
unsigned long line_time_us;
int hdisplay, htotal, pixel_size, clock;
int line_count, line_size;
int small, large;
int entries;
 
if (!latency_ns) {
*fbc_wm = *display_wm = *cursor_wm = 0;
return false;
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
hdisplay = crtc->mode.hdisplay;
htotal = crtc->mode.htotal;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
 
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*display_wm = entries + display->guard_size;
 
/*
* Spec says:
* FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
*/
*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
 
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
 
return ironlake_check_srwm(dev, level,
*fbc_wm, *display_wm, *cursor_wm,
display, cursor);
}
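The spec formula quoted inside the function is easy to sanity-check by hand. A worked example with assumed numbers:

/* display_wm = 38, line_size = 1920 * 4 = 7680 bytes
 * fbc_wm = DIV_ROUND_UP(38 * 64, 7680) + 2
 *        = DIV_ROUND_UP(2432, 7680) + 2 = 1 + 2 = 3
 */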
 
static void ironlake_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
 
enabled = 0;
if (g4x_compute_wm0(dev, PIPE_A,
&ironlake_display_wm_info,
dev_priv->wm.pri_latency[0] * 100,
&ironlake_cursor_wm_info,
dev_priv->wm.cur_latency[0] * 100,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1 << PIPE_A;
}
 
if (g4x_compute_wm0(dev, PIPE_B,
&ironlake_display_wm_info,
dev_priv->wm.pri_latency[0] * 100,
&ironlake_cursor_wm_info,
dev_priv->wm.cur_latency[0] * 100,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1 << PIPE_B;
}
 
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
if (!single_plane_enabled(enabled))
return;
enabled = ffs(enabled) - 1;
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
dev_priv->wm.pri_latency[1] * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
dev_priv->wm.pri_latency[2] * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/*
* WM3 is unsupported on ILK, probably because we don't have latency
* data for that power state
*/
}
 
static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
 
enabled = 0;
if (g4x_compute_wm0(dev, PIPE_A,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEA_ILK);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEA_ILK, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1 << PIPE_A;
}
 
if (g4x_compute_wm0(dev, PIPE_B,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEB_ILK);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEB_ILK, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1 << PIPE_B;
}
 
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*
* SNB supports 3 levels of watermark.
*
* WM1/WM2/WM3 watermarks have to be enabled in ascending order,
* and disabled in descending order.
*
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
if (!single_plane_enabled(enabled) ||
dev_priv->sprite_scaling_enabled)
return;
enabled = ffs(enabled) - 1;
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
dev_priv->wm.pri_latency[1] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
dev_priv->wm.pri_latency[2] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM3 */
if (!ironlake_compute_srwm(dev, 3, enabled,
dev_priv->wm.pri_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
(dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
}
 
static void ivybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
unsigned int enabled;
 
enabled = 0;
if (g4x_compute_wm0(dev, PIPE_A,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEA_ILK);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEA_ILK, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1 << PIPE_A;
}
 
if (g4x_compute_wm0(dev, PIPE_B,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEB_ILK);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEB_ILK, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1 << PIPE_B;
}
 
if (g4x_compute_wm0(dev, PIPE_C,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEC_IVB);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEC_IVB, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1 << PIPE_C;
}
 
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*
* SNB supports 3 levels of watermark.
*
* WM1/WM2/WM3 watermarks have to be enabled in ascending order,
* and disabled in descending order.
*
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
if (!single_plane_enabled(enabled) ||
dev_priv->sprite_scaling_enabled)
return;
enabled = ffs(enabled) - 1;
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
dev_priv->wm.pri_latency[1] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
dev_priv->wm.pri_latency[2] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
dev_priv->wm.pri_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &ignore_cursor_wm) ||
!ironlake_compute_srwm(dev, 3, enabled,
dev_priv->wm.cur_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
(dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
}
 
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct drm_crtc *crtc)
{
2106,7 → 1657,7
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
 
pixel_rate = intel_crtc->config.adjusted_mode.clock;
pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
 
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
2115,8 → 1666,8
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
pipe_w = intel_crtc->config.requested_mode.hdisplay;
pipe_h = intel_crtc->config.requested_mode.vdisplay;
pipe_w = intel_crtc->config.pipe_src_w;
pipe_h = intel_crtc->config.pipe_src_h;
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
if (pipe_w < pfit_w)
2168,7 → 1719,7
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
 
struct hsw_pipe_wm_parameters {
struct ilk_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
uint32_t pixel_rate;
2177,7 → 1728,7
struct intel_plane_wm_parameters cur;
};
 
struct hsw_wm_maximums {
struct ilk_wm_maximums {
uint16_t pri;
uint16_t spr;
uint16_t cur;
2184,20 → 1735,11
uint16_t fbc;
};
 
struct hsw_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
uint32_t wm_lp_spr[3];
uint32_t wm_linetime[3];
bool enable_fbc_wm;
};
 
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
bool sprites_enabled;
bool sprites_scaled;
bool fbc_wm_enabled;
};
 
/*
2204,7 → 1746,7
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t mem_value,
bool is_lp)
{
2233,7 → 1775,7
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t mem_value)
{
uint32_t method1, method2;
2256,7 → 1798,7
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t mem_value)
{
if (!params->active || !params->cur.enabled)
2270,7 → 1812,7
}
 
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
uint32_t pri_val)
{
if (!params->active || !params->pri.enabled)
2283,7 → 1825,9
 
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen >= 7)
if (INTEL_INFO(dev)->gen >= 8)
return 3072;
else if (INTEL_INFO(dev)->gen >= 7)
return 768;
else
return 512;
2328,7 → 1872,9
}
 
/* clamp to max that the registers can hold */
if (INTEL_INFO(dev)->gen >= 7)
if (INTEL_INFO(dev)->gen >= 8)
max = level == 0 ? 255 : 2047;
else if (INTEL_INFO(dev)->gen >= 7)
/* IVB/HSW primary/sprite plane watermarks */
max = level == 0 ? 127 : 1023;
else if (!is_sprite)
2358,26 → 1904,29
}
 
/* Calculate the maximum FBC watermark */
static unsigned int ilk_fbc_wm_max(void)
static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
{
/* max that registers can hold */
if (INTEL_INFO(dev)->gen >= 8)
return 31;
else
return 15;
}
 
static void ilk_wm_max(struct drm_device *dev,
static void ilk_compute_wm_maximums(struct drm_device *dev,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct hsw_wm_maximums *max)
struct ilk_wm_maximums *max)
{
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
max->fbc = ilk_fbc_wm_max();
max->fbc = ilk_fbc_wm_max(dev);
}
 
static bool ilk_check_wm(int level,
const struct hsw_wm_maximums *max,
static bool ilk_validate_wm_level(int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
{
bool ret;
2414,14 → 1963,12
result->enable = true;
}
 
DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
 
return ret;
}
 
static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
int level,
struct hsw_pipe_wm_parameters *p,
const struct ilk_pipe_wm_parameters *p,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2442,55 → 1989,6
result->enable = true;
}
 
static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
int level, struct hsw_wm_maximums *max,
struct hsw_pipe_wm_parameters *params,
struct intel_wm_level *result)
{
enum pipe pipe;
struct intel_wm_level res[3];
 
for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
 
result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
result->enable = true;
 
return ilk_check_wm(level, max, result);
}
 
static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe,
struct hsw_pipe_wm_parameters *params)
{
uint32_t pri_val, cur_val, spr_val;
/* WM0 latency values stored in 0.1us units */
uint16_t pri_latency = dev_priv->wm.pri_latency[0];
uint16_t spr_latency = dev_priv->wm.spr_latency[0];
uint16_t cur_latency = dev_priv->wm.cur_latency[0];
 
pri_val = ilk_compute_pri_wm(params, pri_latency, false);
spr_val = ilk_compute_spr_wm(params, spr_latency);
cur_val = ilk_compute_cur_wm(params, cur_latency);
 
WARN(pri_val > 127,
"Primary WM error, mode not supported for pipe %c\n",
pipe_name(pipe));
WARN(spr_val > 127,
"Sprite WM error, mode not supported for pipe %c\n",
pipe_name(pipe));
WARN(cur_val > 63,
"Cursor WM error, mode not supported for pipe %c\n",
pipe_name(pipe));
 
return (pri_val << WM0_PIPE_PLANE_SHIFT) |
(spr_val << WM0_PIPE_SPRITE_SHIFT) |
cur_val;
}
 
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
2505,8 → 2003,9
/* The watermarks are computed based on how long it takes to fill a
* single row at the given clock rate, multiplied by 8.
*/
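/* Worked example with hypothetical numbers: a 1920x1080 mode with
* crtc_htotal 2200 and crtc_clock 148500 kHz gives
* linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119,
* i.e. ~14.8us per row, expressed in 1/8th-microsecond units.
*/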
linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
intel_ddi_get_cdclk_freq(dev_priv));
 
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2517,7 → 2016,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_HASWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
wm[0] = (sskpd >> 56) & 0xFF;
2562,20 → 2061,23
wm[3] *= 2;
}
 
static int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4;
else if (INTEL_INFO(dev)->gen >= 6)
return 3;
else
return 2;
}
 
static void intel_print_wm_latency(struct drm_device *dev,
const char *name,
const uint16_t wm[5])
{
int level, max_level;
int level, max_level = ilk_wm_max_level(dev);
 
/* how many WM levels are we expecting */
if (IS_HASWELL(dev))
max_level = 4;
else if (INTEL_INFO(dev)->gen >= 6)
max_level = 3;
else
max_level = 2;
 
for (level = 0; level <= max_level; level++) {
unsigned int latency = wm[level];
 
2614,35 → 2116,22
intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
 
static void hsw_compute_wm_parameters(struct drm_device *dev,
struct hsw_pipe_wm_parameters *params,
struct hsw_wm_maximums *lp_max_1_2,
struct hsw_wm_maximums *lp_max_5_6)
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
struct ilk_pipe_wm_parameters *p,
struct intel_wm_config *config)
{
struct drm_crtc *crtc;
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
enum pipe pipe;
struct intel_wm_config config = {};
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct hsw_pipe_wm_parameters *p;
 
pipe = intel_crtc->pipe;
p = &params[pipe];
 
p->active = intel_crtc_active(crtc);
if (!p->active)
continue;
 
config.num_pipes_active++;
 
p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
if (p->active) {
p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
p->pri.horiz_pixels =
intel_crtc->config.requested_mode.hdisplay;
p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
p->cur.horiz_pixels = 64;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
2649,103 → 2138,232
p->cur.enabled = true;
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
config->num_pipes_active += intel_crtc_active(crtc);
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
struct hsw_pipe_wm_parameters *p;
 
pipe = intel_plane->pipe;
p = &params[pipe];
 
if (intel_plane->pipe == pipe)
p->spr = intel_plane->wm;
 
config.sprites_enabled |= p->spr.enabled;
config.sprites_scaled |= p->spr.scaled;
config->sprites_enabled |= intel_plane->wm.enabled;
config->sprites_scaled |= intel_plane->wm.scaled;
}
}
 
ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
const struct ilk_pipe_wm_parameters *params,
struct intel_pipe_wm *pipe_wm)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int level, max_level = ilk_wm_max_level(dev);
/* LP0 watermark maximums depend on this pipe alone */
struct intel_wm_config config = {
.num_pipes_active = 1,
.sprites_enabled = params->spr.enabled,
.sprites_scaled = params->spr.scaled,
};
struct ilk_wm_maximums max;
 
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
else
*lp_max_5_6 = *lp_max_1_2;
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
/* ILK/SNB: LP2+ watermarks only w/o sprites */
if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
max_level = 1;
 
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
if (params->spr.scaled)
max_level = 0;
 
for (level = 0; level <= max_level; level++)
ilk_compute_wm_level(dev_priv, level, params,
&pipe_wm->wm[level]);
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
 
/* At least LP0 must be valid */
return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
}
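/*
* A minimal sketch of the level clamping performed above, assuming the
* per-platform maximums from ilk_wm_max_level(); the helper name and
* parameters are illustrative, not from the diff.
*/
static int sketch_effective_max_level(int gen, bool is_hsw_or_bdw,
				      bool sprites_enabled, bool sprites_scaled)
{
	int max_level = is_hsw_or_bdw ? 4 : (gen >= 6 ? 3 : 2);

	if (gen <= 6 && sprites_enabled)
		max_level = 1;	/* ILK/SNB: LP2+ only without sprites */
	if (sprites_scaled)
		max_level = 0;	/* LP1+ only without scaling */

	return max_level;	/* e.g. SNB (gen 6) with a sprite enabled -> 1 */
}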
 
static void hsw_compute_wm_results(struct drm_device *dev,
struct hsw_pipe_wm_parameters *params,
struct hsw_wm_maximums *lp_maximums,
struct hsw_wm_values *results)
/*
* Merge the watermarks from all active pipes for a specific level.
*/
static void ilk_merge_wm_level(struct drm_device *dev,
int level,
struct intel_wm_level *ret_wm)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_wm_level lp_results[4] = {};
enum pipe pipe;
int level, max_level, wm_lp;
const struct intel_crtc *intel_crtc;
 
for (level = 1; level <= 4; level++)
if (!hsw_compute_lp_wm(dev_priv, level,
lp_maximums, params,
&lp_results[level - 1]))
break;
max_level = level - 1;
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
const struct intel_wm_level *wm =
&intel_crtc->wm.active.wm[level];
 
memset(results, 0, sizeof(*results));
if (!wm->enable)
return;
 
/* The spec says it is preferred to disable FBC WMs instead of disabling
* a WM level. */
results->enable_fbc_wm = true;
ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
}
 
ret_wm->enable = true;
}
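/*
* Illustrative two-pipe instance of the merge rule above (the helper name is
* assumed): the merged level takes the element-wise maximum, and any
* disabled pipe disables the whole level.
*/
static void sketch_merge_two_levels(const struct intel_wm_level *a,
				    const struct intel_wm_level *b,
				    struct intel_wm_level *out)
{
	out->enable = a->enable && b->enable;
	out->pri_val = a->pri_val > b->pri_val ? a->pri_val : b->pri_val;
	out->spr_val = a->spr_val > b->spr_val ? a->spr_val : b->spr_val;
	out->cur_val = a->cur_val > b->cur_val ? a->cur_val : b->cur_val;
	out->fbc_val = a->fbc_val > b->fbc_val ? a->fbc_val : b->fbc_val;
	/* e.g. {pri 10, spr 4} and {pri 14, spr 2} merge to {pri 14, spr 4} */
}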
 
/*
* Merge all low power watermarks for all active pipes.
*/
static void ilk_wm_merge(struct drm_device *dev,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
int level, max_level = ilk_wm_max_level(dev);
 
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
config->num_pipes_active > 1)
return;
 
/* ILK: FBC WM must be disabled always */
merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
 
/* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
results->enable_fbc_wm = false;
lp_results[level - 1].fbc_val = 0;
struct intel_wm_level *wm = &merged->wm[level];
 
ilk_merge_wm_level(dev, level, wm);
 
if (!ilk_validate_wm_level(level, max, wm))
break;
 
/*
* The spec says it is preferred to disable
* FBC WMs instead of disabling a WM level.
*/
if (wm->fbc_val > max->fbc) {
merged->fbc_wm_enabled = false;
wm->fbc_val = 0;
}
}
 
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
/*
* FIXME this is racy. FBC might get enabled later.
* What we should check here is whether FBC can be
* enabled sometime later.
*/
if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
 
wm->enable = false;
}
}
}
 
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
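/*
* A small self-check sketch of the mapping above (the helper name is
* assumed; the struct usage comes from this file): with five levels
* (wm[4] enabled) LP1/2/3 map to levels 1/3/4, skipping level 2;
* otherwise they map to 1/2/3.
*/
static bool sketch_check_lp_mapping(void)
{
	struct intel_pipe_wm five = {}, four = {};

	five.wm[4].enable = true;

	return ilk_wm_lp_to_level(1, &five) == 1 &&
	       ilk_wm_lp_to_level(2, &five) == 3 &&
	       ilk_wm_lp_to_level(3, &five) == 4 &&
	       ilk_wm_lp_to_level(2, &four) == 2;
}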
 
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 2 * level;
else
return dev_priv->wm.pri_latency[level];
}
 
static void ilk_compute_wm_results(struct drm_device *dev,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
struct intel_crtc *intel_crtc;
int level, wm_lp;
 
results->enable_fbc_wm = merged->fbc_wm_enabled;
results->partitioning = partitioning;
 
/* LP1+ register values */
for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
const struct intel_wm_level *r;
 
level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
if (level > max_level)
level = ilk_wm_lp_to_level(wm_lp, merged);
 
r = &merged->wm[level];
if (!r->enable)
break;
 
r = &lp_results[level - 1];
results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
r->fbc_val,
r->pri_val,
r->cur_val);
results->wm_lp[wm_lp - 1] = WM3_LP_EN |
(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
(r->pri_val << WM1_LP_SR_SHIFT) |
r->cur_val;
 
if (INTEL_INFO(dev)->gen >= 8)
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
else
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT;
 
if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
WARN_ON(wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
results->wm_lp_spr[wm_lp - 1] = r->spr_val;
}
 
for_each_pipe(pipe)
results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
&params[pipe]);
/* LP0 register values */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r =
&intel_crtc->wm.active.wm[0];
 
for_each_pipe(pipe) {
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
if (WARN_ON(!r->enable))
continue;
 
results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
 
results->wm_pipe[pipe] =
(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
r->cur_val;
}
}
 
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
struct hsw_wm_values *r2)
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
int i, val_r1 = 0, val_r2 = 0;
int level, max_level = ilk_wm_max_level(dev);
int level1 = 0, level2 = 0;
 
for (i = 0; i < 3; i++) {
if (r1->wm_lp[i] & WM3_LP_EN)
val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
if (r2->wm_lp[i] & WM3_LP_EN)
val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
for (level = 1; level <= max_level; level++) {
if (r1->wm[level].enable)
level1 = level;
if (r2->wm[level].enable)
level2 = level;
}
 
if (val_r1 == val_r2) {
if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
if (level1 == level2) {
if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
return r2;
else
return r1;
} else if (val_r1 > val_r2) {
} else if (level1 > level2) {
return r1;
} else {
return r2;
2752,80 → 2370,147
}
}
 
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
 
static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
const struct ilk_wm_values *old,
const struct ilk_wm_values *new)
{
unsigned int dirty = 0;
enum pipe pipe;
int wm_lp;
 
for_each_pipe(pipe) {
if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
dirty |= WM_DIRTY_LINETIME(pipe);
/* Must disable LP1+ watermarks too */
dirty |= WM_DIRTY_LP_ALL;
}
 
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
dirty |= WM_DIRTY_LP_ALL;
}
}
 
if (old->enable_fbc_wm != new->enable_fbc_wm) {
dirty |= WM_DIRTY_FBC;
/* Must disable LP1+ watermarks too */
dirty |= WM_DIRTY_LP_ALL;
}
 
if (old->partitioning != new->partitioning) {
dirty |= WM_DIRTY_DDB;
/* Must disable LP1+ watermarks too */
dirty |= WM_DIRTY_LP_ALL;
}
 
/* LP1+ watermarks already deemed dirty, no need to continue */
if (dirty & WM_DIRTY_LP_ALL)
return dirty;
 
/* Find the lowest numbered LP1+ watermark in need of an update... */
for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
break;
}
 
/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
for (; wm_lp <= 3; wm_lp++)
dirty |= WM_DIRTY_LP(wm_lp);
 
return dirty;
}
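/*
* Illustrative use of the dirty bits above (the helper name is assumed): a
* change to a single pipe's WM0 value dirties that pipe and, because the LP
* watermarks depend on every pipe, all LP1+ watermarks as well.
*/
static unsigned int sketch_dirty_for_pipe_b_wm0_change(void)
{
	/* == (1 << PIPE_B) | (7 << 16) */
	return WM_DIRTY_PIPE(PIPE_B) | WM_DIRTY_LP_ALL;
}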
 
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
unsigned int dirty)
{
struct ilk_wm_values *previous = &dev_priv->wm.hw;
bool changed = false;
 
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
previous->wm_lp[2] &= ~WM1_LP_SR_EN;
I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
previous->wm_lp[1] &= ~WM1_LP_SR_EN;
I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
previous->wm_lp[0] &= ~WM1_LP_SR_EN;
I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
 
/*
* Don't touch WM1S_LP_EN here.
* Doing so could cause underruns.
*/
 
return changed;
}
 
/*
* The spec says we shouldn't write when we don't need to, because every
* write causes WMs to be re-evaluated, expending some power.
*/
static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
struct hsw_wm_values *results,
enum intel_ddb_partitioning partitioning)
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
struct hsw_wm_values previous;
struct drm_device *dev = dev_priv->dev;
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
enum intel_ddb_partitioning prev_partitioning;
bool prev_enable_fbc_wm;
 
previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
 
prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
 
if (memcmp(results->wm_pipe, previous.wm_pipe,
sizeof(results->wm_pipe)) == 0 &&
memcmp(results->wm_lp, previous.wm_lp,
sizeof(results->wm_lp)) == 0 &&
memcmp(results->wm_lp_spr, previous.wm_lp_spr,
sizeof(results->wm_lp_spr)) == 0 &&
memcmp(results->wm_linetime, previous.wm_linetime,
sizeof(results->wm_linetime)) == 0 &&
partitioning == prev_partitioning &&
results->enable_fbc_wm == prev_enable_fbc_wm)
dirty = ilk_compute_wm_dirty(dev, previous, results);
if (!dirty)
return;
 
if (previous.wm_lp[2] != 0)
I915_WRITE(WM3_LP_ILK, 0);
if (previous.wm_lp[1] != 0)
I915_WRITE(WM2_LP_ILK, 0);
if (previous.wm_lp[0] != 0)
I915_WRITE(WM1_LP_ILK, 0);
_ilk_disable_lp_wm(dev_priv, dirty);
 
if (previous.wm_pipe[0] != results->wm_pipe[0])
if (dirty & WM_DIRTY_PIPE(PIPE_A))
I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
if (previous.wm_pipe[1] != results->wm_pipe[1])
if (dirty & WM_DIRTY_PIPE(PIPE_B))
I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
if (previous.wm_pipe[2] != results->wm_pipe[2])
if (dirty & WM_DIRTY_PIPE(PIPE_C))
I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
 
if (previous.wm_linetime[0] != results->wm_linetime[0])
if (dirty & WM_DIRTY_LINETIME(PIPE_A))
I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
if (previous.wm_linetime[1] != results->wm_linetime[1])
if (dirty & WM_DIRTY_LINETIME(PIPE_B))
I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
if (previous.wm_linetime[2] != results->wm_linetime[2])
if (dirty & WM_DIRTY_LINETIME(PIPE_C))
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
if (prev_partitioning != partitioning) {
if (dirty & WM_DIRTY_DDB) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
val = I915_READ(WM_MISC);
if (partitioning == INTEL_DDB_PART_1_2)
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
I915_WRITE(WM_MISC, val);
} else {
val = I915_READ(DISP_ARB_CTL2);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~DISP_DATA_PARTITION_5_6;
else
val |= DISP_DATA_PARTITION_5_6;
I915_WRITE(DISP_ARB_CTL2, val);
}
}
 
if (prev_enable_fbc_wm != results->enable_fbc_wm) {
if (dirty & WM_DIRTY_FBC) {
val = I915_READ(DISP_ARB_CTL);
if (results->enable_fbc_wm)
val &= ~DISP_FBC_WM_DIS;
2834,52 → 2519,84
I915_WRITE(DISP_ARB_CTL, val);
}
 
if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
 
if (INTEL_INFO(dev)->gen >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
}
 
if (results->wm_lp[0] != 0)
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
if (results->wm_lp[1] != 0)
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
if (results->wm_lp[2] != 0)
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
 
dev_priv->wm.hw = *results;
}
 
static void haswell_update_wm(struct drm_device *dev)
static bool ilk_disable_lp_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
struct hsw_pipe_wm_parameters params[3];
struct hsw_wm_values results_1_2, results_5_6, *best_results;
 
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
 
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct ilk_wm_maximums max;
struct ilk_pipe_wm_parameters params = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
struct intel_pipe_wm pipe_wm = {};
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct intel_wm_config config = {};
 
hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
ilk_compute_wm_parameters(crtc, &params, &config);
 
hsw_compute_wm_results(dev, params,
&lp_max_1_2, &results_1_2);
if (lp_max_1_2.pri != lp_max_5_6.pri) {
hsw_compute_wm_results(dev, params,
&lp_max_5_6, &results_5_6);
best_results = hsw_find_best_result(&results_1_2, &results_5_6);
intel_compute_pipe_wm(crtc, &params, &pipe_wm);
 
if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
return;
 
intel_crtc->wm.active = pipe_wm;
 
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
 
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_INFO(dev)->gen >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
 
best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_results = &results_1_2;
best_lp_wm = &lp_wm_1_2;
}
 
partitioning = (best_results == &results_1_2) ?
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
hsw_write_wm_values(dev_priv, best_results, partitioning);
ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
 
ilk_write_wm_values(dev_priv, &results);
}
 
static void haswell_update_sprite_wm(struct drm_plane *plane,
static void ilk_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
 
intel_plane->wm.enabled = enabled;
2887,171 → 2604,92
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.bytes_per_pixel = pixel_size;
 
haswell_update_wm(plane->dev);
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
* when scaling is disabled.
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
intel_wait_for_vblank(dev, intel_plane->pipe);
 
ilk_update_wm(crtc);
}
 
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size,
const struct intel_watermark_params *display,
int display_latency_ns, int *sprite_wm)
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_crtc *crtc;
int clock;
int entries, tlb_miss;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_pipe_wm *active = &intel_crtc->wm.active;
enum pipe pipe = intel_crtc->pipe;
static const unsigned int wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
[PIPE_B] = WM0_PIPEB_ILK,
[PIPE_C] = WM0_PIPEC_IVB,
};
 
crtc = intel_get_crtc_for_plane(dev, plane);
if (!intel_crtc_active(crtc)) {
*sprite_wm = display->guard_size;
return false;
}
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
clock = crtc->mode.clock;
if (intel_crtc_active(crtc)) {
u32 tmp = hw->wm_pipe[pipe];
 
/* Use the small buffer method to calculate the sprite watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size -
sprite_width * 8;
if (tlb_miss > 0)
entries += tlb_miss;
entries = DIV_ROUND_UP(entries, display->cacheline_size);
*sprite_wm = entries + display->guard_size;
if (*sprite_wm > (int)display->max_wm)
*sprite_wm = display->max_wm;
/*
* For active pipes LP0 watermark is marked as
* enabled, and LP1+ watermarks as disabled since
* we can't really reverse compute them in case
* multiple pipes are active.
*/
active->wm[0].enable = true;
active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
active->linetime = hw->wm_linetime[pipe];
} else {
int level, max_level = ilk_wm_max_level(dev);
 
return true;
/*
* For inactive pipes, all watermark levels
* should be marked as enabled but zeroed,
* which is what we'd compute them to be.
*/
for (level = 0; level <= max_level; level++)
active->wm[level].enable = true;
}
 
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size,
const struct intel_watermark_params *display,
int latency_ns, int *sprite_wm)
{
struct drm_crtc *crtc;
unsigned long line_time_us;
int clock;
int line_count, line_size;
int small, large;
int entries;
 
if (!latency_ns) {
*sprite_wm = 0;
return false;
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
clock = crtc->mode.clock;
if (!clock) {
*sprite_wm = 0;
return false;
}
 
line_time_us = (sprite_width * 1000) / clock;
if (!line_time_us) {
*sprite_wm = 0;
return false;
}
 
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = sprite_width * pixel_size;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*sprite_wm = entries + display->guard_size;
 
return *sprite_wm > 0x3ff ? false : true;
}
 
static void sandybridge_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled)
void ilk_wm_get_hw_state(struct drm_device *dev)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_plane(plane)->pipe;
int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
u32 val;
int sprite_wm, reg;
int ret;
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
 
if (!enabled)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
ilk_pipe_wm_get_hw_state(crtc);
 
switch (pipe) {
case 0:
reg = WM0_PIPEA_ILK;
break;
case 1:
reg = WM0_PIPEB_ILK;
break;
case 2:
reg = WM0_PIPEC_IVB;
break;
default:
return; /* bad pipe */
}
hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
 
ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
&sandybridge_display_wm_info,
latency, &sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
pipe_name(pipe));
return;
}
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
 
val = I915_READ(reg);
val &= ~WM0_PIPE_SPRITE_MASK;
I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
else if (IS_IVYBRIDGE(dev))
hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
dev_priv->wm.spr_latency[1] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
pipe_name(pipe));
return;
hw->enable_fbc_wm =
!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
I915_WRITE(WM1S_LP_ILK, sprite_wm);
 
/* Only IVB has two more LP watermarks for sprite */
if (!IS_IVYBRIDGE(dev))
return;
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
dev_priv->wm.spr_latency[2] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
pipe_name(pipe));
return;
}
I915_WRITE(WM2S_LP_IVB, sprite_wm);
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
dev_priv->wm.spr_latency[3] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
pipe_name(pipe));
return;
}
I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
 
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
3084,12 → 2722,12
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
void intel_update_watermarks(struct drm_device *dev)
void intel_update_watermarks(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(dev);
dev_priv->display.update_wm(crtc);
}
 
void intel_update_sprite_watermarks(struct drm_plane *plane,
3271,16 → 2909,10
* ourselves, instead of doing an rmw cycle (which might result in us clearing
* all limits and the gpu getting stuck at whatever frequency it is at atm).
*/
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
u32 limits;
 
limits = 0;
 
if (*val >= dev_priv->rps.max_delay)
*val = dev_priv->rps.max_delay;
limits |= dev_priv->rps.max_delay << 24;
 
/* Only set the down limit when we've reached the lowest level to avoid
* getting more interrupts, otherwise leave this clear. This prevents a
* race in the hw when coming out of rc6: There's a tiny window where
3287,18 → 2919,108
* the hw runs at the minimal clock before selecting the desired
* frequency; if the down threshold expires in that window we will not
* receive a down interrupt. */
if (*val <= dev_priv->rps.min_delay) {
*val = dev_priv->rps.min_delay;
limits = dev_priv->rps.max_delay << 24;
if (val <= dev_priv->rps.min_delay)
limits |= dev_priv->rps.min_delay << 16;
}
 
return limits;
}
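/*
* A minimal sketch of the limits encoding built above (the helper name and
* sample values are assumptions): bits 31:24 hold the up limit, bits 23:16
* the down limit, armed only at the floor.
*/
static u32 sketch_rps_limits(u8 max_delay, u8 min_delay, u8 val)
{
	u32 limits = (u32)max_delay << 24;

	if (val <= min_delay)
		limits |= (u32)min_delay << 16;

	/* e.g. (0x16, 0x0b, 0x0b) -> 0x160b0000; (0x16, 0x0b, 0x10) -> 0x16000000 */
	return limits;
}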
 
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
int new_power;
 
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
case LOW_POWER:
if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
new_power = BETWEEN;
break;
 
case BETWEEN:
if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
new_power = LOW_POWER;
else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
new_power = HIGH_POWER;
break;
 
case HIGH_POWER:
if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
new_power = BETWEEN;
break;
}
/* Max/min bins are special */
if (val == dev_priv->rps.min_delay)
new_power = LOW_POWER;
if (val == dev_priv->rps.max_delay)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
return;
 
/* Note the units here are not exactly 1us, but 1280ns. */
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 95% busy over 16ms */
I915_WRITE(GEN6_RP_UP_EI, 12500);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
 
/* Downclock if less than 85% busy over 32ms */
I915_WRITE(GEN6_RP_DOWN_EI, 25000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
 
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
break;
 
case BETWEEN:
/* Upclock if more than 90% busy over 13ms */
I915_WRITE(GEN6_RP_UP_EI, 10250);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
 
/* Downclock if less than 75% busy over 32ms */
I915_WRITE(GEN6_RP_DOWN_EI, 25000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
 
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
break;
 
case HIGH_POWER:
/* Upclock if more than 85% busy over 10ms */
I915_WRITE(GEN6_RP_UP_EI, 8000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
 
/* Downclock if less than 60% busy over 32ms */
I915_WRITE(GEN6_RP_DOWN_EI, 25000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
 
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
break;
}
 
dev_priv->rps.power = new_power;
dev_priv->rps.last_adj = 0;
}
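/*
* Worked numbers for the LOW_POWER bin above, assuming one EI register unit
* is 1280 ns as the "Note the units" comment states (the helper name is
* illustrative): UP_EI 12500 -> 16 ms, and UP_THRESHOLD 11800/12500 ~= 94%
* busy (the "95%" in the comment is approximate); DOWN_EI 25000 -> 32 ms,
* DOWN_THRESHOLD 21250/25000 = 85% busy.
*/
static unsigned int sketch_ei_units_to_us(unsigned int ei_units)
{
	return ei_units * 1280u / 1000u;	/* 12500 -> 16000 us */
}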
 
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits = gen6_rps_limits(dev_priv, &val);
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
3307,6 → 3029,8
if (val == dev_priv->rps.cur_delay)
return;
 
gen6_set_rps_thresholds(dev_priv, val);
 
if (IS_HASWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
3319,7 → 3043,8
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
gen6_rps_limits(dev_priv, val));
 
POSTING_READ(GEN6_RPNSWREQ);
 
3328,48 → 3053,48
trace_intel_gpu_freq_change(val * 50);
}
 
/*
* Wait until the previous freq change has completed,
* or the timeout elapsed, and then update our notion
* of the current GPU frequency.
*/
static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
u32 pval;
struct drm_device *dev = dev_priv->dev;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
pval >>= 8;
 
if (pval != dev_priv->rps.cur_delay)
DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay,
vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
 
dev_priv->rps.cur_delay = pval;
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
void valleyview_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
gen6_rps_limits(dev_priv, &val);
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
 
vlv_update_rps_cur_delay(dev_priv);
 
DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.cur_delay),
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay,
vlv_gpu_freq(dev_priv->mem_freq, val), val);
vlv_gpu_freq(dev_priv, val), val);
 
if (val == dev_priv->rps.cur_delay)
return;
3378,7 → 3103,7
 
dev_priv->rps.cur_delay = val;
 
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
 
static void gen6_disable_rps_interrupts(struct drm_device *dev)
3423,6 → 3148,20
}
}
 
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
if (IS_GEN6(dev))
DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
 
if (IS_HASWELL(dev))
DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
 
DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
}
 
int intel_enable_rc6(const struct drm_device *dev)
{
/* No RC6 before Ironlake */
3437,18 → 3176,13
if (INTEL_INFO(dev)->gen == 5)
return 0;
 
if (IS_HASWELL(dev)) {
DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
if (IS_HASWELL(dev))
return INTEL_RC6_ENABLE;
}
 
/* snb/ivb have more than one rc6 state. */
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
if (INTEL_INFO(dev)->gen == 6)
return INTEL_RC6_ENABLE;
}
 
DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
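/*
* Illustrative caller of the policy above (the helper name is assumed):
* deep RC6 (RC6p) is only advertised where this function returns it, so in
* this version of the code it stays off on SNB and HSW.
*/
static bool sketch_deep_rc6_allowed(const struct drm_device *dev)
{
	return (intel_enable_rc6(dev) & INTEL_RC6p_ENABLE) != 0;
}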
 
3475,6 → 3209,78
I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
}
 
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
uint32_t rc6_mask = 0, rp_state_cap;
int unused;
 
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
 
/* 1c & 1d: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
 
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
/* 3: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_EI_MODE(1) |
rc6_mask);
 
/* 4 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
/* Docs recommend 900MHz, and 300 MHz respectively */
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
dev_priv->rps.max_delay << 24 |
dev_priv->rps.min_delay << 16);
 
I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
 
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
/* 5: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
 
/* 6: Ring frequency + overclocking (our driver does this later) */
 
gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
 
gen6_enable_rps_interrupts(dev);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
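/*
* Illustrative unit check for the thresholds above, assuming the RP values
* are written in 10 ns units and the counters tick every 1.28 us (hence the
* "/ 128"); the helper name is not from the diff. 100000000/128 = 781250
* units ~= 1.0 s, and 7600000/128 = 59375 units = 76 ms, which against
* UP_EI = 66000 units (84.48 ms) is ~90% busy, matching the comments.
*/
static u32 sketch_rp_10ns_to_units(u32 tens_of_ns)
{
	return tens_of_ns / 128u;	/* 1.28 us per unit == 128 * 10 ns */
}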
 
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
3502,7 → 3308,7
I915_WRITE(GTFIFODBG, gtfifodbg);
}
 
gen6_gt_force_wake_get(dev_priv);
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3509,7 → 3315,10
 
/* In units of 50MHz */
dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
dev_priv->rps.cur_delay = 0;
 
/* disable the counters and set deterministic thresholds */
3526,7 → 3335,7
 
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
if (IS_IVYBRIDGE(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3547,10 → 3356,7
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
}
 
DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
(rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
intel_print_rc6_info(dev, rc6_mask);
 
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
3557,38 → 3363,9
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
 
if (IS_HASWELL(dev)) {
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(10));
I915_WRITE(GEN6_RC_VIDEO_FREQ,
HSW_FREQUENCY(12));
} else {
I915_WRITE(GEN6_RPNSWREQ,
GEN6_FREQUENCY(10) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN6_FREQUENCY(12));
}
 
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
dev_priv->rps.max_delay << 24 |
dev_priv->rps.min_delay << 16);
 
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
/* Power down if completely idle for over 50ms */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
if (!ret) {
3604,7 → 3381,8
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
 
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
 
gen6_enable_rps_interrupts(dev);
 
3622,7 → 3400,7
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
 
gen6_gt_force_wake_put(dev_priv);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
 
void gen6_update_ring_freq(struct drm_device *dev)
3632,6 → 3410,7
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
int scaling_factor = 180;
struct cpufreq_policy *policy;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
3640,15 → 3419,14
* Default to the measured freq if none is found; the PCU will ensure we
* don't go over.
*/
if (!max_ia_freq)
max_ia_freq = tsc_khz;
 
/* Convert from kHz to MHz */
max_ia_freq /= 1000;
 
min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
/* convert DDR frequency from units of 133.3MHz to bandwidth */
min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
min_ring_freq = I915_READ(DCLK) & 0xf;
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
/*
* For each potential GPU frequency, load a ring frequency we'd like
3660,8 → 3438,11
int diff = dev_priv->rps.max_delay - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
 
if (IS_HASWELL(dev)) {
ring_freq = (gpu_freq * 5 + 3) / 4;
if (INTEL_INFO(dev)->gen >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
} else if (IS_HASWELL(dev)) {
ring_freq = mult_frac(gpu_freq, 5, 4);
ring_freq = max(min_ring_freq, ring_freq);
/* leave ia_freq as the default, chosen by cpufreq */
} else {
3717,24 → 3498,6
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
 
static void vlv_rps_timer_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.vlv_work.work);
 
/*
* Timer fired, we must be idle. Drop to min voltage state.
* Note: we use RPe here since it should match the
* Vmin we were shooting for. That should give us better
* perf when we come back out of RC6 than if we used the
* min freq available.
*/
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
3781,19 → 3544,21
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
u32 gtfifodbg, val;
u32 gtfifodbg, val, rc6_mode = 0;
int i;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
 
valleyview_setup_pctx(dev);
 
gen6_gt_force_wake_get(dev_priv);
/* If VLV, Forcewake all wells, else re-direct to regular path */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3817,27 → 3582,21
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
 
/* allows RC6 residency counter to work */
I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
I915_WRITE(GEN6_RC_CONTROL,
GEN7_RC_CTL_TO_MODE);
I915_WRITE(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
intel_print_rc6_info(dev, rc6_mode);
 
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
switch ((val >> 6) & 3) {
case 0:
case 1:
dev_priv->mem_freq = 800;
break;
case 2:
dev_priv->mem_freq = 1066;
break;
case 3:
dev_priv->mem_freq = 1333;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3844,32 → 3603,27
 
dev_priv->rps.cur_delay = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.cur_delay),
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay);
 
dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.hw_max = dev_priv->rps.max_delay;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.max_delay),
vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
dev_priv->rps.max_delay);
 
dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.rpe_delay),
vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
 
dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.min_delay),
vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
dev_priv->rps.min_delay);
 
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv->mem_freq,
dev_priv->rps.rpe_delay),
vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
 
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3876,7 → 3630,7
 
gen6_enable_rps_interrupts(dev);
 
gen6_gt_force_wake_put(dev_priv);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
 
void ironlake_teardown_rc6(struct drm_device *dev)
3993,6 → 3747,8
 
I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 
intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
}
 
static unsigned long intel_pxfreq(u32 vidfreq)
4611,13 → 4367,12
} else if (INTEL_INFO(dev)->gen >= 6) {
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
cancel_work_sync(&dev_priv->rps.work);
if (IS_VALLEYVIEW(dev))
cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
4633,10 → 4388,14
 
if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
gen6_update_ring_freq(dev);
} else {
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
}
dev_priv->rps.enabled = true;
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
4680,10 → 4439,24
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
intel_flush_primary_plane(dev_priv, pipe);
}
}
 
static void ilk_init_lp_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
 
/*
* Don't touch WM1S_LP_EN here.
* Doing so could cause underruns.
*/
}
 
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4717,10 → 4490,9
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
ilk_init_lp_watermarks(dev);
 
/*
* Based on the document from hardware guys the following bits
* should be set unconditionally in order to enable FBC.
4826,9 → 4598,7
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
ilk_init_lp_watermarks(dev);
 
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
4940,14 → 4710,70
}
}
 
static void haswell_init_clock_gating(struct drm_device *dev)
static void gen8_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe i;
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
/* FIXME(BDW): Check all the w/a, some might only apply to
* pre-production hw. */
 
WARN(!i915_preliminary_hw_support,
"GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
 
I915_WRITE(_3D_CHICKEN3,
_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
 
I915_WRITE(COMMON_SLICE_CHICKEN2,
_MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
 
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
 
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
/* WaPsrDPAMaskVBlankInSRD:bdw */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
 
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
for_each_pipe(i) {
I915_WRITE(CHICKEN_PIPESL_1(i),
I915_READ(CHICKEN_PIPESL_1(i)) |
DPRS_MASK_VBLANK_SRD);
}
 
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
I915_WRITE(HDC_CHICKEN0,
I915_READ(HDC_CHICKEN0) |
_MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
 
/* WaVSRefCountFullforceMissDisable:bdw */
/* WaDSRefCountFullforceMissDisable:bdw */
I915_WRITE(GEN7_FF_THREAD_MODE,
I915_READ(GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
}
 
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
ilk_init_lp_watermarks(dev);
 
/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:hsw workaround.
*/
4995,9 → 4821,7
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t snpcr;
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
ilk_init_lp_watermarks(dev);
 
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
5084,7 → 4908,27
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
mutex_unlock(&dev_priv->rps.hw_lock);
switch ((val >> 6) & 3) {
case 0:
dev_priv->mem_freq = 800;
break;
case 1:
dev_priv->mem_freq = 1066;
break;
case 2:
dev_priv->mem_freq = 1333;
break;
case 3:
dev_priv->mem_freq = 1333;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
/* WaDisableEarlyCull:vlv */
5263,44 → 5107,137
lpt_suspend_hw(dev);
}
 
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
((power_well) = &(power_domains)->power_wells[i]); \
i++) \
if ((power_well)->domains & (domain_mask))
 
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
for (i = (power_domains)->power_well_count - 1; \
i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
i--) \
if ((power_well)->domains & (domain_mask))
 
/**
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
static bool hsw_power_well_enabled(struct drm_device *dev,
struct i915_power_well *power_well)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
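/*
* Illustrative reading of the check above (the helper name and sample value
* are assumed): a well whose STATE bit is set but whose REQUEST bit is clear
* was enabled by the BIOS, not by us, so a readback of
* HSW_PWR_WELL_STATE_ENABLED alone must not count as driver-enabled.
*/
static bool sketch_bios_enabled_counts_as_off(void)
{
	/* STATE set without REQUEST: powered by BIOS, not driver-enabled */
	u32 readback = HSW_PWR_WELL_STATE_ENABLED;

	return readback !=
	       (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}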
 
bool intel_display_power_enabled_sw(struct drm_device *dev,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
 
power_domains = &dev_priv->power_domains;
 
return power_domains->domain_use_count[domain];
}
 
bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
bool is_enabled;
int i;
 
if (!HAS_POWER_WELL(dev))
return true;
power_domains = &dev_priv->power_domains;
 
switch (domain) {
case POWER_DOMAIN_PIPE_A:
case POWER_DOMAIN_TRANSCODER_EDP:
return true;
case POWER_DOMAIN_PIPE_B:
case POWER_DOMAIN_PIPE_C:
case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
case POWER_DOMAIN_TRANSCODER_A:
case POWER_DOMAIN_TRANSCODER_B:
case POWER_DOMAIN_TRANSCODER_C:
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
default:
BUG();
is_enabled = true;
 
mutex_lock(&power_domains->lock);
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
if (power_well->always_on)
continue;
 
if (!power_well->is_enabled(dev, power_well)) {
is_enabled = false;
break;
}
}
mutex_unlock(&power_domains->lock);
 
static void __intel_set_power_well(struct drm_device *dev, bool enable)
return is_enabled;
}
 
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
unsigned long irqflags;
 
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
* we'll get unclaimed register interrupts. This stops after we write
* anything to the VGA MSR register. The vgacon module uses this
* register all the time, so if we unbind our driver and, as a
* consequence, bind vgacon, we'll get stuck in an infinite loop at
* console_unlock(). So here we touch the VGA MSR register, making
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
// vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
// vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
 
if (IS_BROADWELL(dev)) {
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
dev_priv->de_irq_mask[PIPE_B]);
I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
~dev_priv->de_irq_mask[PIPE_B] |
GEN8_PIPE_VBLANK);
I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
dev_priv->de_irq_mask[PIPE_C]);
I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
~dev_priv->de_irq_mask[PIPE_C] |
GEN8_PIPE_VBLANK);
POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
}
 
static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
enum pipe p;
unsigned long irqflags;
 
/*
* After this, the registers on the pipes that are part of the power
* well will become zero, so we have to adjust our counters according to
* that.
*
* FIXME: Should we do this in general in drm_vblank_post_modeset?
*/
// spin_lock_irqsave(&dev->vbl_lock, irqflags);
// for_each_pipe(p)
// if (p != PIPE_A)
// dev->vblank[p].last = 0;
// spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
 
static void hsw_set_power_well(struct drm_device *dev,
struct i915_power_well *power_well, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
uint32_t tmp;
 
WARN_ON(dev_priv->pc8.enabled);
 
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
5316,45 → 5253,98
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}
 
hsw_power_well_post_enable(dev_priv);
} else {
if (enable_requested) {
unsigned long irqflags;
enum pipe p;
 
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
 
/*
* After this, the registers on the pipes that are part
* of the power well will become zero, so we have to
* adjust our counters according to that.
*
* FIXME: Should we do this in general in
* drm_vblank_post_modeset?
*/
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for_each_pipe(p)
if (p != PIPE_A)
dev->last_vblank[p] = 0;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
hsw_power_well_post_disable(dev_priv);
}
}
}
 
static struct i915_power_well *hsw_pwr;
static void __intel_power_well_get(struct drm_device *dev,
struct i915_power_well *power_well)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!power_well->count++ && power_well->set) {
hsw_disable_package_c8(dev_priv);
power_well->set(dev, power_well, true);
}
}
 
static void __intel_power_well_put(struct drm_device *dev,
struct i915_power_well *power_well)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(!power_well->count);
 
if (!--power_well->count && power_well->set &&
i915_disable_power_well) {
power_well->set(dev, power_well, false);
hsw_enable_package_c8(dev_priv);
}
}
 
void intel_display_power_get(struct drm_device *dev,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
 
for_each_power_well(i, power_well, BIT(domain), power_domains)
__intel_power_well_get(dev, power_well);
 
power_domains->domain_use_count[domain]++;
 
mutex_unlock(&power_domains->lock);
}
 
void intel_display_power_put(struct drm_device *dev,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
 
WARN_ON(!power_domains->domain_use_count[domain]);
power_domains->domain_use_count[domain]--;
 
for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
__intel_power_well_put(dev, power_well);
 
mutex_unlock(&power_domains->lock);
}
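/*
 * Typical use of the pair above (a sketch, not taken from this patch;
 * POWER_DOMAIN_PIPE_A stands in for any domain): callers bracket their
 * register access with get/put,
 *
 *	intel_display_power_get(dev, POWER_DOMAIN_PIPE_A);
 *	... touch registers backed by the power well ...
 *	intel_display_power_put(dev, POWER_DOMAIN_PIPE_A);
 *
 * and the well is physically switched only on the first get and the
 * last put across every domain that maps onto it.
 */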
 
static struct i915_power_domains *hsw_pwr;
 
/* Display audio driver power well request */
void i915_request_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (WARN_ON(!hsw_pwr))
return;
 
spin_lock_irq(&hsw_pwr->lock);
if (!hsw_pwr->count++ &&
!hsw_pwr->i915_request)
__intel_set_power_well(hsw_pwr->device, true);
spin_unlock_irq(&hsw_pwr->lock);
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
 
5361,58 → 5351,100
/* Display audio driver power well release */
void i915_release_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (WARN_ON(!hsw_pwr))
return;
 
spin_lock_irq(&hsw_pwr->lock);
WARN_ON(!hsw_pwr->count);
if (!--hsw_pwr->count &&
!hsw_pwr->i915_request)
__intel_set_power_well(hsw_pwr->device, false);
spin_unlock_irq(&hsw_pwr->lock);
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
 
int i915_init_power_well(struct drm_device *dev)
static struct i915_power_well i9xx_always_on_power_well[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
},
};
 
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
},
{
.name = "display",
.domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
.is_enabled = hsw_power_well_enabled,
.set = hsw_set_power_well,
},
};
 
static struct i915_power_well bdw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
},
{
.name = "display",
.domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
.is_enabled = hsw_power_well_enabled,
.set = hsw_set_power_well,
},
};
 
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
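/*
 * set_power_wells() stays a macro rather than a function so that
 * ARRAY_SIZE() still sees the real array type; passed as a function
 * parameter the array would decay to a pointer and the element count
 * would be lost.
 */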
 
int intel_power_domains_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
hsw_pwr = &dev_priv->power_well;
mutex_init(&power_domains->lock);
 
hsw_pwr->device = dev;
spin_lock_init(&hsw_pwr->lock);
hsw_pwr->count = 0;
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
if (IS_HASWELL(dev)) {
set_power_wells(power_domains, hsw_power_wells);
hsw_pwr = power_domains;
} else if (IS_BROADWELL(dev)) {
set_power_wells(power_domains, bdw_power_wells);
hsw_pwr = power_domains;
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
}
 
return 0;
}
 
void i915_remove_power_well(struct drm_device *dev)
void intel_power_domains_remove(struct drm_device *dev)
{
hsw_pwr = NULL;
}
 
void intel_set_power_well(struct drm_device *dev, bool enable)
static void intel_power_domains_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_well *power_well = &dev_priv->power_well;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
 
if (!HAS_POWER_WELL(dev))
return;
 
if (!i915_disable_power_well && !enable)
return;
 
spin_lock_irq(&power_well->lock);
power_well->i915_request = enable;
 
/* only reject "disable" power well request */
if (power_well->count && !enable) {
spin_unlock_irq(&power_well->lock);
return;
mutex_lock(&power_domains->lock);
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
if (power_well->set)
power_well->set(dev, power_well, power_well->count > 0);
}
 
__intel_set_power_well(dev, enable);
spin_unlock_irq(&power_well->lock);
mutex_unlock(&power_domains->lock);
}
 
/*
5421,16 → 5453,17
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
void intel_init_power_well(struct drm_device *dev)
void intel_power_domains_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!HAS_POWER_WELL(dev))
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev, true);
intel_power_domains_resume(dev);
 
if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
return;
 
/* For now, we need the power well to be always enabled. */
intel_set_power_well(dev, true);
 
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
5448,31 → 5481,69
hsw_enable_package_c8(dev_priv);
}
 
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
return;
}
 
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
return;
 
}
 
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
dev_priv->pm.suspended = false;
 
return;
 
}
 
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
return;
 
}
 
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
if (HAS_FBC(dev)) {
if (INTEL_INFO(dev)->gen >= 7) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->display.enable_fbc =
gen7_enable_fbc;
else
dev_priv->display.enable_fbc =
ironlake_enable_fbc;
dev_priv->display.enable_fbc = gen7_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (INTEL_INFO(dev)->gen >= 5) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
} else if (IS_CRESTLINE(dev)) {
} else {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
 
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
/* 855GM needs testing */
}
 
/* For cxsr */
5485,56 → 5556,27
if (HAS_PCH_SPLIT(dev)) {
intel_setup_wm_latency(dev);
 
if (IS_GEN5(dev)) {
if (dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] &&
dev_priv->wm.cur_latency[1])
dev_priv->display.update_wm = ironlake_update_wm;
else {
DRM_DEBUG_KMS("Failed to get proper latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
} else if (IS_GEN6(dev)) {
if (dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] &&
dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
(!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
 
if (IS_GEN5(dev))
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
else if (IS_GEN6(dev))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
} else if (IS_IVYBRIDGE(dev)) {
if (dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] &&
dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
else if (IS_IVYBRIDGE(dev))
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
} else if (IS_HASWELL(dev)) {
if (dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] &&
dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = haswell_update_wm;
dev_priv->display.update_sprite_wm =
haswell_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
else if (IS_HASWELL(dev))
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
} else
dev_priv->display.update_wm = NULL;
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = gen8_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
5568,21 → 5610,21
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_I865G(dev)) {
dev_priv->display.update_wm = i830_update_wm;
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
} else if (IS_GEN2(dev)) {
if (INTEL_INFO(dev)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
} else {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i830_get_fifo_size;
} else if (IS_I85X(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i85x_get_fifo_size;
}
 
if (IS_I85X(dev) || IS_I865G(dev))
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
else
dev_priv->display.init_clock_gating = i830_init_clock_gating;
} else {
dev_priv->display.update_wm = i830_update_wm;
dev_priv->display.init_clock_gating = i830_init_clock_gating;
if (IS_845G(dev))
dev_priv->display.get_fifo_size = i845_get_fifo_size;
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
DRM_ERROR("unexpected fall-through in intel_init_pm\n");
}
}
 
5633,68 → 5675,63
return 0;
}
 
int vlv_gpu_freq(int ddr_freq, int val)
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int mult, base;
int div;
 
switch (ddr_freq) {
/* 4 x czclk */
switch (dev_priv->mem_freq) {
case 800:
mult = 20;
base = 120;
div = 10;
break;
case 1066:
mult = 22;
base = 133;
div = 12;
break;
case 1333:
mult = 21;
base = 125;
div = 16;
break;
default:
return -1;
}
 
return ((val - 0xbd) * mult) + base;
return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
 
int vlv_freq_opcode(int ddr_freq, int val)
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mult, base;
int mul;
 
switch (ddr_freq) {
/* 4 x czclk */
switch (dev_priv->mem_freq) {
case 800:
mult = 20;
base = 120;
mul = 10;
break;
case 1066:
mult = 22;
base = 133;
mul = 12;
break;
case 1333:
mult = 21;
base = 125;
mul = 16;
break;
default:
return -1;
}
 
val /= mult;
val -= base / mult;
val += 0xbd;
 
if (val > 0xea)
val = 0xea;
 
return val;
return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
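/*
 * Worked example for the two conversions above (assuming mem_freq ==
 * 1066, so div == mul == 12):
 *
 *	vlv_gpu_freq(dev_priv, 0xc0)
 *		= DIV_ROUND_CLOSEST(1066 * (0xc0 + 6 - 0xbd), 4 * 12)
 *		= DIV_ROUND_CLOSEST(1066 * 9, 48) = 200 MHz
 *
 *	vlv_freq_opcode(dev_priv, 200)
 *		= DIV_ROUND_CLOSEST(4 * 12 * 200, 1066) + 0xbd - 6
 *		= 9 + 0xb7 = 0xc0
 *
 * so opcode 0xc0 and 200 MHz round-trip through each other.
 */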
 
void intel_pm_init(struct drm_device *dev)
void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_init(&dev_priv->rps.hw_lock);
 
mutex_init(&dev_priv->pc8.lock);
dev_priv->pc8.requirements_met = false;
dev_priv->pc8.gpu_idle = false;
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
 
INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
}
 
/drivers/video/drm/i915/intel_ringbuffer.c
41,6 → 41,16
return space;
}
 
void __intel_ring_advance(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
ring->tail &= ring->size - 1;
if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}
 
static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
275,14 → 285,16
if (!ring->fbc_dirty)
return 0;
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
intel_ring_emit(ring, MI_NOOP);
/* WaFbcNukeOn3DBlt:ivb/hsw */
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, value);
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
intel_ring_advance(ring);
 
ring->fbc_dirty = false;
344,12 → 356,53
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
if (flush_domains)
if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
return 0;
}
 
static int
gen8_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
 
flags |= PIPE_CONTROL_CS_STALL;
 
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
}
if (invalidate_domains) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
return 0;
}
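/*
 * Packet layout note for the function above: on gen8,
 * GFX_OP_PIPE_CONTROL(6) is a six-dword packet - header, flags, the
 * low and high halves of the 48-bit post-sync address, and two data
 * dwords - which is why six values are emitted. The flush bits push
 * completed rendering out to memory; the invalidate bits keep the
 * following batch from reading stale cache contents.
 */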
 
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
385,8 → 438,7
int ret = 0;
u32 head;
 
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_get(dev_priv);
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
455,8 → 507,7
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
out:
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_put(dev_priv);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 
return ret;
}
555,8 → 606,8
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
if (HAS_L3_GPU_CACHE(dev))
I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
if (HAS_L3_DPF(dev))
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
return ret;
}
589,7 → 640,7
#define MBOX_UPDATE_DWORDS 4
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_NOOP);
}
 
608,26 → 659,29
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *useless;
int i, ret;
int i, ret, num_dwords = 4;
 
ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
MBOX_UPDATE_DWORDS) +
4);
if (i915_semaphore_is_enabled(dev))
num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
#undef MBOX_UPDATE_DWORDS
 
ret = intel_ring_begin(ring, num_dwords);
if (ret)
return ret;
#undef MBOX_UPDATE_DWORDS
 
if (i915_semaphore_is_enabled(dev)) {
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = ring->signal_mbox[i];
if (mbox_reg != GEN6_NOSYNC)
update_mboxes(ring, mbox_reg);
}
}
 
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
__intel_ring_advance(ring);
 
return 0;
}
719,7 → 773,7
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
738,9 → 792,9
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
__intel_ring_advance(ring);
 
return 0;
}
912,6 → 966,7
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(ring->mmio_base);
}
 
959,9 → 1014,9
 
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
__intel_ring_advance(ring);
 
return 0;
}
976,17 → 1031,12
if (!dev->irq_enabled)
return false;
 
/* It looks like we need to prevent the gt from suspending while waiting
* for a notify irq, otherwise irqs seem to get lost on at least the
* blt/bsd rings on ivb. */
gen6_gt_force_wake_get(dev_priv);
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
if (HAS_L3_DPF(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~(ring->irq_enable_mask |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
GT_PARITY_ERROR(dev)));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1005,16 → 1055,13
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
if (HAS_L3_DPF(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
else
I915_WRITE_IMR(ring, ~0);
ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
gen6_gt_force_wake_put(dev_priv);
}
 
static bool
1055,6 → 1102,52
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static bool
gen8_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
if (HAS_L3_DPF(dev) && ring->id == RCS) {
I915_WRITE_IMR(ring,
~(ring->irq_enable_mask |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
} else {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
}
POSTING_READ(RING_IMR(ring->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return true;
}
 
static void
gen8_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
if (HAS_L3_DPF(dev) && ring->id == RCS) {
I915_WRITE_IMR(ring,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
} else {
I915_WRITE_IMR(ring, ~0);
}
POSTING_READ(RING_IMR(ring->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
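/*
 * Both irq_get/irq_put pairs above are refcounted: IMR is unmasked
 * only on the 0 -> 1 transition and masked again on 1 -> 0, so a
 * waiter (a sketch, not taken from this patch):
 *
 *	if (ring->irq_get(ring)) {
 *		... sleep until the expected seqno shows up ...
 *		ring->irq_put(ring);
 *	}
 *
 * keeps the interrupt enabled for as long as any overlapping waiter
 * still needs it.
 */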
 
static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 length,
1313,7 → 1406,7
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_ring_idle(ring);
if (ret)
if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
 
1324,6 → 1417,8
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
ring->obj = NULL;
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
 
if (ring->cleanup)
ring->cleanup(ring);
1410,6 → 1505,9
if (ret != -ENOSPC)
return ret;
 
/* force the tail write in case we have been skipping them */
__intel_ring_advance(ring);
 
trace_i915_ring_wait_begin(ring);
/* With GEM the hangcheck timer should kick us out of the loop;
* leaving it early runs the risk of corrupting GEM state (due
1465,7 → 1563,7
int ret;
 
/* We need to add any requests required to flush the objects and ring */
if (ring->outstanding_lazy_request) {
if (ring->outstanding_lazy_seqno) {
ret = i915_add_request(ring, NULL);
if (ret)
return ret;
1485,13 → 1583,23
static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
if (ring->outstanding_lazy_request)
if (ring->outstanding_lazy_seqno)
return 0;
 
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
if (ring->preallocated_lazy_request == NULL) {
struct drm_i915_gem_request *request;
 
request = kmalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
 
ring->preallocated_lazy_request = request;
}
 
static int __intel_ring_begin(struct intel_ring_buffer *ring,
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
 
static int __intel_ring_prepare(struct intel_ring_buffer *ring,
int bytes)
{
int ret;
1508,7 → 1616,6
return ret;
}
 
ring->space -= bytes;
return 0;
}
 
1523,12 → 1630,17
if (ret)
return ret;
 
ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
 
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
return ret;
 
return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
ring->space -= num_dwords * sizeof(uint32_t);
return 0;
}
 
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1535,7 → 1647,7
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
BUG_ON(ring->outstanding_lazy_request);
BUG_ON(ring->outstanding_lazy_seqno);
 
if (INTEL_INFO(ring->dev)->gen >= 6) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1548,17 → 1660,6
ring->hangcheck.seqno = seqno;
}
 
void intel_ring_advance(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
ring->tail &= ring->size - 1;
if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}
 
 
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
1603,6 → 1704,8
return ret;
 
cmd = MI_FLUSH_DW;
if (INTEL_INFO(ring->dev)->gen >= 8)
cmd += 1;
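/*
 * The extra dword covers the upper 32 bits of the post-sync address
 * on gen8; MI_FLUSH_DW encodes its dword length in the low bits of
 * the opcode, so adding one to cmd grows the packet by one dword.
 */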
/*
* Bspec vol 1c.5 - video engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
1614,13 → 1717,42
MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(ring->dev)->gen >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
}
intel_ring_advance(ring);
return 0;
}
 
static int
gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
unsigned flags)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
!(flags & I915_DISPATCH_SECURE);
int ret;
 
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
 
/* FIXME(BDW): Address space and security selectors. */
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
intel_ring_emit(ring, offset);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
return 0;
}
 
static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
unsigned flags)
1676,6 → 1808,8
return ret;
 
cmd = MI_FLUSH_DW;
if (INTEL_INFO(ring->dev)->gen >= 8)
cmd += 1;
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
1687,11 → 1821,16
MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(ring->dev)->gen >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
}
intel_ring_advance(ring);
 
if (IS_GEN7(dev) && flush)
if (IS_GEN7(dev) && !invalidate && flush)
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
 
return 0;
1711,8 → 1850,14
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush;
if (INTEL_INFO(dev)->gen >= 8) {
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
} else {
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
}
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
1754,6 → 1899,8
ring->write_tail = ring_write_tail;
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
1869,7 → 2016,7
ring->id = VCS;
 
ring->write_tail = ring_write_tail;
if (IS_GEN6(dev) || IS_GEN7(dev)) {
if (INTEL_INFO(dev)->gen >= 6) {
ring->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev))
1878,10 → 2025,20
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
} else {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
ring->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
}
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
1927,10 → 2084,18
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
} else {
ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
}
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
1959,10 → 2124,19
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
 
if (INTEL_INFO(dev)->gen >= 8) {
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
} else {
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
}
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
/drivers/video/drm/i915/intel_ringbuffer.h
34,6 → 34,7
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
HANGCHECK_KICK,
140,7 → 141,8
/**
* Do we have some not yet emitted requests outstanding?
*/
u32 outstanding_lazy_request;
struct drm_i915_gem_request *preallocated_lazy_request;
u32 outstanding_lazy_seqno;
bool gpu_caches_dirty;
bool fbc_dirty;
 
237,7 → 239,12
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
void intel_ring_advance(struct intel_ring_buffer *ring);
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);
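/*
 * Emit-sequence sketch (illustrative, not taken from this patch):
 * intel_ring_advance() now only wraps the software tail, while
 * __intel_ring_advance() also writes the tail register (unless the
 * ring has been stopped for error capture):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * The add_request paths call __intel_ring_advance() once the whole
 * request has been assembled.
 */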
 
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
258,8 → 265,8
 
static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
BUG_ON(ring->outstanding_lazy_request == 0);
return ring->outstanding_lazy_request;
BUG_ON(ring->outstanding_lazy_seqno == 0);
return ring->outstanding_lazy_seqno;
}
 
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
/drivers/video/drm/i915/intel_sdvo.c
413,25 → 413,36
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
int i;
int i, pos = 0;
#define BUF_LEN 256
char buffer[BUF_LEN];
 
DRM_DEBUG_KMS("%s: W: %02X ",
SDVO_NAME(intel_sdvo), cmd);
for (i = 0; i < args_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
for (; i < 8; i++)
DRM_LOG_KMS(" ");
#define BUF_PRINT(args...) \
pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
 
 
for (i = 0; i < args_len; i++) {
BUF_PRINT("%02X ", ((u8 *)args)[i]);
}
for (; i < 8; i++) {
BUF_PRINT(" ");
}
for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
break;
}
}
if (i == ARRAY_SIZE(sdvo_cmd_names))
DRM_LOG_KMS("(%02X)", cmd);
DRM_LOG_KMS("\n");
if (i == ARRAY_SIZE(sdvo_cmd_names)) {
BUF_PRINT("(%02X)", cmd);
}
BUG_ON(pos >= BUF_LEN - 1);
#undef BUF_PRINT
#undef BUF_LEN
 
DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
}
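/*
 * The BUF_PRINT idiom above leans on two snprintf() guarantees: it
 * never writes more than the size it is given, and it returns the
 * length that would have been written. A minimal standalone sketch
 * (hypothetical names):
 *
 *	enum { LEN = 256 };
 *	int pos = 0;
 *	char buf[LEN];
 *
 *	pos += snprintf(buf + pos, max_t(int, LEN - pos, 0),
 *			"%02X ", 0xab);
 *
 * On overflow pos runs past the buffer, the max_t() clamp keeps the
 * size argument from going negative, and the BUG_ON(pos >= BUF_LEN - 1)
 * then catches the truncation.
 */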
 
static const char *cmd_status_names[] = {
"Power on",
"Success",
512,9 → 523,10
{
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i;
int i, pos = 0;
#define BUF_LEN 256
char buffer[BUF_LEN];
 
DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
 
/*
* The documentation states that all commands will be
551,10 → 563,13
goto log_fail;
}
 
#define BUF_PRINT(args...) \
pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
 
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
DRM_LOG_KMS("(%s)", cmd_status_names[status]);
BUF_PRINT("(%s)", cmd_status_names[status]);
else
DRM_LOG_KMS("(??? %d)", status);
BUF_PRINT("(??? %d)", status);
 
if (status != SDVO_CMD_STATUS_SUCCESS)
goto log_fail;
565,13 → 580,17
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]))
goto log_fail;
DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
DRM_LOG_KMS("\n");
BUG_ON(pos >= BUF_LEN - 1);
#undef BUF_PRINT
#undef BUF_LEN
 
DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
return true;
 
log_fail:
DRM_LOG_KMS("... failed\n");
DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
return false;
}
 
933,7 → 952,7
 
static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
unsigned if_index, uint8_t tx_rate,
uint8_t *data, unsigned length)
const uint8_t *data, unsigned length)
{
uint8_t set_buf_index[2] = { if_index, 0 };
uint8_t hbuf_size, tmp[8];
1068,7 → 1087,7
 
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
{
unsigned dotclock = pipe_config->adjusted_mode.clock;
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
 
/* SDVO TV has fixed PLL values that depend on its clock range,
1133,7 → 1152,6
*/
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
adjusted_mode->clock *= pipe_config->pixel_multiplier;
 
if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
1217,11 → 1235,7
!intel_sdvo_set_tv_format(intel_sdvo))
return;
 
/* We have tried to get input timing in mode_fixup, and filled into
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
input_dtd.part1.clock /= crtc->config.pixel_multiplier;
 
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
1330,6 → 1344,7
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
int dotclock;
u32 flags = 0, sdvox;
u8 val;
bool ret;
1368,6 → 1383,13
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
 
dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
 
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
 
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
&val, 1)) {
1514,7 → 1536,8
intel_modeset_check_state(connector->dev);
}
 
static int intel_sdvo_mode_valid(struct drm_connector *connector,
static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1770,6 → 1793,9
{
struct edid *edid;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
 
/* set the bus switch and get the modes */
edid = intel_sdvo_get_edid(connector);
 
1865,6 → 1891,9
uint32_t reply = 0, format_map = 0;
int i;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
 
/* Read the list of supported input resolutions for the selected TV
* format.
*/
1899,6 → 1928,9
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct drm_display_mode *newmode;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
 
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
* SDVO->LVDS transcoders can't cope with the EDID mode.
1930,7 → 1962,6
break;
}
}
 
}
 
static int intel_sdvo_get_modes(struct drm_connector *connector)
1998,7 → 2029,6
intel_sdvo_connector->tv_format);
 
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_sdvo_connector);
}
2394,7 → 2424,9
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
 
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
DRM_DEBUG_KMS("initialising DVI device %d\n", device);
 
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
 
2442,7 → 2474,9
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
 
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
DRM_DEBUG_KMS("initialising TV type %d\n", type);
 
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
 
2467,6 → 2501,7
return true;
 
err:
drm_sysfs_connector_remove(connector);
intel_sdvo_destroy(connector);
return false;
}
2479,7 → 2514,9
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
 
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
DRM_DEBUG_KMS("initialising analog device %d\n", device);
 
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
 
2510,7 → 2547,9
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
 
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
 
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
 
2534,6 → 2573,7
return true;
 
err:
drm_sysfs_connector_remove(connector);
intel_sdvo_destroy(connector);
return false;
}
2605,10 → 2645,12
 
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (intel_attached_encoder(connector) == &intel_sdvo->base)
if (intel_attached_encoder(connector) == &intel_sdvo->base) {
drm_sysfs_connector_remove(connector);
intel_sdvo_destroy(connector);
}
}
}
 
static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
2876,7 → 2918,7
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
 
/drivers/video/drm/i915/intel_sdvo_regs.h
59,7 → 59,7
unsigned int stall_support:1;
unsigned int pad:1;
u16 output_flags;
} __attribute__((packed));
} __packed;
 
/* Note: SDVO detailed timing flags match EDID misc flags. */
#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
94,12 → 94,12
u8 v_sync_off_high;
u8 reserved;
} part2;
} __attribute__((packed));
} __packed;
 
struct intel_sdvo_pixel_clock_range {
u16 min; /**< pixel clock, in 10kHz units */
u16 max; /**< pixel clock, in 10kHz units */
} __attribute__((packed));
} __packed;
 
struct intel_sdvo_preferred_input_timing_args {
u16 clock;
108,7 → 108,7
u8 interlace:1;
u8 scaled:1;
u8 pad:6;
} __attribute__((packed));
} __packed;
 
/* I2C registers for SDVO */
#define SDVO_I2C_ARG_0 0x07
162,7 → 162,7
unsigned int input0_trained:1;
unsigned int input1_trained:1;
unsigned int pad:6;
} __attribute__((packed));
} __packed;
 
/** Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
219,7 → 219,7
unsigned int ambient_light_interrupt:1;
unsigned int hdmi_audio_encrypt_change:1;
unsigned int pad:6;
} __attribute__((packed));
} __packed;
 
/**
* Selects which input is affected by future input commands.
232,7 → 232,7
struct intel_sdvo_set_target_input_args {
unsigned int target_1:1;
unsigned int pad:7;
} __attribute__((packed));
} __packed;
 
/**
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
370,7 → 370,7
unsigned int hdtv_std_eia_7702a_480i_60:1;
unsigned int hdtv_std_eia_7702a_480p_60:1;
unsigned int pad:3;
} __attribute__((packed));
} __packed;
 
#define SDVO_CMD_GET_TV_FORMAT 0x28
 
401,7 → 401,7
unsigned int secam_l:1;
unsigned int secam_60:1;
unsigned int pad:5;
} __attribute__((packed));
} __packed;
 
struct intel_sdvo_sdtv_resolution_reply {
unsigned int res_320x200:1;
426,7 → 426,7
unsigned int res_1024x768:1;
unsigned int res_1280x1024:1;
unsigned int pad:5;
} __attribute__((packed));
} __packed;
 
/* Get supported resolution with square pixel aspect ratio that can be
scaled for the requested HDTV format */
463,7 → 463,7
unsigned int hdtv_std_eia_7702a_480i_60:1;
unsigned int hdtv_std_eia_7702a_480p_60:1;
unsigned int pad:6;
} __attribute__((packed));
} __packed;
 
struct intel_sdvo_hdtv_resolution_reply {
unsigned int res_640x480:1;
517,7 → 517,7
 
unsigned int res_1280x768:1;
unsigned int pad5:7;
} __attribute__((packed));
} __packed;
 
/* Getting the supported power state returns info for the encoder and
monitor; it relies on the last SetTargetInput and SetTargetOutput calls */
557,13 → 557,13
 
unsigned int t4_high:2;
unsigned int pad:6;
} __attribute__((packed));
} __packed;
 
#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
struct sdvo_max_backlight_reply {
u8 max_value;
u8 default_value;
} __attribute__((packed));
} __packed;
 
#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
573,7 → 573,7
u16 trip_low;
u16 trip_high;
u16 value;
} __attribute__((packed));
} __packed;
#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
struct sdvo_set_ambient_light_reply {
u16 trip_low;
580,7 → 580,7
u16 trip_high;
unsigned int enable:1;
unsigned int pad:7;
} __attribute__((packed));
} __packed;
 
/* Set display power state */
#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
608,7 → 608,7
unsigned int dither:1;
unsigned int tv_chroma_filter:1;
unsigned int tv_luma_filter:1;
} __attribute__((packed));
} __packed;
 
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
630,7 → 630,7
struct intel_sdvo_enhancement_limits_reply {
u16 max_value;
u16 default_value;
} __attribute__((packed));
} __packed;
 
#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
671,7 → 671,7
#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
u16 value;
} __attribute__((packed));
} __packed;
 
#define SDVO_CMD_GET_DOT_CRAWL 0x70
#define SDVO_CMD_SET_DOT_CRAWL 0x71
727,4 → 727,4
struct intel_sdvo_encode {
u8 dvi_rev;
u8 hdmi_rev;
} __attribute__ ((packed));
} __packed;
/drivers/video/drm/i915/intel_sideband.c
25,7 → 25,10
#include "i915_drv.h"
#include "intel_drv.h"
 
/* IOSF sideband */
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
* VLV_VLV2_PUNIT_HAS_0.8.docx
*/
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
u32 port, u32 opcode, u32 addr, u32 *val)
{
87,6 → 90,22
mutex_unlock(&dev_priv->dpio_lock);
}
 
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
 
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
PUNIT_OPCODE_REG_READ, reg, &val);
 
return val;
}
 
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
PUNIT_OPCODE_REG_WRITE, reg, &val);
}
 
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
101,19 → 120,74
return val;
}
 
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
PUNIT_OPCODE_REG_READ, reg, &val);
return val;
}
 
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
DPIO_OPCODE_REG_READ, reg, &val);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
PUNIT_OPCODE_REG_WRITE, reg, &val);
}
 
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
PUNIT_OPCODE_REG_READ, reg, &val);
return val;
}
 
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
PUNIT_OPCODE_REG_WRITE, reg, &val);
}
 
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
PUNIT_OPCODE_REG_READ, reg, &val);
return val;
}
 
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
PUNIT_OPCODE_REG_WRITE, reg, &val);
}
 
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
PUNIT_OPCODE_REG_READ, reg, &val);
return val;
}
 
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
PUNIT_OPCODE_REG_WRITE, reg, &val);
}
 
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
{
u32 val = 0;
 
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
DPIO_OPCODE_REG_READ, reg, &val);
return val;
}
 
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
DPIO_OPCODE_REG_WRITE, reg, &val);
}
 
175,3 → 249,17
return;
}
}
 
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
DPIO_OPCODE_REG_READ, reg, &val);
return val;
}
 
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
DPIO_OPCODE_REG_WRITE, reg, &val);
}
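/*
 * Every accessor added to this file follows the same template
 * (illustrative; vlv_unit_read and IOSF_PORT_UNIT are placeholders,
 * and only the IOSF port - plus, for DPIO/FLISDSI, the devfn and
 * opcodes - varies per unit):
 *
 *	u32 vlv_unit_read(struct drm_i915_private *dev_priv, u32 reg)
 *	{
 *		u32 val = 0;
 *		vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_UNIT,
 *				PUNIT_OPCODE_REG_READ, reg, &val);
 *		return val;
 *	}
 */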
/drivers/video/drm/i915/intel_sprite.c
104,6 → 104,12
break;
}
 
/*
* Enable gamma to match primary/cursor plane behaviour.
* FIXME should be user controllable via properties.
*/
sprctl |= SP_GAMMA_ENABLE;
 
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
 
135,7 → 141,7
 
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
152,7 → 158,7
I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
~SP_ENABLE);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
I915_WRITE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
 
intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
224,7 → 230,6
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
 
sprctl = I915_READ(SPRCTL(pipe));
 
257,10 → 262,16
BUG();
}
 
/*
* Enable gamma to match primary/cursor plane behaviour.
* FIXME should be user controllable via properties.
*/
sprctl |= SPRITE_GAMMA_ENABLE;
 
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
 
if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
267,7 → 278,7
 
sprctl |= SPRITE_ENABLE;
 
if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
279,21 → 290,8
crtc_w--;
crtc_h--;
 
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
dev_priv->sprite_scaling_enabled |= 1 << pipe;
 
if (!scaling_was_enabled) {
intel_update_watermarks(dev);
intel_wait_for_vblank(dev, pipe);
}
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
 
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
306,7 → 304,7
 
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
317,13 → 315,9
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe),
I915_WRITE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
 
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
intel_update_watermarks(dev);
}
 
static void
333,7 → 327,6
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
 
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
340,16 → 333,16
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
I915_WRITE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
 
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
*/
intel_wait_for_vblank(dev, pipe);
 
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
 
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
intel_update_watermarks(dev);
}
 
static int
453,6 → 446,12
BUG();
}
 
/*
* Enable gamma to match primary/cursor plane behaviour.
* FIXME should be user controllable via properties.
*/
dvscntr |= DVS_GAMMA_ENABLE;
 
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
 
470,7 → 469,7
crtc_h--;
 
dvsscale = 0;
if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
490,7 → 489,7
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe),
I915_WRITE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
507,9 → 506,15
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
I915_WRITE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
 
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
*/
intel_wait_for_vblank(dev, pipe);
 
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
}
 
521,15 → 526,30
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
 
if (!intel_crtc->primary_disabled)
if (intel_crtc->primary_enabled)
return;
 
intel_crtc->primary_disabled = false;
intel_update_fbc(dev);
intel_crtc->primary_enabled = true;
 
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
* versa.
*/
if (intel_crtc->config.ips_enabled) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
hsw_enable_ips(intel_crtc);
}
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
static void
intel_disable_primary(struct drm_crtc *crtc)
{
538,13 → 558,26
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
 
if (intel_crtc->primary_disabled)
if (!intel_crtc->primary_enabled)
return;
 
intel_crtc->primary_enabled = false;
 
mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.plane == intel_crtc->plane)
intel_disable_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
* versa.
*/
hsw_disable_ips(intel_crtc);
 
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
 
intel_crtc->primary_disabled = true;
intel_update_fbc(dev);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
}
 
static int
615,6 → 648,15
}
}
 
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
struct drm_intel_sprite_colorkey key;
 
intel_plane->get_colorkey(&intel_plane->base, &key);
 
return key.flags != I915_SET_COLORKEY_NONE;
}
 
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
623,15 → 665,12
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int ret = 0;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int ret;
bool disable_primary = false;
bool visible;
int hscale, vscale;
652,30 → 691,24
.y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
.x2 = crtc->mode.hdisplay,
.y2 = crtc->mode.vdisplay,
.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
};
const struct {
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
} orig = {
.crtc_x = crtc_x,
.crtc_y = crtc_y,
.crtc_w = crtc_w,
.crtc_h = crtc_h,
.src_x = src_x,
.src_y = src_y,
.src_w = src_w,
.src_h = src_h,
};
 
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
old_obj = intel_plane->obj;
 
intel_plane->crtc_x = crtc_x;
intel_plane->crtc_y = crtc_y;
intel_plane->crtc_w = crtc_w;
intel_plane->crtc_h = crtc_h;
intel_plane->src_x = src_x;
intel_plane->src_y = src_y;
intel_plane->src_w = src_w;
intel_plane->src_h = src_h;
 
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
DRM_DEBUG_KMS("Pipe disabled\n");
return -EINVAL;
}
 
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
809,8 → 842,8
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
disable_primary = drm_rect_equals(&dst, &clip);
WARN_ON(disable_primary && !visible);
disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
WARN_ON(disable_primary && !visible && intel_crtc->active);
 
mutex_lock(&dev->struct_mutex);
 
820,11 → 853,23
* the sprite planes only require 128KiB alignment and 32 PTE padding.
*/
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 
mutex_unlock(&dev->struct_mutex);
 
if (ret)
goto out_unlock;
return ret;
 
intel_plane->crtc_x = orig.crtc_x;
intel_plane->crtc_y = orig.crtc_y;
intel_plane->crtc_w = orig.crtc_w;
intel_plane->crtc_h = orig.crtc_h;
intel_plane->src_x = orig.src_x;
intel_plane->src_y = orig.src_y;
intel_plane->src_w = orig.src_w;
intel_plane->src_h = orig.src_h;
intel_plane->obj = obj;
 
if (intel_crtc->active) {
/*
* Be sure to re-enable the primary before the sprite is no longer
* covering it fully.
841,6 → 886,7
 
if (disable_primary)
intel_disable_primary(crtc);
}
 
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj) {
850,17 → 896,15
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
if (old_obj != obj) {
mutex_unlock(&dev->struct_mutex);
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
if (old_obj != obj && intel_crtc->active)
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
mutex_lock(&dev->struct_mutex);
}
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
 
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
return 0;
}
 
static int
868,7 → 912,7
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
struct intel_crtc *intel_crtc;
 
if (!plane->fb)
return 0;
876,23 → 920,27
if (WARN_ON(!plane->crtc))
return -EINVAL;
 
intel_crtc = to_intel_crtc(plane->crtc);
 
if (intel_crtc->active) {
intel_enable_primary(plane->crtc);
intel_plane->disable_plane(plane, plane->crtc);
}
 
if (!intel_plane->obj)
goto out;
 
if (intel_plane->obj) {
if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_plane->pipe);
 
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_plane->obj);
intel_plane->obj = NULL;
mutex_unlock(&dev->struct_mutex);
out:
 
return ret;
intel_plane->obj = NULL;
}
 
return 0;
}
 
static void intel_destroy_plane(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
921,7 → 969,7
 
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out_unlock;
}
 
950,7 → 998,7
 
obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out_unlock;
}
 
1034,7 → 1082,7
if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
 
intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;
 
1058,6 → 1106,7
break;
 
case 7:
case 8:
if (IS_IVYBRIDGE(dev)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
/drivers/video/drm/i915/intel_uncore.c
64,7 → 64,8
__raw_posting_read(dev_priv, ECOBUS);
}
 
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
int fw_engine)
{
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
89,11 → 90,12
__raw_posting_read(dev_priv, ECOBUS);
}
 
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
int fw_engine)
{
u32 forcewake_ack;
 
if (IS_HASWELL(dev_priv->dev))
if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_MT_ACK;
112,6 → 114,7
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
/* WaRsForcewakeWaitTC0:ivb,hsw */
if (INTEL_INFO(dev_priv->dev)->gen < 8)
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
120,12 → 123,12
u32 gtfifodbg;
 
gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
"MMIO read or write has been dropped %x\n", gtfifodbg))
__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
 
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
133,7 → 136,8
gen6_gt_check_fifodbg(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
146,12 → 150,19
{
int ret = 0;
 
/* On VLV, the FIFO is shared by both SW and HW,
* so we need to read FREE_ENTRIES every time. */
if (IS_VALLEYVIEW(dev_priv->dev))
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
 
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
170,40 → 181,126
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
 
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
int fw_engine)
{
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_VLV) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Render to ack.\n");
}
 
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
DRM_ERROR("Timed out: waiting for media to ack.\n");
}
 
/* WaRsForcewakeWaitTC0:vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
 
}
 
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
int fw_engine)
{
 
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
 
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
 
}
 
void vlv_force_wake_get(struct drm_i915_private *dev_priv,
int fw_engine)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (FORCEWAKE_RENDER & fw_engine) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
}
if (FORCEWAKE_MEDIA & fw_engine) {
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
}
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
void vlv_force_wake_put(struct drm_i915_private *dev_priv,
int fw_engine)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
if (FORCEWAKE_RENDER & fw_engine) {
WARN_ON(dev_priv->uncore.fw_rendercount == 0);
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
}
 
if (FORCEWAKE_MEDIA & fw_engine) {
WARN_ON(dev_priv->uncore.fw_mediacount == 0);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
}
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
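 
/*
* Illustrative usage sketch (not part of the driver): the per-engine
* get/put pair above is meant to bracket accesses to a single engine's
* register range, e.g.
*
*	vlv_force_wake_get(dev_priv, FORCEWAKE_RENDER);
*	... render-range register reads/writes ...
*	vlv_force_wake_put(dev_priv, FORCEWAKE_RENDER);
*
* The hardware is woken only on the 0->1 transition of the refcount
* and released again on the 1->0 transition, so nesting is cheap.
*/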
 
static void gen6_force_wake_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
223,66 → 320,47
 
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
if (IS_HASWELL(dev) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
* NB: We can't write IDICR yet because we do not have gt funcs
* set up */
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
 
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
} else if (IS_HASWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
 
/* IVB configs may use multi-threaded forcewake */
 
/* A small trick here: if the BIOS hasn't configured
* MT forcewake and the device is in RC6, then
* force_wake_mt_get will not wake the device and the
* ECOBUS read will return zero, which is (correctly)
* interpreted by the test below as MT forcewake
* being disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
 
if (ecobus & FORCEWAKE_MT_ENABLE) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_mt_put;
} else {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
} else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
 
intel_uncore_forcewake_reset(dev);
}
 
void intel_uncore_sanitize(struct drm_device *dev)
{
intel_uncore_forcewake_reset(dev);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_val;
 
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
 
/* Turn off power gating; required especially for BIOS-less systems */
if (IS_VALLEYVIEW(dev)) {
 
mutex_lock(&dev_priv->rps.hw_lock);
reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
 
if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
 
mutex_unlock(&dev_priv->rps.hw_lock);
 
}
}
 
/*
* Generally this is called implicitly by the register read function. However,
290,13 → 368,22
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
 
if (!dev_priv->uncore.funcs.force_wake_get)
return;
 
intel_runtime_pm_get(dev_priv);
 
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev))
return vlv_force_wake_get(dev_priv, fw_engine);
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
303,21 → 390,33
/*
* see gen6_gt_force_wake_get()
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
 
if (!dev_priv->uncore.funcs.force_wake_put)
return;
 
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev))
return vlv_force_wake_put(dev_priv, fw_engine);
 
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv);
if (--dev_priv->uncore.forcewake_count == 0) {
dev_priv->uncore.forcewake_count++;
mod_delayed_work(dev_priv->wq,
&dev_priv->uncore.force_wake_work,
1);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
intel_runtime_pm_put(dev_priv);
}
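 
/*
* Usage sketch (illustrative only), following the comment above
* gen6_gt_force_wake_get(): bracket any multi-register sequence that
* must not race with GT power-down:
*
*	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
*	... dependent register accesses ...
*	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
*
* Note that the final put is deferred: the count is re-armed and
* gen6_force_wake_work() drops it from a delayed work item, which
* coalesces back-to-back get/put cycles into a single hardware wake.
*/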
 
/* Fast paths for the performance-critical registers: anything below 0x40000 (except FORCEWAKE itself) needs forcewake handling */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
((reg) < 0x40000 && (reg) != FORCEWAKE)
 
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
331,8 → 430,7
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
342,51 → 440,167
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_ERROR("Unclaimed write to %x\n", reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
 
#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
"Device suspended\n");
}
 
#define REG_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
if (dev_priv->info->gen == 5) \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define REG_READ_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
 
#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
REG_READ_FOOTER; \
}
 
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
ilk_dummy_write(dev_priv); \
val = __raw_i915_read##x(dev_priv, reg); \
REG_READ_FOOTER; \
}
 
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv); \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
FORCEWAKE_ALL); \
val = __raw_i915_read##x(dev_priv, reg); \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_put(dev_priv); \
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
FORCEWAKE_ALL); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val; \
REG_READ_FOOTER; \
}
 
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
unsigned fwengine = 0; \
unsigned *fwcount; \
REG_READ_HEADER(x); \
if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
fwengine = FORCEWAKE_RENDER; \
fwcount = &dev_priv->uncore.fw_rendercount; \
} \
else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
fwengine = FORCEWAKE_MEDIA; \
fwcount = &dev_priv->uncore.fw_mediacount; \
} \
if (fwengine != 0) { \
if ((*fwcount)++ == 0) \
(dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
fwengine); \
val = __raw_i915_read##x(dev_priv, reg); \
if (--(*fwcount) == 0) \
(dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
fwengine); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
REG_READ_FOOTER; \
}
 
#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
 
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)
 
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER
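 
/* The x-macro expansions above stamp out 8/16/32/64-bit read accessors
* for each platform family from a single template; the write-side
* templates below follow the same pattern. */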
 
#define REG_WRITE_HEADER \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define REG_WRITE_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
 
#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
REG_WRITE_FOOTER; \
}
 
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
ilk_dummy_write(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
REG_WRITE_FOOTER; \
}
 
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
REG_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
if (dev_priv->info->gen == 5) \
ilk_dummy_write(dev_priv); \
assert_device_not_suspended(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
REG_WRITE_FOOTER; \
}
 
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
REG_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
assert_device_not_suspended(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
393,20 → 607,205
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
REG_WRITE_FOOTER; \
}
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
 
static const u32 gen8_shadowed_regs[] = {
FORCEWAKE_MT,
GEN6_RPNSWREQ,
GEN6_RC_VIDEO_FREQ,
RING_TAIL(RENDER_RING_BASE),
RING_TAIL(GEN6_BSD_RING_BASE),
RING_TAIL(VEBOX_RING_BASE),
RING_TAIL(BLT_RING_BASE),
/* TODO: Other registers are not yet used */
};
 
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
if (reg == gen8_shadowed_regs[i])
return true;
 
return false;
}
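 
/*
* Note: the linear scan above is deliberate; gen8_shadowed_regs has
* only a handful of entries, so a hash or bsearch would not pay for
* itself on the register-write path.
*/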
 
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
REG_WRITE_HEADER; \
if (__needs_put) { \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
FORCEWAKE_ALL); \
} \
__raw_i915_write##x(dev_priv, reg, val); \
if (__needs_put) { \
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
FORCEWAKE_ALL); \
} \
REG_WRITE_FOOTER; \
}
 
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)
 
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
 
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
gen6_force_wake_work);
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
 
/* IVB configs may use multi-threaded forcewake */
 
/* A small trick here: if the BIOS hasn't configured
* MT forcewake and the device is in RC6, then
* force_wake_mt_get will not wake the device and the
* ECOBUS read will return zero, which is (correctly)
* interpreted by the test below as MT forcewake
* being disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
 
if (ecobus & FORCEWAKE_MT_ENABLE) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_mt_put;
} else {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
} else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
}
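 
/* Wire up the MMIO accessors below; the switch intentionally lets any
* newer, unlisted gen fall through to the gen8 write paths and the
* gen6 read paths via the default label. */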
 
switch (INTEL_INFO(dev)->gen) {
default:
dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
dev_priv->uncore.funcs.mmio_writew = gen8_write16;
dev_priv->uncore.funcs.mmio_writel = gen8_write32;
dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
dev_priv->uncore.funcs.mmio_readb = gen6_read8;
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
break;
case 7:
case 6:
if (IS_HASWELL(dev)) {
dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
dev_priv->uncore.funcs.mmio_writew = hsw_write16;
dev_priv->uncore.funcs.mmio_writel = hsw_write32;
dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
} else {
dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
dev_priv->uncore.funcs.mmio_writew = gen6_write16;
dev_priv->uncore.funcs.mmio_writel = gen6_write32;
dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
}
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.mmio_readb = vlv_read8;
dev_priv->uncore.funcs.mmio_readw = vlv_read16;
dev_priv->uncore.funcs.mmio_readl = vlv_read32;
dev_priv->uncore.funcs.mmio_readq = vlv_read64;
} else {
dev_priv->uncore.funcs.mmio_readb = gen6_read8;
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
}
break;
case 5:
dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
dev_priv->uncore.funcs.mmio_writew = gen5_write16;
dev_priv->uncore.funcs.mmio_writel = gen5_write32;
dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
dev_priv->uncore.funcs.mmio_readb = gen5_read8;
dev_priv->uncore.funcs.mmio_readw = gen5_read16;
dev_priv->uncore.funcs.mmio_readl = gen5_read32;
dev_priv->uncore.funcs.mmio_readq = gen5_read64;
break;
case 4:
case 3:
case 2:
dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
dev_priv->uncore.funcs.mmio_writew = gen4_write16;
dev_priv->uncore.funcs.mmio_writel = gen4_write32;
dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
dev_priv->uncore.funcs.mmio_readb = gen4_read8;
dev_priv->uncore.funcs.mmio_readw = gen4_read16;
dev_priv->uncore.funcs.mmio_readl = gen4_read32;
dev_priv->uncore.funcs.mmio_readq = gen4_read64;
break;
}
}
 
void intel_uncore_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
// flush_delayed_work(&dev_priv->uncore.force_wake_work);
 
/* Paranoia: make sure we have disabled everything before we exit. */
intel_uncore_sanitize(dev);
}
 
static const struct register_whitelist {
uint64_t offset;
uint32_t size;
uint32_t gen_bitmask; /* supported gens as a bitmask: bit n set for gen n, e.g. 0x10 for gen4, 0x30 for gens 4 and 5 */
} whitelist[] = {
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
};
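 
/*
* Worked example for gen_bitmask (illustrative): bit n is set for each
* supported gen n, so an entry is matched with a test along the lines
* of
*
*	if (entry->gen_bitmask & (1 << INTEL_INFO(dev)->gen))
*		... entry applies to this device ...
*
* e.g. 0xF0 covers gens 4-7, and 0x1F0 extends that to gen 8.
*/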
 
int i915_reg_read_ioctl(struct drm_device *dev,
447,33 → 846,37
return 0;
}
 
static int i8xx_do_reset(struct drm_device *dev)
int i915_get_reset_stats_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reset_stats *args = data;
struct i915_ctx_hang_stats *hs;
int ret;
 
if (IS_I85X(dev))
return -ENODEV;
if (args->flags || args->pad)
return -EINVAL;
 
I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
POSTING_READ(D_STATE);
if (args->ctx_id == DEFAULT_CONTEXT_ID)
return -EPERM;
 
if (IS_I830(dev) || IS_845G(dev)) {
I915_WRITE(DEBUG_RESET_I830,
DEBUG_RESET_DISPLAY |
DEBUG_RESET_RENDER |
DEBUG_RESET_FULL);
POSTING_READ(DEBUG_RESET_I830);
msleep(1);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
 
I915_WRITE(DEBUG_RESET_I830, 0);
POSTING_READ(DEBUG_RESET_I830);
hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
if (IS_ERR(hs)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(hs);
}
 
msleep(1);
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
 
I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
POSTING_READ(D_STATE);
args->batch_active = hs->batch_active;
args->batch_pending = hs->batch_pending;
 
mutex_unlock(&dev->struct_mutex);
 
return 0;
}
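 
/* Note: reset_count above is the device-global GPU reset counter,
* while batch_active/batch_pending come from the per-context hang
* statistics looked up under struct_mutex. */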
 
560,12 → 963,12
 
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
else
dev_priv->uncore.funcs.force_wake_put(dev_priv);
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
 
/* Restore fifo count */
dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return ret;
574,24 → 977,15
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
case 8:
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
case 4: return i965_do_reset(dev);
case 2: return i8xx_do_reset(dev);
default: return -ENODEV;
}
}
 
void intel_uncore_clear_errors(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* XXX needs spinlock around caller's grouping */
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
 
void intel_uncore_check_errors(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/drivers/video/drm/i915/kms_display.c
61,11 → 61,11
u32 mask_seqno;
u32 check_mouse;
u32 check_m_pixel;
 
};
 
 
static display_t *os_display;
struct drm_i915_gem_object *main_fb_obj;
 
u32_t cmd_buffer;
u32_t cmd_offset;
83,6 → 83,7
void disable_mouse(void)
{};
 
 
static char *manufacturer_name(unsigned char *x)
{
static char name[4];
106,7 → 107,6
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_i915_gem_object *fb_obj;
struct drm_mode_set set;
const char *con_name;
const char *enc_name;
164,7 → 164,6
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
 
fb_obj = get_fb_obj();
fb = fb_helper->fb;
 
fb->width = reqmode->width;
172,7 → 171,7
 
if(dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_NONE)
{
fb_obj->tiling_mode = I915_TILING_X;
main_fb_obj->tiling_mode = I915_TILING_X;
 
if(IS_GEN3(dev))
for (stride = 512; stride < reqmode->width * 4; stride <<= 1);
181,7 → 180,7
}
else
{
fb_obj->tiling_mode = I915_TILING_NONE;
main_fb_obj->tiling_mode = I915_TILING_NONE;
stride = ALIGN(reqmode->width * 4, 64);
}
 
190,7 → 189,7
fb->pitches[2] =
fb->pitches[3] = stride;
 
fb_obj->stride = stride;
main_fb_obj->stride = stride;
 
fb->bits_per_pixel = 32;
fb->depth = 24;
199,7 → 198,7
crtc->enabled = true;
os_display->crtc = crtc;
 
i915_gem_object_put_fence(fb_obj);
i915_gem_object_put_fence(main_fb_obj);
 
set.crtc = crtc;
set.x = 0;
931,7 → 930,7
return -EINVAL;
}
 
#if 1
#if 0
if(warn_count < 1000)
{
printf("left %d top %d right %d bottom %d\n",
/drivers/video/drm/i915/kos_gem_fb.c
161,7 → 161,7
return NULL;
}
 
vma = i915_gem_vma_create(obj, ggtt);
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
/drivers/video/drm/i915/main.c
167,8 → 167,8
if( GetService("DISPLAY") != 0 )
return 0;
 
printf("\ni915 v3.12.5 build %s %s\nusage: i915 [options]\n"
"-pm=<0,1> Enable powersavings, fbc, downclocking, etc. (default: 0 - false)\n",
printf("\ni915 v3.14-rc1 build %s %s\nusage: i915 [options]\n"
"-pm=<0,1> Enable powersavings, fbc, downclocking, etc. (default: 1 - true)\n",
__DATE__, __TIME__);
printf("-rc6=<-1,0-7> Enable power-saving render C-state 6.\n"
" Different stages can be selected via bitmask values\n"
176,8 → 176,8
" For example, 3 would enable rc6 and deep rc6, and 7 would enable everything.\n"
" default: -1 (use per-chip default)\n");
printf("-fbc=<-1,0,1> Enable frame buffer compression for power savings\n"
" (default: 0 - false)\n");
printf("-ppgt=<0,1> Enable PPGTT (default: 0 - false)\n");
" (default: -1 (use per-chip default))\n");
printf("-ppgt=<0,1> Enable PPGTT (default: true)\n");
printf("-pc8=<0,1> Enable support for low power package C states (PC8+) (default: 0 - false)\n");
printf("-l<path> path to log file\n");
printf("-m<WxHxHz> set videomode\n");