Subversion Repositories Kolibri OS

Compare Revisions

Rev 5059 → Rev 5060

/drivers/video/drm/i915/intel_dp.c
64,6 → 64,24
{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
 
/*
* CHV supports eDP 1.4, which has more link rates.
* Below provides only the fixed rates, excluding the variable ones.
*/
static const struct dp_link_dpll chv_dpll[] = {
/*
* CHV requires programming fractional division for m2.
* m2 is stored in fixed-point format using the formula below:
* (m2_int << 22) | m2_fraction
*/
{ DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
{ DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
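
/* A quick check of the fixed-point encoding described above (worked
* arithmetic only, not driver code):
*   (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a,
* which matches the DP_LINK_BW_1_62 entry, and
*   (27 << 22) | 0 == 0x6c00000
* matches the 2.7 and 5.4 entries.
*/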
 
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
91,11 → 109,14
}
 
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 
static int
int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
struct drm_device *dev = intel_dp->attached_connector->base.dev;
 
switch (max_link_bw) {
case DP_LINK_BW_1_62:
102,6 → 123,11
case DP_LINK_BW_2_7:
break;
case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
INTEL_INFO(dev)->gen >= 8) &&
intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
max_link_bw = DP_LINK_BW_5_4;
else
max_link_bw = DP_LINK_BW_2_7;
break;
default:
113,6 → 139,22
return max_link_bw;
}
 
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
u8 source_max, sink_max;
 
source_max = 4;
if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
(intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
source_max = 2;
 
sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
 
return min(source_max, sink_max);
}
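
/* Worked example of the min() above (illustrative): a DDI port A wired
* for two lanes (DDI_A_4_LANES clear) is capped at source_max = 2, so
* even a sink whose DPCD advertises four lanes gets min(2, 4) == 2.
*/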
 
/*
* The units on the numbers in the next two are... bizarre. Examples will
* make it clearer; this one parallels an example in the eDP spec.
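*
* To make those units concrete (a worked sketch; the exact helper
* definitions are assumed here, per the usual i915 formulas):
*
*     intel_dp_max_data_rate(270000, 4)  == 270000 * 4 * 8 / 10 == 864000
*     intel_dp_link_required(148500, 18) == 148500 * 18 / 10    == 267300
*
* 267300 <= 864000, so a 1080p60 mode at 18 bpp fits on a four-lane
* 2.7 GHz link.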
163,7 → 205,7
}
 
max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
max_lanes = intel_dp_max_lane_count(intel_dp);
 
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
294,7 → 336,8
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
 
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
302,12 → 345,17
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
 
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum intel_display_power_domain power_domain;
 
return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
power_domain = intel_display_port_power_domain(intel_encoder);
return intel_display_power_enabled(dev_priv, power_domain) &&
(I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}
 
static void
319,7 → 367,7
if (!is_edp(intel_dp))
return;
 
if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
I915_READ(_pp_stat_reg(intel_dp)),
351,31 → 399,46
return status;
}
 
static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
int index)
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*
* Note that PCH attached eDP panels should use a 125MHz input
* clock divider.
/*
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2 and use that
*/
if (IS_VALLEYVIEW(dev)) {
return index ? 0 : 100;
} else if (intel_dig_port->port == PORT_A) {
return index ? 0 : intel_hrawclk(dev) / 2;
}
 
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
 
if (index)
return 0;
if (HAS_DDI(dev))
return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
else if (IS_GEN6(dev) || IS_GEN7(dev))
 
if (intel_dig_port->port == PORT_A) {
if (IS_GEN6(dev) || IS_GEN7(dev))
return 200; /* SNB & IVB eDP input clock at 400 MHz */
else
return 225; /* eDP input clock at 450 MHz */
} else {
return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
}
}
 
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (intel_dig_port->port == PORT_A) {
if (index)
return 0;
return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
switch (index) {
383,13 → 446,46
case 1: return 72;
default: return 0;
}
} else if (HAS_PCH_SPLIT(dev)) {
} else {
return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
} else {
return index ? 0 : intel_hrawclk(dev) / 2;
}
}
 
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
return index ? 0 : 100;
}
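
/* All three divider flavours target the same AUX clock. A worked sketch
* (assuming intel_hrawclk() reports MHz and intel_ddi_get_cdclk_freq()
* reports kHz):
*   i9xx: 200 MHz hrawclk -> 200 / 2       = 100
*   hsw:  450 MHz cdclk   -> 450000 / 2000 = 225
*   vlv:  fixed 100, i.e. a 200 MHz reference
* In each case reference / divider = 2 MHz, twice the 1 Mbps AUX bit
* rate, matching the DP_AUX_CH_CTL_BIT_CLOCK_2X field programmed below.
*/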
 
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
uint32_t precharge, timeout;
 
if (IS_GEN6(dev))
precharge = 3;
else
precharge = 5;
 
if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
 
return DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
timeout |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
 
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
403,28 → 499,19
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
int try, precharge, clock = 0;
bool has_aux_irq = true;
uint32_t timeout;
int try, clock = 0;
bool has_aux_irq = HAS_AUX_IRQ(dev);
bool vdd;
 
vdd = _edp_panel_vdd_on(intel_dp);
 
/* dp aux is extremely sensitive to irq latency, hence request the
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
// pm_qos_update_request(&dev_priv->pm_qos, 0);
 
intel_dp_check_edp(intel_dp);
 
if (IS_GEN6(dev))
precharge = 3;
else
precharge = 5;
 
if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
 
intel_aux_display_runtime_get(dev_priv);
 
/* Try to wait for any previous AUX channel activity */
448,7 → 535,12
goto out;
}
 
while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
has_aux_irq,
send_bytes,
aux_clock_divider);
 
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
457,16 → 549,7
pack_aux(send + i, send_bytes - i));
 
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
timeout |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
I915_WRITE(ch_ctl, send_ctl);
 
status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
 
525,236 → 608,146
// pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_aux_display_runtime_put(dev_priv);
 
if (vdd)
edp_panel_vdd_off(intel_dp, false);
 
return ret;
}
 
/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint16_t address, uint8_t *send, int send_bytes)
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
uint8_t txbuf[20], rxbuf[20];
size_t txsize, rxsize;
int ret;
uint8_t msg[20];
int msg_bytes;
uint8_t ack;
 
if (WARN_ON(send_bytes > 16))
txbuf[0] = msg->request << 4;
txbuf[1] = msg->address >> 8;
txbuf[2] = msg->address & 0xff;
txbuf[3] = msg->size - 1;
 
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
rxsize = 1;
 
if (WARN_ON(txsize > 20))
return -E2BIG;
 
intel_dp_check_edp(intel_dp);
msg[0] = DP_AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = send_bytes - 1;
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
ack >>= 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
break;
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
}
return send_bytes;
}
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
 
/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
uint16_t address, uint8_t byte)
{
return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
 
/* Return payload size. */
ret = msg->size;
}
break;
 
/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
int msg_bytes;
uint8_t reply[20];
int reply_bytes;
uint8_t ack;
int ret;
case DP_AUX_NATIVE_READ:
case DP_AUX_I2C_READ:
txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
rxsize = msg->size + 1;
 
if (WARN_ON(recv_bytes > 19))
if (WARN_ON(rxsize > 20))
return -E2BIG;
 
intel_dp_check_edp(intel_dp);
msg[0] = DP_AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = recv_bytes - 1;
ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
/*
* Assume happy day, and copy the data. The caller is
* expected to check msg->reply before touching it.
*
* Return payload size.
*/
ret--;
memcpy(msg->buffer, rxbuf + 1, ret);
}
break;
 
msg_bytes = 4;
reply_bytes = recv_bytes + 1;
default:
ret = -EINVAL;
break;
}
 
for (;;) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
if (ret < 0)
return ret;
ack = reply[0] >> 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
memcpy(recv, reply + 1, ret - 1);
return ret - 1;
}
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
}
}
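
/* Illustrative packing for a one-byte native DPCD read of address 0x000
* (DP_DPCD_REV) through the transfer hook above (hypothetical values):
*   txbuf[0] = DP_AUX_NATIVE_READ << 4;  -> 0x90, request in high nibble
*   txbuf[1] = 0x00;                     -> address[15:8]
*   txbuf[2] = 0x00;                     -> address[7:0]
*   txbuf[3] = 1 - 1;                    -> requested size minus one
* The reply code comes back in the high nibble of rxbuf[0], with the
* payload following it.
*/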
 
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct intel_dp *intel_dp = container_of(adapter,
struct intel_dp,
adapter);
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
unsigned retry;
int msg_bytes;
int reply_bytes;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
const char *name = NULL;
int ret;
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[0] = DP_AUX_I2C_READ << 4;
else
msg[0] = DP_AUX_I2C_WRITE << 4;
 
if (!(mode & MODE_I2C_STOP))
msg[0] |= DP_AUX_I2C_MOT << 4;
 
msg[1] = address >> 8;
msg[2] = address;
 
switch (mode) {
case MODE_I2C_WRITE:
msg[3] = 0;
msg[4] = write_byte;
msg_bytes = 5;
reply_bytes = 1;
switch (port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
name = "DPDDC-A";
break;
case MODE_I2C_READ:
msg[3] = 0;
msg_bytes = 4;
reply_bytes = 2;
case PORT_B:
intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
name = "DPDDC-B";
break;
default:
msg_bytes = 3;
reply_bytes = 1;
case PORT_C:
intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
name = "DPDDC-C";
break;
}
 
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
* required to retry at least seven times upon receiving AUX_DEFER
* before giving up the AUX transaction.
*/
for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
goto out;
}
 
switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
case PORT_D:
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
name = "DPDDC-D";
break;
case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
ret = -EREMOTEIO;
goto out;
case DP_AUX_NATIVE_REPLY_DEFER:
/*
* For now, just give more slack to branch devices. We
* could check the DPCD for I2C bit rate capabilities,
* and if available, adjust the interval. We could also
* be more careful with DP-to-Legacy adapters where a
* long legacy cable may force very low I2C bit rates.
*/
udelay(400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
reply[0]);
ret = -EREMOTEIO;
goto out;
BUG();
}
 
switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
if (!HAS_DDI(dev))
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
intel_dp->aux.name = name;
intel_dp->aux.dev = dev->dev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
 
 
ret = drm_dp_aux_register(&intel_dp->aux);
if (ret < 0) {
DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
name, ret);
return;
}
ret = reply_bytes - 1;
goto out;
case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
ret = -EREMOTEIO;
goto out;
case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
ret = -EREMOTEIO;
goto out;
}
}
 
DRM_ERROR("too many retries, giving up\n");
ret = -EREMOTEIO;
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
 
out:
ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
intel_connector_unregister(intel_connector);
}
 
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
int ret;
 
DRM_DEBUG_KMS("i2c_init %s\n", name);
intel_dp->algo.running = false;
intel_dp->algo.address = 0;
intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
 
memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
intel_dp->adapter.class = I2C_CLASS_DDC;
strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = intel_connector->base.kdev;
 
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
return ret;
switch (link_bw) {
case DP_LINK_BW_1_62:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
break;
case DP_LINK_BW_2_7:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
break;
case DP_LINK_BW_5_4:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
}
}
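
/* A pattern worth noting (inferred, not stated in the source): each
* LCPLL selection is half the link bit rate (810 MHz for 1.62 Gbps,
* 1350 MHz for 2.7 Gbps, 2700 MHz for 5.4 Gbps), consistent with double
* data rate signalling on the port clock.
*/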
 
static void
intel_dp_set_clock(struct intel_encoder *encoder,
767,11 → 760,12
if (IS_G4X(dev)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
} else if (IS_HASWELL(dev)) {
/* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
} else if (IS_CHERRYVIEW(dev)) {
divisor = chv_dpll;
count = ARRAY_SIZE(chv_dpll);
} else if (IS_VALLEYVIEW(dev)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
788,6 → 782,20
}
}
 
static void
intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder transcoder = crtc->config.cpu_transcoder;
 
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
}
 
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
800,10 → 808,13
struct intel_crtc *intel_crtc = encoder->new_crtc;
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int min_lane_count = 1;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
/* Conveniently, the link BW constants become indices with a shift... */
int min_clock = 0;
int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
int link_avail, link_clock;
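/* The shift trick above, worked out: the standard DPCD link-bw codes
* are 0x06, 0x0a and 0x14, so code >> 3 yields 0, 1 and 2, indexing
* bws[] directly (a check from the DPCD values, not from this file).
*/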
 
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
810,6 → 821,7
pipe_config->has_pch_encoder = true;
 
pipe_config->has_dp_encoder = true;
pipe_config->has_audio = intel_dp->has_audio;
 
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
833,19 → 845,38
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = pipe_config->pipe_bpp;
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
dev_priv->vbt.edp_bpp < bpp) {
if (is_edp(intel_dp)) {
if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
dev_priv->vbt.edp_bpp);
bpp = dev_priv->vbt.edp_bpp;
}
 
if (IS_BROADWELL(dev)) {
/* Yes, it's an ugly hack. */
min_lane_count = max_lane_count;
DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
min_lane_count);
} else if (dev_priv->vbt.edp_lanes) {
min_lane_count = min(dev_priv->vbt.edp_lanes,
max_lane_count);
DRM_DEBUG_KMS("using min %u lanes per VBT\n",
min_lane_count);
}
 
if (dev_priv->vbt.edp_rate) {
min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
bws[min_clock]);
}
}
 
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
 
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = min_clock; clock <= max_clock; clock++) {
for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
891,6 → 922,17
pipe_config->port_clock,
&pipe_config->dp_m_n);
 
if (intel_connector->panel.downclock_mode != NULL &&
intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
&pipe_config->dp_m2_n2);
}
 
if (HAS_DDI(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
 
return true;
926,7 → 968,7
udelay(500);
}
 
static void intel_dp_mode_set(struct intel_encoder *encoder)
static void intel_dp_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
961,7 → 1003,7
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
 
if (intel_dp->has_audio) {
if (crtc->config.has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
994,26 → 1036,27
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
if (!IS_CHERRYVIEW(dev)) {
if (crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
} else {
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
}
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
 
if (port == PORT_A && !IS_VALLEYVIEW(dev))
ironlake_set_pll_cpu_edp(intel_dp);
}
 
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
 
#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
 
#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
 
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
1038,25 → 1081,42
DRM_DEBUG_KMS("Wait complete\n");
}
 
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
static void wait_panel_on(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power on\n");
ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
 
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
static void wait_panel_off(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power off time\n");
ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
 
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power cycle\n");
ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 
/* When we disable the VDD override bit last, we have to do the manual
* wait. */
wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
intel_dp->panel_power_cycle_delay);
 
wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
 
static void wait_backlight_on(struct intel_dp *intel_dp)
{
wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
intel_dp->backlight_on_delay);
}
 
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
intel_dp->backlight_off_delay);
}
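
/* Rough semantics of the wait_remaining_ms_from_jiffies() helper used
* above (a sketch, not the driver's implementation): sleep only for the
* portion of the delay that has not already elapsed since the recorded
* timestamp.
*/
static inline void
wait_remaining_ms_from_jiffies_sketch(unsigned long timestamp, int ms)
{
	unsigned long target = timestamp + msecs_to_jiffies(ms);

	if (time_before(jiffies, target))
		msleep(jiffies_to_msecs(target - jiffies));
}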
 
/* Read the current pp_control value, unlocking the register if it
* is locked
*/
1073,30 → 1133,32
return control;
}
 
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
 
if (!is_edp(intel_dp))
return;
return false;
 
WARN(intel_dp->want_panel_vdd,
"eDP VDD already requested on\n");
 
intel_dp->want_panel_vdd = true;
 
if (ironlake_edp_have_panel_vdd(intel_dp))
return;
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
 
intel_runtime_pm_get(dev_priv);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
if (!ironlake_edp_have_panel_power(intel_dp))
ironlake_wait_panel_power_cycle(intel_dp);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
 
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
1111,22 → 1173,38
/*
* If the panel wasn't on, delay before accessing aux channel
*/
if (!ironlake_edp_have_panel_power(intel_dp)) {
if (!edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP was not running\n");
msleep(intel_dp->panel_power_up_delay);
}
 
return need_to_disable;
}
 
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
if (is_edp(intel_dp)) {
bool vdd = _edp_panel_vdd_on(intel_dp);
 
WARN(!vdd, "eDP VDD already requested on\n");
}
}
 
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum intel_display_power_domain power_domain;
 
DRM_DEBUG_KMS("Turning eDP VDD off\n");
 
pp = ironlake_get_pp_control(intel_dp);
1143,25 → 1221,39
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
 
if ((pp & POWER_TARGET_ON) == 0)
msleep(intel_dp->panel_power_cycle_delay);
intel_dp->last_power_cycle = jiffies;
 
intel_runtime_pm_put(dev_priv);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
}
}
 
static void ironlake_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
 
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
unsigned long delay;
 
/*
* Queue the timer to fire a long time from now (relative to the power
* down delay) to keep the panel power up across a sequence of
* operations.
*/
delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
 
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
 
1169,20 → 1261,13
 
intel_dp->want_panel_vdd = false;
 
if (sync) {
ironlake_panel_vdd_off_sync(intel_dp);
} else {
/*
* Queue the timer to fire a long
* time from now (relative to the power down delay)
* to keep the panel power up across a sequence of operations
*/
schedule_delayed_work(&intel_dp->panel_vdd_work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
if (sync)
edp_panel_vdd_off_sync(intel_dp);
else
edp_panel_vdd_schedule_off(intel_dp);
}
}
 
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
1194,12 → 1279,12
 
DRM_DEBUG_KMS("Turn eDP power on\n");
 
if (ironlake_edp_have_panel_power(intel_dp)) {
if (edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP power already on\n");
return;
}
 
ironlake_wait_panel_power_cycle(intel_dp);
wait_panel_power_cycle(intel_dp);
 
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
1217,7 → 1302,8
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
ironlake_wait_panel_on(intel_dp);
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
 
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1226,10 → 1312,13
}
}
 
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_ctrl_reg;
 
1238,20 → 1327,30
 
DRM_DEBUG_KMS("Turn eDP power off\n");
 
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
EDP_BLC_ENABLE);
 
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
intel_dp->want_panel_vdd = false;
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
ironlake_wait_panel_off(intel_dp);
intel_dp->last_power_cycle = jiffies;
wait_panel_off(intel_dp);
 
/* We got a reference when we enabled the VDD. */
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
}
 
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
1263,6 → 1362,9
return;
 
DRM_DEBUG_KMS("\n");
 
intel_panel_enable_backlight(intel_dp->attached_connector);
 
/*
* If we enable the backlight right away following a panel power
* on, we may see slight flicker as the panel syncs with the eDP
1269,7 → 1371,7
* link. So delay a bit to make sure the image is solid before
* allowing it to appear.
*/
msleep(intel_dp->backlight_on_delay);
wait_backlight_on(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
 
1277,11 → 1379,9
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
intel_panel_enable_backlight(intel_dp->attached_connector);
}
 
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
1291,8 → 1391,6
if (!is_edp(intel_dp))
return;
 
intel_panel_disable_backlight(intel_dp->attached_connector);
 
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
1301,7 → 1399,11
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
msleep(intel_dp->backlight_off_delay);
intel_dp->last_backlight_off = jiffies;
 
edp_wait_backlight_off(intel_dp);
 
intel_panel_disable_backlight(intel_dp->attached_connector);
}
 
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1365,7 → 1467,7
return;
 
if (mode != DRM_MODE_DPMS_ON) {
ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D3);
if (ret != 1)
DRM_DEBUG_DRIVER("failed to write sink power state\n");
1375,8 → 1477,7
* time to wake up.
*/
for (i = 0; i < 3; i++) {
ret = intel_dp_aux_native_write_1(intel_dp,
DP_SET_POWER,
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D0);
if (ret == 1)
break;
1392,13 → 1493,22
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp = I915_READ(intel_dp->output_reg);
enum intel_display_power_domain power_domain;
u32 tmp;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(intel_dp->output_reg);
 
if (!(tmp & DP_PORT_EN))
return false;
 
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
*pipe = PORT_TO_PIPE_CPT(tmp);
} else if (IS_CHERRYVIEW(dev)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
*pipe = PORT_TO_PIPE(tmp);
} else {
1446,8 → 1556,11
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int dotclock;
 
tmp = I915_READ(intel_dp->output_reg);
if (tmp & DP_AUDIO_OUTPUT_ENABLE)
pipe_config->has_audio = true;
 
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
tmp = I915_READ(intel_dp->output_reg);
if (tmp & DP_SYNC_HS_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
1512,11 → 1625,9
}
}
 
static bool is_edp_psr(struct drm_device *dev)
static bool is_edp_psr(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return dev_priv->psr.sink_support;
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
 
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1564,9 → 1675,6
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_vsc_psr psr_vsc;
 
if (intel_dp->psr_setup_done)
return;
 
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
1578,27 → 1686,30
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
intel_dp->psr_setup_done = true;
}
 
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
uint32_t aux_clock_divider;
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
bool only_standby = false;
 
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
DP_PSR_ENABLE &
~DP_PSR_MAIN_LINK_ACTIVE);
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
DP_PSR_ENABLE |
DP_PSR_MAIN_LINK_ACTIVE);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 
/* Setup AUX registers */
I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1612,23 → 1723,29
 
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
 
I915_WRITE(EDP_PSR_CTL(dev), val |
IS_BROADWELL(dev) ? 0 : link_entry_time |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
1641,51 → 1758,27
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
lockdep_assert_held(&dev_priv->psr.lock);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
dev_priv->psr.source_ok = false;
 
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return false;
}
 
if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
(dig_port->port != PORT_A)) {
if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
 
if (!i915_enable_psr) {
if (!i915.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
}
 
crtc = dig_port->base.base.crtc;
if (crtc == NULL) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
return false;
}
/* Below limitations aren't valid for Broadwell */
if (IS_BROADWELL(dev))
goto out;
 
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc_active(crtc)) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
return false;
}
 
obj = to_intel_framebuffer(crtc->fb)->obj;
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
return false;
}
 
if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
return false;
}
 
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1697,6 → 1790,7
return false;
}
 
out:
dev_priv->psr.source_ok = true;
return true;
}
1703,39 → 1797,67
 
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!intel_edp_psr_match_conditions(intel_dp) ||
intel_edp_is_psr_enabled(dev))
return;
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
 
/* Setup PSR once */
intel_edp_psr_setup(intel_dp);
 
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
 
/* Enable PSR on the host */
intel_edp_psr_enable_source(intel_dp);
 
dev_priv->psr.active = true;
}
 
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (intel_edp_psr_match_conditions(intel_dp) &&
!intel_edp_is_psr_enabled(dev))
intel_edp_psr_do_enable(intel_dp);
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
 
if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
return;
}
 
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
dev_priv->psr.busy_frontbuffer_bits = 0;
 
/* Setup PSR once */
intel_edp_psr_setup(intel_dp);
 
if (intel_edp_psr_match_conditions(intel_dp))
dev_priv->psr.enabled = intel_dp;
mutex_unlock(&dev_priv->psr.lock);
}
 
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!intel_edp_is_psr_enabled(dev))
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
1743,28 → 1865,124
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
 
void intel_edp_psr_update(struct drm_device *dev)
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
 
cancel_delayed_work_sync(&dev_priv->psr.work);
}
 
static void intel_edp_psr_work(struct work_struct *work)
{
struct intel_encoder *encoder;
struct intel_dp *intel_dp = NULL;
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
if (encoder->type == INTEL_OUTPUT_EDP) {
intel_dp = enc_to_intel_dp(&encoder->base);
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
 
if (!is_edp_psr(dev))
return;
if (!intel_dp)
goto unlock;
 
if (!intel_edp_psr_match_conditions(intel_dp))
intel_edp_psr_disable(intel_dp);
else
if (!intel_edp_is_psr_enabled(dev))
/*
* The delayed work can race with an invalidate, hence we need to
* recheck. Since psr_flush first clears this and then reschedules, we
* won't ever miss a flush when bailing out here.
*/
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
 
intel_edp_psr_do_enable(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
 
static void intel_edp_psr_do_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));
 
WARN_ON(!(val & EDP_PSR_ENABLE));
 
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
 
dev_priv->psr.active = false;
}
 
}
 
void intel_edp_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
 
intel_edp_psr_do_exit(dev);
 
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->psr.lock);
}
 
void intel_edp_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
/*
* On Haswell, sprite plane updates don't result in a PSR-invalidating
* signal in the hardware, which means we need to manually fake this in
* software for all flushes, not just when we've seen a preceding
* invalidation through frontbuffer rendering.
*/
if (IS_HASWELL(dev) &&
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_edp_psr_do_exit(dev);
 
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
mutex_unlock(&dev_priv->psr.lock);
}
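
/* Assumed usage of the two hooks above (simplified): frontbuffer writes
* bracket rendering with an invalidate, which exits PSR, and a flush,
* which re-arms it via the delayed work once no bits remain busy.
*
*   intel_edp_psr_invalidate(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
*   ... CPU renders to the front buffer ...
*   intel_edp_psr_flush(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
*
* (INTEL_FRONTBUFFER_PRIMARY is assumed here; this file itself only
* references the ALL_MASK and SPRITE variants.)
*/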
 
void intel_edp_psr_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
mutex_init(&dev_priv->psr.lock);
}
 
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1773,9 → 1991,10
 
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_backlight_off(intel_dp);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
intel_edp_panel_off(intel_dp);
 
/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
1782,19 → 2001,61
intel_dp_link_down(intel_dp);
}
 
static void intel_post_disable_dp(struct intel_encoder *encoder)
static void g4x_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
 
if (port == PORT_A || IS_VALLEYVIEW(dev)) {
if (port != PORT_A)
return;
 
intel_dp_link_down(intel_dp);
if (!IS_VALLEYVIEW(dev))
ironlake_edp_pll_off(intel_dp);
}
 
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
intel_dp_link_down(intel_dp);
}
 
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
intel_dp_link_down(intel_dp);
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Propagate soft reset to data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void intel_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1805,11 → 2066,11
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
1819,7 → 2080,7
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
intel_enable_dp(encoder);
ironlake_edp_backlight_on(intel_dp);
intel_edp_backlight_on(intel_dp);
}
 
static void vlv_enable_dp(struct intel_encoder *encoder)
1826,7 → 2087,7
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
ironlake_edp_backlight_on(intel_dp);
intel_edp_backlight_on(intel_dp);
}
 
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
1834,9 → 2095,14
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 
if (dport->port == PORT_A)
intel_dp_prepare(encoder);
 
/* Only ilk+ has port A */
if (dport->port == PORT_A) {
ironlake_set_pll_cpu_edp(intel_dp);
ironlake_edp_pll_on(intel_dp);
}
}
 
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
1865,10 → 2131,12
 
mutex_unlock(&dev_priv->dpio_lock);
 
if (is_edp(intel_dp)) {
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
}
 
intel_enable_dp(encoder);
 
1885,6 → 2153,8
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
 
intel_dp_prepare(encoder);
 
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
1903,29 → 2173,155
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq power_seq;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Deassert soft data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 
/* Program Tx lane latency optimal setting */
for (i = 0; i < 4; i++) {
/* Set the latency optimal bit */
data = (i == 1) ? 0x0 : 0x6;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
data << DPIO_FRC_LATENCY_SHFIT);
 
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
 
/* Data lane stagger programming */
/* FIXME: Fix up value only after power analysis */
 
mutex_unlock(&dev_priv->dpio_lock);
 
if (is_edp(intel_dp)) {
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
}
 
intel_enable_dp(encoder);
 
vlv_wait_port_ready(dev_priv, dport);
}
 
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
 
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
 
/*
* This is a bit weird since generally CL
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
*
* Sinks are *supposed* to come up within 1ms from an off state, but we're also
* supposed to retry 3 times per the spec.
*/
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
uint8_t *recv, int recv_bytes)
static ssize_t
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
int ret, i;
ssize_t ret;
int i;
 
/*
* Sinks are *supposed* to come up within 1ms from an off state,
* but we're also supposed to retry 3 times per the spec.
*/
for (i = 0; i < 3; i++) {
ret = intel_dp_aux_native_read(intel_dp, address, recv,
recv_bytes);
if (ret == recv_bytes)
return true;
ret = drm_dp_dpcd_read(aux, offset, buffer, size);
if (ret == size)
return ret;
msleep(1);
}
 
return false;
return ret;
}
 
/*
1935,17 → 2331,13
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
return intel_dp_aux_native_read_retry(intel_dp,
return intel_dp_dpcd_read_wake(&intel_dp->aux,
DP_LANE0_1_STATUS,
link_status,
DP_LINK_STATUS_SIZE);
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
 
/*
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
 
/* These are source-specific values. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
1952,7 → 2344,7
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
1968,20 → 2360,9
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (IS_BROADWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else if (IS_HASWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
2128,6 → 2509,166
return 0;
}
 
static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
u32 deemph_reg_value, margin_reg_value, val;
uint8_t train_set = intel_dp->train_set[0];
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
int i;
 
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
deemph_reg_value = 128;
margin_reg_value = 52;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
deemph_reg_value = 128;
margin_reg_value = 77;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
deemph_reg_value = 128;
margin_reg_value = 102;
break;
case DP_TRAIN_VOLTAGE_SWING_1200:
deemph_reg_value = 128;
margin_reg_value = 154;
/* FIXME extra to set for 1200 */
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_3_5:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
deemph_reg_value = 85;
margin_reg_value = 78;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
deemph_reg_value = 85;
margin_reg_value = 116;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
deemph_reg_value = 85;
margin_reg_value = 154;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_6:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
deemph_reg_value = 64;
margin_reg_value = 104;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
deemph_reg_value = 64;
margin_reg_value = 154;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_9_5:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
deemph_reg_value = 43;
margin_reg_value = 154;
break;
default:
return 0;
}
break;
default:
return 0;
}
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
/* Program swing deemph */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
}
 
/* Program swing margin */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
 
/* Disable unique transition scale */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
 
if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
== DP_TRAIN_PRE_EMPHASIS_0) &&
((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
== DP_TRAIN_VOLTAGE_SWING_1200)) {
 
/*
* The document says to set bit 27 for ch0 and bit 26 for ch1,
* which might be a typo in the doc.
* For now, set bit 27 for both ch0 and ch1 for this unique
* transition scale selection.
*/
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
 
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
}
 
/* Start swing calculation */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
/* LRC Bypass */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
val |= DPIO_LRC_BYPASS;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
 
mutex_unlock(&dev_priv->dpio_lock);
 
return 0;
}
 
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
2291,41 → 2832,6
}
}
 
static uint32_t
intel_bdw_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
 
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
 
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
 
case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
 
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
}
}
 
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2336,12 → 2842,12
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
 
if (IS_BROADWELL(dev)) {
signal_levels = intel_bdw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_HASWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
signal_levels = intel_chv_signal_levels(intel_dp);
mask = 0;
} else if (IS_VALLEYVIEW(dev)) {
signal_levels = intel_vlv_signal_levels(intel_dp);
mask = 0;
2452,7 → 2958,7
len = intel_dp->lane_count + 1;
}
 
ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
buf, len);
 
return ret == len;
2482,9 → 2988,8
I915_WRITE(intel_dp->output_reg, *DP);
POSTING_READ(intel_dp->output_reg);
 
ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
intel_dp->train_set,
intel_dp->lane_count);
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, intel_dp->lane_count);
 
return ret == intel_dp->lane_count;
}
2540,11 → 3045,11
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
 
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
 
DP |= DP_PORT_EN;
 
2617,10 → 3122,15
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
uint32_t training_pattern = DP_TRAINING_PATTERN_2;
 
/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
training_pattern = DP_TRAINING_PATTERN_3;
 
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_2 |
training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
2647,7 → 3157,7
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_2 |
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
2663,7 → 3173,7
intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_2 |
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
2704,22 → 3214,7
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
 
/*
* DDI code has a strict mode set sequence and we should try to respect
* it, otherwise we might hang the machine in many different ways. So we
* really should be disabling the port only on a complete crtc_disable
* sequence. This function is just called under two conditions on DDI
* code:
* - Link train failed while doing crtc_enable, and on this case we
* really should respect the mode set sequence and wait for a
* crtc_disable.
* - Someone turned the monitor off and intel_dp_check_link_status
* called us. We don't need to disable the whole port on this case, so
* when someone turns the monitor on again,
* intel_ddi_prepare_link_retrain will take care of redoing the link
* train.
*/
if (HAS_DDI(dev))
if (WARN_ON(HAS_DDI(dev)))
return;
 
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
2736,9 → 3231,6
}
POSTING_READ(intel_dp->output_reg);
 
/* We don't really know why we're doing this */
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2782,8 → 3274,8
 
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) == 0)
if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
 
hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2796,7 → 3288,7
/* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
if (is_edp(intel_dp)) {
intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
2805,6 → 3297,14
}
}
 
/* Training Pattern 3 support */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("DisplayPort TPS3 supported\n");
} else
intel_dp->use_tps3 = false;
 
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
2812,9 → 3312,9
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
return true; /* no per-port downstream info */
 
if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
intel_dp->downstream_ports,
DP_MAX_DOWNSTREAM_PORTS) == 0)
DP_MAX_DOWNSTREAM_PORTS) < 0)
return false; /* downstream port status fetch failed */
 
return true;
2828,28 → 3328,92
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_edp_panel_vdd_on(intel_dp);
 
if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
ironlake_edp_panel_vdd_off(intel_dp, false);
edp_panel_vdd_off(intel_dp, false);
}
 
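/*
* Probe the sink for MST support: this needs a DPCD 1.2+ sink that
* advertises DP_MST_CAP in DP_MSTM_CAP. The result is pushed into the
* MST topology manager and returned.
*/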
static bool
intel_dp_probe_mst(struct intel_dp *intel_dp)
{
u8 buf[1];
 
if (!intel_dp->can_mst)
return false;
 
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
 
_edp_panel_vdd_on(intel_dp);
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
if (buf[0] & DP_MST_CAP) {
DRM_DEBUG_KMS("Sink is MST capable\n");
intel_dp->is_mst = true;
} else {
DRM_DEBUG_KMS("Sink is not MST capable\n");
intel_dp->is_mst = false;
}
}
edp_panel_vdd_off(intel_dp, false);
 
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
return intel_dp->is_mst;
}
 
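/*
* Fetch the sink CRC for the currently scanned-out frame: verify that
* the sink supports CRC generation (DP_TEST_SINK_MISC), start the test
* sink, wait two vblanks for a stable value, then read the six CRC
* bytes starting at DP_TEST_CRC_R_CR and stop the test sink again.
*/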
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
u8 buf[1];
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
return -EAGAIN;
 
if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
 
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
DP_TEST_SINK_START) < 0)
return -EAGAIN;
 
/* Wait 2 vblanks to be sure we will have the correct CRC value */
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
return -EAGAIN;
 
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
return 0;
}
 
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
return intel_dp_dpcd_read_wake(&intel_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector, 1) == 1;
}
 
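/* Read the 14-byte sink count / event status indicator (ESI) block. */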
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
int ret;
 
ret = intel_dp_aux_native_read_retry(intel_dp,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector, 1);
if (!ret)
ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
DP_SINK_COUNT_ESI,
sink_irq_vector, 14);
if (ret != 14)
return false;
 
return true;
2859,9 → 3423,66
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
 
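/*
* Service an MST short pulse: read the ESI block from the sink, retrain
* the link if channel EQ was lost, let the topology manager handle the
* IRQ and ack the handled events, looping while fresh ESI data arrives.
* Returns -EINVAL when the sink stops responding in MST mode, so the
* caller can drop the port back to SST.
*/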
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
bool bret;
 
if (intel_dp->is_mst) {
u8 esi[16] = { 0 };
int ret = 0;
int retry;
bool handled;
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
if (bret) {
 
/* check link status - esi[10] = 0x200c */
if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
 
DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
 
if (handled) {
for (retry = 0; retry < 3; retry++) {
int wret;
wret = drm_dp_dpcd_write(&intel_dp->aux,
DP_SINK_COUNT_ESI+1,
&esi[1], 3);
if (wret == 3) {
break;
}
}
 
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (bret) {
DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
goto go_again;
}
} else
ret = 0;
 
return ret;
} else {
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
/* send a hotplug event */
drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
}
}
return -EINVAL;
}
 
/*
* According to DP spec
* 5.1.2:
2870,14 → 3491,16
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
 
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
if (!intel_encoder->connectors_active)
return;
 
2884,6 → 3507,9
if (WARN_ON(!intel_encoder->base.crtc))
return;
 
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
 
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp, link_status)) {
return;
2898,7 → 3524,7
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
/* Clear interrupt source */
intel_dp_aux_native_write_1(intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector);
 
2910,7 → 3536,7
 
if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
drm_get_encoder_name(&intel_encoder->base));
intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
2935,15 → 3561,17
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
uint8_t reg;
if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
&reg, 1))
 
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
&reg, 1) < 0)
return connector_status_unknown;
 
return DP_GET_SINK_COUNT(reg) ? connector_status_connected
: connector_status_disconnected;
}
 
/* If no HPD, poke DDC gently */
if (drm_probe_ddc(&intel_dp->adapter))
if (drm_probe_ddc(&intel_dp->aux.ddc))
return connector_status_connected;
 
/* Well we tried, say unknown for unreliable port types */
3085,13 → 3713,24
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
bool ret;
 
intel_runtime_pm_get(dev_priv);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
 
if (intel_dp->is_mst) {
/* MST devices are disconnected from a monitor POV */
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_disconnected;
goto out;
}
 
intel_dp->has_audio = false;
 
if (HAS_PCH_SPLIT(dev))
3104,10 → 3743,20
 
intel_dp_probe_oui(intel_dp);
 
ret = intel_dp_probe_mst(intel_dp);
if (ret) {
/*
* If we are in MST mode then this connector
* won't appear connected or have anything with EDID on it.
*/
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_disconnected;
goto out;
}
 
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
3119,7 → 3768,7
status = connector_status_connected;
 
out:
intel_runtime_pm_put(dev_priv);
intel_display_power_put(dev_priv, power_domain);
return status;
}
 
3126,14 → 3775,22
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
int ret;
 
/* We should parse the EDID data and find out if it has an audio sink. */
 
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
intel_display_power_put(dev_priv, power_domain);
if (ret)
return ret;
 
3154,15 → 3811,25
intel_dp_detect_audio(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;
 
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
 
intel_display_power_put(dev_priv, power_domain);
 
return has_audio;
}
 
3277,17 → 3944,33
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
i2c_del_adapter(&intel_dp->adapter);
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_mst_encoder_cleanup(intel_dig_port);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
kfree(intel_dig_port);
}
 
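/* On suspend, synchronously turn off panel VDD for eDP ports. */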
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
if (!is_edp(intel_dp))
return;
 
edp_panel_vdd_off_sync(intel_dp);
}
 
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
}
 
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
3303,17 → 3986,79
};
 
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.reset = intel_dp_encoder_reset,
.destroy = intel_dp_encoder_destroy,
};
 
static void
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
return;
}
 
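/*
* Handle an HPD pulse on a digital port: a long pulse revalidates the
* sink (DPCD, OUI, MST probe), a short pulse services MST interrupts or
* rechecks the link status. Returns false when the pulse was fully
* handled here, true to make the caller fall back to the legacy hotplug
* path; a vanished MST device is dropped out of MST mode first.
*/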
bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
bool ret = true;
 
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
long_hpd ? "long" : "short");
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
if (long_hpd) {
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;
 
if (!intel_dp_get_dpcd(intel_dp)) {
goto mst_fail;
}
 
intel_dp_probe_oui(intel_dp);
 
if (!intel_dp_probe_mst(intel_dp))
goto mst_fail;
 
} else {
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
goto mst_fail;
}
 
if (!intel_dp->is_mst) {
/*
* We'll check the link status via the normal hot plug path later,
* but for short HPDs we should check it now.
*/
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
ret = false;
goto put_power;
mst_fail:
/* If we were in MST mode and the device is no longer there, get out of MST mode */
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
}
put_power:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
3362,7 → 4107,7
return false;
}
 
static void
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
3381,6 → 4126,13
}
}
 
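/*
* Seed the panel power sequencing timestamps with the current time so
* that the mandated delays are enforced from the very first power cycle
* after driver init.
*/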
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
intel_dp->last_power_cycle = jiffies;
intel_dp->last_power_on = jiffies;
intel_dp->last_backlight_off = jiffies;
}
 
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp,
3503,10 → 4255,17
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
 
/* And finally store the new values in the power sequencer. */
/*
* And finally store the new values in the power sequencer. The
* backlight delays are set to 1 because we do manual waits on them. For
* T8, even BSpec recommends doing it. For T9, if we don't do this,
* we'll end up waiting for the backlight off delay twice: once when we
* do the manual sleep, and once when we disable the panel and wait for
* the PP_STATUS bit to become zero.
*/
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
3540,28 → 4299,187
I915_READ(pp_div_reg));
}
 
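/*
* Switch the eDP panel between its high and low refresh rate: after
* validating that seamless DRRS is possible and the pipe is active,
* toggle PIPECONF_EDP_RR_MODE_SWITCH (using the M2/N2 values for the
* downclocked mode on gen7) and record the new refresh rate type under
* the DRRS mutex.
*/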
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
struct intel_dp *intel_dp = NULL;
struct intel_crtc_config *config = NULL;
struct intel_crtc *intel_crtc = NULL;
struct intel_connector *intel_connector = dev_priv->drrs.connector;
u32 reg, val;
enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
 
if (refresh_rate <= 0) {
DRM_DEBUG_KMS("Refresh rate should be a positive non-zero value.\n");
return;
}
 
if (intel_connector == NULL) {
DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
return;
}
 
/*
* FIXME: This needs proper synchronization with PSR state, but that is
* really hard to tell without seeing the user of this code.
* Check locking and ordering once that lands.
*/
if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
}
 
encoder = intel_attached_encoder(&intel_connector->base);
intel_dp = enc_to_intel_dp(&encoder->base);
intel_crtc = encoder->new_crtc;
 
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
return;
}
 
config = &intel_crtc->config;
 
if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
return;
}
 
if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
index = DRRS_LOW_RR;
 
if (index == intel_dp->drrs_state.refresh_rate_type) {
DRM_DEBUG_KMS("DRRS requested for previously set RR... ignoring\n");
return;
}
 
if (!intel_crtc->active) {
DRM_DEBUG_KMS("eDP encoder disabled, CRTC not active\n");
return;
}
 
if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
reg = PIPECONF(intel_crtc->config.cpu_transcoder);
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
val |= PIPECONF_EDP_RR_MODE_SWITCH;
intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
} else {
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
I915_WRITE(reg, val);
}
 
/*
* Mutex taken to ensure that there is no race between different
* DRRS calls trying to update the refresh rate. This scenario may
* occur in the future when idleness-detection-based DRRS in the
* kernel and possible calls from user space to set a different RR
* are made.
*/
 
mutex_lock(&intel_dp->drrs_state.mutex);
 
intel_dp->drrs_state.refresh_rate_type = index;
 
mutex_unlock(&intel_dp->drrs_state.mutex);
 
DRM_DEBUG_KMS("eDP refresh rate set to %d Hz\n", refresh_rate);
}
 
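/*
* Set up DRRS for an eDP panel: requires gen7+, a VBT that reports
* seamless DRRS support and a downclocked mode derivable from the
* fixed mode. Returns the downclock mode on success, NULL otherwise.
*/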
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector,
struct drm_display_mode *fixed_mode)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *downclock_mode = NULL;
 
if (INTEL_INFO(dev)->gen <= 6) {
DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
return NULL;
}
 
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
DRM_INFO("VBT doesn't support DRRS\n");
return NULL;
}
 
downclock_mode = intel_find_panel_downclock(dev, fixed_mode, connector);
 
if (!downclock_mode) {
DRM_INFO("DRRS not supported\n");
return NULL;
}
 
dev_priv->drrs.connector = intel_connector;
 
mutex_init(&intel_dp->drrs_state.mutex);
 
intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
 
intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
DRM_INFO("Seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
 
void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp;
enum intel_display_power_domain power_domain;
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
return;
 
intel_dp = enc_to_intel_dp(&intel_encoder->base);
if (!edp_have_panel_vdd(intel_dp))
return;
/*
* The VDD bit needs a power domain reference, so if the bit is
* already enabled when we boot or resume, grab this reference and
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
edp_panel_vdd_schedule_off(intel_dp);
}
 
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
struct intel_connector *intel_connector,
struct edp_power_seq *power_seq)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
struct edp_power_seq power_seq = { 0 };
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
 
intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
 
if (!is_edp(intel_dp))
return true;
 
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_edp_panel_vdd_sanitize(intel_encoder);
 
/* Cache DPCD and EDID for edp. */
ironlake_edp_panel_vdd_on(intel_dp);
intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
edp_panel_vdd_off(intel_dp, false);
 
if (has_dpcd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3575,10 → 4493,10
}
 
/* We now know it's not a ghost, init power sequence regs. */
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
 
edid = drm_get_edid(connector, &intel_dp->adapter);
mutex_lock(&dev->mode_config.mutex);
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
3597,6 → 4515,9
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
intel_dig_port,
intel_connector, fixed_mode);
break;
}
}
3608,8 → 4529,9
if (fixed_mode)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
mutex_unlock(&dev->mode_config.mutex);
 
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
 
return true;
3625,9 → 4547,21
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
const char *name = NULL;
int type, error;
struct edp_power_seq power_seq = { 0 };
int type;
 
/* intel_dp vfuncs */
if (IS_VALLEYVIEW(dev))
intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
else if (HAS_PCH_SPLIT(dev))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
 
intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
 
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
3656,73 → 4590,58
connector->doublescan_allowed = 0;
 
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
ironlake_panel_vdd_work);
edp_panel_vdd_work);
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
drm_connector_register(connector);
 
if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_dp_connector_unregister;
 
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
if (HAS_DDI(dev)) {
switch (intel_dig_port->port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
break;
case PORT_B:
intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
break;
case PORT_C:
intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
break;
case PORT_D:
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
break;
default:
BUG();
}
}
 
/* Set up the DDC bus. */
/* Set up the hotplug pin. */
switch (port) {
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
name = "DPDDC-A";
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
name = "DPDDC-B";
break;
case PORT_C:
intel_encoder->hpd_pin = HPD_PORT_C;
name = "DPDDC-C";
break;
case PORT_D:
intel_encoder->hpd_pin = HPD_PORT_D;
name = "DPDDC-D";
break;
default:
BUG();
}
 
error = intel_dp_i2c_init(intel_dp, intel_connector, name);
WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
error, port_name(port));
if (is_edp(intel_dp)) {
intel_dp_init_panel_power_timestamps(intel_dp);
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
}
 
intel_dp->psr_setup_done = false;
intel_dp_aux_init(intel_dp, intel_connector);
 
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
i2c_del_adapter(&intel_dp->adapter);
/* init MST on ports that can support it */
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (port == PORT_B || port == PORT_C || port == PORT_D) {
intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
}
}
 
if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
drm_dp_aux_unregister(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
drm_sysfs_connector_remove(connector);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
return false;
}
3744,6 → 4663,7
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
3766,18 → 4686,24
DRM_MODE_ENCODER_TMDS);
 
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->mode_set = intel_dp_mode_set;
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->suspend = intel_dp_encoder_suspend;
if (IS_CHERRYVIEW(dev)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
} else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = vlv_post_disable_dp;
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
intel_encoder->post_disable = g4x_post_disable_dp;
}
 
intel_dig_port->port = port;
3784,10 → 4710,20
intel_dig_port->dp.output_reg = output_reg;
 
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (IS_CHERRYVIEW(dev)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
} else {
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
}
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
 
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hpd_irq_port[port] = intel_dig_port;
 
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
3794,3 → 4730,46
kfree(intel_connector);
}
}
 
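/* Suspend the topology manager on every port currently in MST mode. */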
void intel_dp_mst_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* disable MST */
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
if (!intel_dig_port)
continue;
 
if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
if (!intel_dig_port->dp.can_mst)
continue;
if (intel_dig_port->dp.is_mst)
drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
}
}
}
 
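/*
* Resume the topology managers of all MST-capable ports; if a manager
* fails to resume (e.g. the sink disappeared while suspended), run
* intel_dp_check_mst_status() so the port drops out of MST mode.
*/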
void intel_dp_mst_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
if (!intel_dig_port)
continue;
if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
int ret;
 
if (!intel_dig_port->dp.can_mst)
continue;
 
ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
if (ret != 0) {
intel_dp_check_mst_status(&intel_dig_port->dp);
}
}
}
}