Subversion Repositories Kolibri OS

Compare Revisions

Rev 6936 → Rev 6937

/drivers/video/drm/i915/intel_dp.c
28,6 → 28,7
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
387,8 → 388,7
* We don't have a power sequencer currently.
* Pick one that's not used by other ports.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
struct intel_dp *tmp;
 
if (encoder->type != INTEL_OUTPUT_EDP)
515,7 → 515,7
struct drm_device *dev = dev_priv->dev;
struct intel_encoder *encoder;
 
if (WARN_ON(!IS_VALLEYVIEW(dev)))
if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
return;
 
/*
528,7 → 528,7
* should use them always.
*/
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
for_each_intel_encoder(dev, encoder) {
struct intel_dp *intel_dp;
 
if (encoder->type != INTEL_OUTPUT_EDP)
539,7 → 539,8
}
}
 
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
551,7 → 552,8
return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
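/*
 * [Editorial sketch, not part of either revision] The u32 ->
 * i915_reg_t conversions throughout this diff rely on i915_reg_t
 * being a one-member struct wrapper, so a raw integer can no longer
 * be passed where a register offset is expected. Minimal standalone
 * illustration; the definitions below are assumptions matching the
 * upstream i915 headers of this period:
 */
#include <stdint.h>

typedef struct {
	uint32_t reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;	/* unwrap only at the actual MMIO access */
}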
 
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
579,9 → 581,9
 
pps_lock(intel_dp);
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
u32 pp_ctrl_reg, pp_div_reg;
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
 
pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
608,7 → 610,7
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (IS_VALLEYVIEW(dev) &&
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
 
622,7 → 624,7
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (IS_VALLEYVIEW(dev) &&
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
 
652,7 → 654,7
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t status;
bool done;
 
679,7 → 681,7
* The clock divider is based on the hrawclk, and would like to run at
* 2 MHz. So, take the hrawclk value and divide by 2 and use that.
*/
return index ? 0 : intel_hrawclk(dev) / 2;
return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}
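/*
 * [Editorial sketch] Several hunks below make the same change: a
 * plain "/ 2" or DIV_ROUND_UP becomes DIV_ROUND_CLOSEST, which only
 * matters when the clock does not divide evenly. Standalone demo;
 * the macro body is the positive-operand form of the kernel macro:
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	int hrawclk = 125;	/* hypothetical odd raw clock, in MHz */

	/* truncation gives 62 where round-to-closest gives 63; rounding
	 * to nearest halves the worst-case divider error */
	printf("%d vs %d\n", hrawclk / 2, DIV_ROUND_CLOSEST(hrawclk, 2));
	return 0;
}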
 
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
692,10 → 694,10
return 0;
 
if (intel_dig_port->port == PORT_A) {
return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 
} else {
return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
}
}
 
709,7 → 711,7
if (index)
return 0;
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
} else if (HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
717,7 → 719,7
default: return 0;
}
} else {
return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
}
}
 
750,7 → 752,7
else
precharge = 5;
 
if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
789,8 → 791,7
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t ch_data = ch_ctl + 4;
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
853,7 → 854,7
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
I915_WRITE(ch_data + i,
I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
intel_dp_pack_aux(send + i,
send_bytes - i));
 
913,11 → 914,32
/* Unload any bytes sent back from the other side */
recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
 
/*
* By BSpec: "Message sizes of 0 or >20 are not allowed."
* We have no idea what happened, so we return -EBUSY so the
* drm layer takes care of the necessary retries.
*/
if (recv_bytes == 0 || recv_bytes > 20) {
DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
recv_bytes);
/*
* FIXME: This patch was created on top of a series that
* organizes the retries at the drm level. There, -EBUSY should
* also take care of the 1 ms wait before retrying.
* That aux-retry reorganization is still needed, and once it
* is merged we can remove this sleep from here.
*/
usleep_range(1000, 1500);
ret = -EBUSY;
goto out;
}
 
if (recv_bytes > recv_size)
recv_bytes = recv_size;
 
for (i = 0; i < recv_bytes; i += 4)
intel_dp_unpack_aux(I915_READ(ch_data + i),
intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
recv + i, recv_bytes - i);
 
ret = recv_bytes;
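/*
 * [Editorial sketch] The ch_data + i arithmetic above became a table
 * lookup (aux_ch_data_reg[i >> 2]) because each 32-bit AUX data
 * register packs four message bytes, most significant byte first.
 * Standalone pair mirroring the behaviour of the driver's
 * intel_dp_pack_aux()/intel_dp_unpack_aux():
 */
#include <stdint.h>

static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);	/* MSB first */
	return v;
}

static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}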
1003,87 → 1025,193
return ret;
}
 
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
const char *name = NULL;
uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
int ret;
switch (port) {
case PORT_B:
case PORT_C:
case PORT_D:
return DP_AUX_CH_CTL(port);
default:
MISSING_CASE(port);
return DP_AUX_CH_CTL(PORT_B);
}
}
 
/* On SKL we don't have Aux for port E so we rely on VBT to set
* a proper alternate aux channel.
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
switch (port) {
case PORT_B:
case PORT_C:
case PORT_D:
return DP_AUX_CH_DATA(port, index);
default:
MISSING_CASE(port);
return DP_AUX_CH_DATA(PORT_B, index);
}
}
 
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
switch (port) {
case PORT_A:
return DP_AUX_CH_CTL(port);
case PORT_B:
case PORT_C:
case PORT_D:
return PCH_DP_AUX_CH_CTL(port);
default:
MISSING_CASE(port);
return DP_AUX_CH_CTL(PORT_A);
}
}
 
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
switch (port) {
case PORT_A:
return DP_AUX_CH_DATA(port, index);
case PORT_B:
case PORT_C:
case PORT_D:
return PCH_DP_AUX_CH_DATA(port, index);
default:
MISSING_CASE(port);
return DP_AUX_CH_DATA(PORT_A, index);
}
}
 
/*
* On SKL we don't have Aux for port E so we rely
* on VBT to set a proper alternate aux channel.
*/
if (IS_SKYLAKE(dev) && port == PORT_E) {
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[PORT_E];
 
switch (info->alternate_aux_channel) {
case DP_AUX_A:
return PORT_A;
case DP_AUX_B:
porte_aux_ctl_reg = DPB_AUX_CH_CTL;
break;
return PORT_B;
case DP_AUX_C:
porte_aux_ctl_reg = DPC_AUX_CH_CTL;
break;
return PORT_C;
case DP_AUX_D:
porte_aux_ctl_reg = DPD_AUX_CH_CTL;
break;
case DP_AUX_A:
return PORT_D;
default:
porte_aux_ctl_reg = DPA_AUX_CH_CTL;
MISSING_CASE(info->alternate_aux_channel);
return PORT_A;
}
}
 
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
if (port == PORT_E)
port = skl_porte_aux_port(dev_priv);
 
switch (port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
name = "DPDDC-A";
break;
case PORT_B:
intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
name = "DPDDC-B";
break;
case PORT_C:
intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
name = "DPDDC-C";
break;
case PORT_D:
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
name = "DPDDC-D";
break;
case PORT_E:
intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
name = "DPDDC-E";
break;
return DP_AUX_CH_CTL(port);
default:
BUG();
MISSING_CASE(port);
return DP_AUX_CH_CTL(PORT_A);
}
}
 
/*
* The AUX_CTL register is usually DP_CTL + 0x10.
*
* On Haswell and Broadwell though:
* - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
* - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
*
* Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
*/
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
if (port == PORT_E)
port = skl_porte_aux_port(dev_priv);
 
intel_dp->aux.name = name;
switch (port) {
case PORT_A:
case PORT_B:
case PORT_C:
case PORT_D:
return DP_AUX_CH_DATA(port, index);
default:
MISSING_CASE(port);
return DP_AUX_CH_DATA(PORT_A, index);
}
}
 
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_ctl_reg(dev_priv, port);
else if (HAS_PCH_SPLIT(dev_priv))
return ilk_aux_ctl_reg(dev_priv, port);
else
return g4x_aux_ctl_reg(dev_priv, port);
}
 
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_data_reg(dev_priv, port, index);
else if (HAS_PCH_SPLIT(dev_priv))
return ilk_aux_data_reg(dev_priv, port, index);
else
return g4x_aux_data_reg(dev_priv, port, index);
}
 
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
int i;
 
intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
 
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
drm_dp_aux_unregister(&intel_dp->aux);
kfree(intel_dp->aux.name);
}
 
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
int ret;
 
intel_aux_reg_init(intel_dp);
 
intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
if (!intel_dp->aux.name)
return -ENOMEM;
 
intel_dp->aux.dev = dev->dev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
 
DRM_DEBUG_KMS("registering %s bus for %s\n", name,
"");
DRM_DEBUG_KMS("registering %s bus for %s\n",
intel_dp->aux.name,
connector->base.kdev->kobj.name);
 
ret = drm_dp_aux_register(&intel_dp->aux);
if (ret < 0) {
DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
name, ret);
return;
intel_dp->aux.name, ret);
kfree(intel_dp->aux.name);
return ret;
}
 
ret = sysfs_create_link(&connector->base.kdev->kobj,
1090,9 → 1218,13
&intel_dp->aux.ddc.dev.kobj,
intel_dp->aux.ddc.dev.kobj.name);
if (ret < 0) {
DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
drm_dp_aux_unregister(&intel_dp->aux);
DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
intel_dp->aux.name, ret);
intel_dp_aux_fini(intel_dp);
return ret;
}
 
return 0;
}
 
static void
1184,10 → 1316,13
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
 
static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
 
/* WaDisableHBR2:skl */
if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
return false;
 
if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1198,14 → 1333,16
}
 
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
int size;
 
if (IS_BROXTON(dev)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev)) {
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
1214,7 → 1351,7
}
 
/* This depends on the fact that 5.4 is last value in the array */
if (!intel_dp_source_supports_hbr2(dev))
if (!intel_dp_source_supports_hbr2(intel_dp))
size--;
 
return size;
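/*
 * [Editorial sketch] The size-- above works because every source rate
 * table ends in 540000 kHz (HBR2); dropping the final element is what
 * hides HBR2 on sources that cannot drive it. Standalone sketch, with
 * table contents assumed to match upstream of this era:
 */
#include <stdbool.h>

static int example_source_rates(const int **rates, bool has_hbr2)
{
	static const int default_rates[] = { 162000, 270000, 540000 };
	int size = sizeof(default_rates) / sizeof(default_rates[0]);

	*rates = default_rates;
	if (!has_hbr2)
		size--;	/* trailing 540000 (HBR2) entry is no longer visible */
	return size;
}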
1279,12 → 1416,11
static int intel_dp_common_rates(struct intel_dp *intel_dp,
int *common_rates)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
const int *source_rates, *sink_rates;
int source_len, sink_len;
 
sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
source_len = intel_dp_source_rates(dev, &source_rates);
source_len = intel_dp_source_rates(intel_dp, &source_rates);
 
return intersect_rates(source_rates, source_len,
sink_rates, sink_len,
1309,7 → 1445,6
 
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
const int *source_rates, *sink_rates;
int source_len, sink_len, common_len;
int common_rates[DP_MAX_SUPPORTED_RATES];
1318,7 → 1453,7
if ((drm_debug & DRM_UT_KMS) == 0)
return;
 
source_len = intel_dp_source_rates(dev, &source_rates);
source_len = intel_dp_source_rates(intel_dp, &source_rates);
snprintf_int_array(str, sizeof(str), source_rates, source_len);
DRM_DEBUG_KMS("source rates: %s\n", str);
 
1360,7 → 1495,7
return rate_to_index(rate, intel_dp->sink_rates);
}
 
static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select)
{
if (intel_dp->num_sink_rates) {
1421,7 → 1556,7
return ret;
}
 
if (!HAS_PCH_SPLIT(dev))
if (HAS_GMCH_DISPLAY(dev))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
1525,7 → 1660,7
&pipe_config->dp_m2_n2);
}
 
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
skl_edp_set_pll_config(pipe_config);
else if (IS_BROXTON(dev))
/* handled in ddi */;
1537,37 → 1672,6
return true;
}
 
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
crtc->config->port_clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
 
if (crtc->config->port_clock == 162000) {
/* For a long time we've carried around an ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
dpa_ctl |= DP_PLL_FREQ_160MHZ;
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
} else {
dpa_ctl |= DP_PLL_FREQ_270MHZ;
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
 
I915_WRITE(DP_A, dpa_ctl);
 
POSTING_READ(DP_A);
udelay(500);
}
 
void intel_dp_set_link_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
1612,9 → 1716,6
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
 
if (crtc->config->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
/* Split out the IBX/CPU vs CPT settings */
 
if (IS_GEN7(dev) && port == PORT_A) {
1641,7 → 1742,7
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
crtc->config->limited_color_range)
!IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1675,7 → 1776,7
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg, pp_ctrl_reg;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
1765,7 → 1866,7
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
 
lockdep_assert_held(&dev_priv->pps_mutex);
1773,7 → 1874,7
if (!is_edp(intel_dp))
return false;
 
cancel_delayed_work(&intel_dp->panel_vdd_work);
// cancel_delayed_work(&intel_dp->panel_vdd_work);
intel_dp->want_panel_vdd = true;
 
if (edp_have_panel_vdd(intel_dp))
1841,7 → 1942,7
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
1928,7 → 2029,7
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_ctrl_reg;
i915_reg_t pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
1990,7 → 2091,7
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_ctrl_reg;
i915_reg_t pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
2041,7 → 2142,7
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_ctrl_reg;
i915_reg_t pp_ctrl_reg;
 
/*
* If we enable the backlight right away following a panel power
2082,7 → 2183,7
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_ctrl_reg;
i915_reg_t pp_ctrl_reg;
 
if (!is_edp(intel_dp))
return;
2141,27 → 2242,61
_intel_edp_backlight_off(intel_dp);
}
 
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
}
 
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
 
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
port_name(dig_port->port),
state_string(state), state_string(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
 
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
 
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
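/*
 * [Editorial sketch] The asserts above replace the open-coded WARN()s
 * that the old ironlake_edp_pll_on/off carried. The pattern is a plain
 * expected-vs-current comparison; hypothetical userspace stand-in,
 * with fprintf() in place of I915_STATE_WARN and a bool in place of
 * the register read:
 */
#include <stdbool.h>
#include <stdio.h>

static void example_assert_state(bool cur_state, bool state, const char *what)
{
	if (cur_state != state)
		fprintf(stderr,
			"%s state assertion failure (expected %s, current %s)\n",
			what, state ? "on" : "off", cur_state ? "on" : "off");
}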
 
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
assert_pipe_disabled(dev_priv,
to_intel_crtc(crtc)->pipe);
assert_pipe_disabled(dev_priv, crtc->pipe);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
 
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
crtc->config->port_clock);
 
/* We don't adjust intel_dp->DP while tearing down the link, to
* facilitate link retraining (e.g. after hotplug). Hence clear all
* enable bits here to ensure that we don't enable too much. */
intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
intel_dp->DP &= ~DP_PLL_FREQ_MASK;
 
if (crtc->config->port_clock == 162000)
intel_dp->DP |= DP_PLL_FREQ_162MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
 
I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(500);
 
intel_dp->DP |= DP_PLL_ENABLE;
 
I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
2170,24 → 2305,18
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
assert_pipe_disabled(dev_priv,
to_intel_crtc(crtc)->pipe);
assert_pipe_disabled(dev_priv, crtc->pipe);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
 
dpa_ctl = I915_READ(DP_A);
WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
"dp pll off, should be on\n");
WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
DRM_DEBUG_KMS("disabling eDP PLL\n");
 
/* We can't rely on the value tracked for the DP register in
* intel_dp->DP because link_down must not change that (otherwise link
* re-training will fail). */
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
intel_dp->DP &= ~DP_PLL_ENABLE;
 
I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
}
2232,15 → 2361,18
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_is_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
 
ret = false;
 
tmp = I915_READ(intel_dp->output_reg);
 
if (!(tmp & DP_PORT_EN))
return false;
goto out;
 
if (IS_GEN7(dev) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
2251,12 → 2383,14
u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
*pipe = p;
return true;
ret = true;
 
goto out;
}
}
 
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
intel_dp->output_reg);
i915_mmio_reg_offset(intel_dp->output_reg));
} else if (IS_CHERRYVIEW(dev)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else {
2263,7 → 2397,12
*pipe = PORT_TO_PIPE(tmp);
}
 
return true;
ret = true;
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static void intel_dp_get_config(struct intel_encoder *encoder,
2308,7 → 2447,7
pipe_config->base.adjusted_mode.flags |= flags;
 
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
tmp & DP_COLOR_RANGE_16_235)
!IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
 
pipe_config->has_dp_encoder = true;
2319,7 → 2458,7
intel_dp_get_m_n(crtc, pipe_config);
 
if (port == PORT_A) {
if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
2384,6 → 2523,8
enum port port = dp_to_dig_port(intel_dp)->port;
 
intel_dp_link_down(intel_dp);
 
/* Only ilk+ has port A */
if (port == PORT_A)
ironlake_edp_pll_off(intel_dp);
}
2543,6 → 2684,8
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc =
to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
/* enable with pattern 1 (as per spec) */
_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2558,6 → 2701,8
* fail when the power sequencer is freshly used for this port.
*/
intel_dp->DP |= DP_PORT_EN;
if (crtc->config->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
2570,6 → 2715,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = crtc->pipe;
 
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
2576,18 → 2723,41
 
pps_lock(intel_dp);
 
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_init_panel_power_sequencer(intel_dp);
 
/*
* We get an occasional spurious underrun between the port
* enable and vdd enable, when enabling port A eDP.
*
* FIXME: Not sure if this applies to (PCH) port D eDP as well
*/
if (port == PORT_A)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_dp_enable_port(intel_dp);
 
if (port == PORT_A && IS_GEN5(dev_priv)) {
/*
* Underrun reporting for the other pipe was disabled in
* g4x_pre_enable_dp(). The eDP PLL and port have now been
* enabled, so it's now safe to re-enable underrun reporting.
*/
intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
}
 
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
 
if (port == PORT_A)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
pps_unlock(intel_dp);
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
unsigned int lane_mask = 0x0;
 
if (IS_CHERRYVIEW(dev))
2603,7 → 2773,7
 
if (crtc->config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
pipe_name(pipe));
intel_audio_codec_enable(encoder);
}
}
2626,17 → 2796,30
 
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
 
intel_dp_prepare(encoder);
 
if (port == PORT_A && IS_GEN5(dev_priv)) {
/*
* We get FIFO underruns on the other pipe when
* enabling the CPU eDP PLL, and when enabling the CPU
* eDP port. We could potentially avoid the PLL
* underrun with a vblank wait just prior to enabling
* the PLL, but that doesn't appear to help the port
* enable case. Just sweep it all under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
}
 
/* Only ilk+ has port A */
if (dport->port == PORT_A) {
ironlake_set_pll_cpu_edp(intel_dp);
if (port == PORT_A)
ironlake_edp_pll_on(intel_dp);
}
}
 
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
2643,7 → 2826,7
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
enum pipe pipe = intel_dp->pps_pipe;
int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
 
edp_panel_vdd_off_sync(intel_dp);
 
2675,8 → 2858,7
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
struct intel_dp *intel_dp;
enum port port;
 
3041,7 → 3223,7
* Fetch AUX CH registers 0x202 - 0x207 which contain
* link status information
*/
static bool
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
return intel_dp_dpcd_read_wake(&intel_dp->aux,
3051,7 → 3233,7
}
 
/* These are source-specific values. */
static uint8_t
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
3064,7 → 3246,7
if (dev_priv->edp_low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
} else if (IS_VALLEYVIEW(dev))
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3074,7 → 3256,7
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
 
static uint8_t
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
3105,7 → 3287,7
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
3416,38 → 3598,6
return 0;
}
 
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t voltage_max;
uint8_t preemph_max;
 
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
}
 
voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
 
static uint32_t
gen4_signal_levels(uint8_t train_set)
{
3545,13 → 3695,13
}
}
 
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
 
3586,73 → 3736,27
(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT);
 
*DP = (*DP & ~mask) | signal_levels;
}
intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
 
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t *DP,
uint8_t dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
uint8_t buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
 
_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
 
I915_WRITE(intel_dp->output_reg, *DP);
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
 
buf[0] = dp_train_pat;
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
DP_TRAINING_PATTERN_DISABLE) {
/* don't write DP_TRAINING_LANEx_SET on disable */
len = 1;
} else {
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
len = intel_dp->lane_count + 1;
}
 
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
buf, len);
 
return ret == len;
}
 
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
uint8_t dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, DP);
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
 
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
int ret;
 
intel_get_adjust_train(intel_dp, link_status);
intel_dp_set_signal_levels(intel_dp, DP);
_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
 
I915_WRITE(intel_dp->output_reg, *DP);
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
 
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, intel_dp->lane_count);
 
return ret == intel_dp->lane_count;
}
 
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
3683,213 → 3787,7
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
 
/* Enable corresponding port and start training pattern 1 */
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
uint8_t link_config[2];
uint8_t link_bw, rate_select;
 
if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
 
intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
&link_bw, &rate_select);
 
/* Write the link configuration data */
link_config[0] = link_bw;
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
if (intel_dp->num_sink_rates)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
 
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
 
DP |= DP_PORT_EN;
 
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
 
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
 
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
 
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
break;
}
 
 
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_ERROR("too many full retries, give up\n");
break;
}
intel_dp_reset_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
voltage_tries = 0;
continue;
}
 
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
if (voltage_tries == 5) {
DRM_ERROR("too many voltage retries, give up\n");
break;
}
} else
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
/* Update training set as requested by target */
if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
DRM_ERROR("failed to update link training\n");
break;
}
}
 
intel_dp->DP = DP;
}
 
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
uint32_t training_pattern = DP_TRAINING_PATTERN_2;
 
/*
* Training Pattern 3 for HBR2 or 1.2 devices that support it.
*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2.
*
* Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
* supported but still not enabled.
*/
if (intel_dp_source_supports_hbr2(dev) &&
drm_dp_tps3_supported(intel_dp->dpcd))
training_pattern = DP_TRAINING_PATTERN_3;
else if (intel_dp->link_rate == 540000)
DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
 
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
}
 
tries = 0;
cr_tries = 0;
channel_eq = false;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
 
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
break;
}
 
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
 
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
}
 
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
channel_eq = true;
break;
}
 
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
continue;
}
 
/* Update training set as requested by target */
if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
DRM_ERROR("failed to update link training\n");
break;
}
++tries;
}
 
intel_dp_set_idle_link_train(intel_dp);
 
intel_dp->DP = DP;
 
if (channel_eq)
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}
 
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
intel_dp_set_link_train(intel_dp, &intel_dp->DP,
DP_TRAINING_PATTERN_DISABLE);
}
 
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_link_training_channel_equalization(intel_dp);
}
 
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3931,6 → 3829,13
* matching HDMI port to be enabled on transcoder A.
*/
if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
/* always enable with pattern 1 (as per spec) */
DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3940,9 → 3845,15
DP &= ~DP_PORT_EN;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
 
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
 
msleep(intel_dp->panel_power_down_delay);
 
intel_dp->DP = DP;
}
 
static bool
3990,7 → 3901,7
}
 
DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
yesno(intel_dp_source_supports_hbr2(dev)),
yesno(intel_dp_source_supports_hbr2(intel_dp)),
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
 
/* Intermediate frequency support */
4080,9 → 3991,12
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret = 0;
int count = 0;
int attempts = 10;
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4097,7 → 4011,22
goto out;
}
 
intel_dp->sink_crc.started = false;
do {
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
ret = -EIO;
goto out;
}
count = buf & DP_TEST_COUNT_MASK;
} while (--attempts && count);
 
if (attempts == 0) {
DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
ret = -ETIMEDOUT;
}
 
out:
hsw_enable_ips(intel_crtc);
return ret;
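/*
 * [Editorial sketch] The drain loop added above is a bounded poll:
 * wait a vblank, re-read the sink's CRC test counter, give up after a
 * fixed number of attempts. Hypothetical condensation; wait_vblank()
 * and read_test_count() are stand-ins for intel_wait_for_vblank() and
 * the DP_TEST_SINK_MISC DPCD read:
 */
#include <errno.h>

static int fake_count = 3;	/* pretend the counter drains over 3 vblanks */

static void wait_vblank(void) { }

static int read_test_count(void)
{
	return fake_count ? fake_count-- : 0;
}

static int example_wait_crc_drained(int attempts)
{
	int count;

	do {
		wait_vblank();
		count = read_test_count();	/* buf & DP_TEST_COUNT_MASK */
	} while (--attempts && count);

	return attempts ? 0 : -ETIMEDOUT;
}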
4106,16 → 4035,11
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret;
 
if (intel_dp->sink_crc.started) {
ret = intel_dp_sink_crc_stop(intel_dp);
if (ret)
return ret;
}
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
 
4122,11 → 4046,15
if (!(buf & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
 
intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
return -EIO;
 
if (buf & DP_TEST_SINK_START) {
ret = intel_dp_sink_crc_stop(intel_dp);
if (ret)
return ret;
}
 
hsw_disable_ips(intel_crtc);
 
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4135,7 → 4063,7
return -EIO;
}
 
intel_dp->sink_crc.started = true;
intel_wait_for_vblank(dev, intel_crtc->pipe);
return 0;
}
 
4147,7 → 4075,6
u8 buf;
int count, ret;
int attempts = 6;
bool old_equal_new;
 
ret = intel_dp_sink_crc_start(intel_dp);
if (ret)
4163,35 → 4090,17
}
count = buf & DP_TEST_COUNT_MASK;
 
/*
* Count might be reset during the loop. In this case
* last known count needs to be reset as well.
*/
if (count == 0)
intel_dp->sink_crc.last_count = 0;
} while (--attempts && count == 0);
 
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
ret = -EIO;
goto stop;
}
 
old_equal_new = (count == intel_dp->sink_crc.last_count &&
!memcmp(intel_dp->sink_crc.last_crc, crc,
6 * sizeof(u8)));
 
} while (--attempts && (count == 0 || old_equal_new));
 
intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
 
if (attempts == 0) {
if (old_equal_new) {
DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
} else {
DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
ret = -ETIMEDOUT;
goto stop;
}
 
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
ret = -EIO;
goto stop;
}
 
stop:
4291,13 → 4200,6
uint8_t rxdata = 0;
int status = 0;
 
intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
 
intel_dp->aux.i2c_nack_count = 0;
intel_dp->aux.i2c_defer_count = 0;
 
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
if (status <= 0) {
DRM_DEBUG_KMS("Could not read test request from sink\n");
4413,6 → 4315,14
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
/*
* Clearing compliance test variables to allow capturing
* of values for the next automated test request.
*/
intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
 
if (!intel_encoder->base.crtc)
return;
 
4443,7 → 4353,9
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
 
if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
/* If link training is requested, we should always perform it */
if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
4646,7 → 4558,7
*
* Return %true if @port is connected, %false otherwise.
*/
static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
if (HAS_PCH_IBX(dev_priv))
4661,41 → 4573,6
return g4x_digital_port_connected(dev_priv, port);
}
 
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 
if (!intel_digital_port_connected(dev_priv, intel_dig_port))
return connector_status_disconnected;
 
return intel_dp_detect_dpcd(intel_dp);
}
 
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
enum drm_connector_status status;
 
status = intel_panel_detect(dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
}
 
if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
return connector_status_disconnected;
 
return intel_dp_detect_dpcd(intel_dp);
}
 
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
4768,12 → 4645,19
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (HAS_PCH_SPLIT(dev))
status = ironlake_dp_detect(intel_dp);
else if (intel_digital_port_connected(to_i915(dev),
dp_to_dig_port(intel_dp)))
status = intel_dp_detect_dpcd(intel_dp);
else
status = g4x_dp_detect(intel_dp);
if (status != connector_status_connected)
status = connector_status_disconnected;
 
if (status != connector_status_connected) {
intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
 
goto out;
}
 
intel_dp_probe_oui(intel_dp);
 
4787,6 → 4671,14
goto out;
}
 
/*
* Clearing NACK and defer counts to get their exact values
* while reading the EDID, which are required by compliance tests
* 4.2.2.4 and 4.2.2.5
*/
intel_dp->aux.i2c_nack_count = 0;
intel_dp->aux.i2c_defer_count = 0;
 
intel_dp_set_edid(intel_dp);
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
4991,10 → 4883,10
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
 
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_aux_fini(intel_dp);
intel_dp_mst_encoder_cleanup(intel_dig_port);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
5003,7 → 4895,10
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
 
if (intel_dp->edp_notifier.notifier_call) {
intel_dp->edp_notifier.notifier_call = NULL;
}
}
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
5019,7 → 4914,7
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
5052,15 → 4947,13
 
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_dp *intel_dp;
 
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
 
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
 
intel_dp = enc_to_intel_dp(encoder);
 
pps_lock(intel_dp);
 
/*
5067,7 → 4960,7
* Read out the current power sequencer assignment,
* in case the BIOS did something with it.
*/
if (IS_VALLEYVIEW(encoder->dev))
if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
vlv_initial_power_sequencer_setup(intel_dp);
 
intel_edp_panel_vdd_sanitize(intel_dp);
5132,6 → 5025,9
intel_display_power_get(dev_priv, power_domain);
 
if (long_hpd) {
/* indicate that we need to restart link training */
intel_dp->train_set_valid = false;
 
if (!intel_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;
 
5176,25 → 5072,6
return ret;
}
 
/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
struct intel_dp *intel_dp;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
 
return -1;
}
 
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
5266,7 → 5143,7
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
5388,7 → 5265,7
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
5443,7 → 5320,7
 
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (port == PORT_A)
5550,17 → 5427,17
DRM_ERROR("Unsupported refreshrate type\n");
}
} else if (INTEL_INFO(dev)->gen > 6) {
u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
u32 val;
 
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
else
val |= PIPECONF_EDP_RR_MODE_SWITCH;
} else {
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
else
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5636,7 → 5513,7
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);
 
cancel_delayed_work_sync(&dev_priv->drrs.work);
// cancel_delayed_work_sync(&dev_priv->drrs.work);
}
 
static void intel_edp_drrs_downclock_work(struct work_struct *work)
5689,7 → 5566,7
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
 
cancel_delayed_work(&dev_priv->drrs.work);
// cancel_delayed_work(&dev_priv->drrs.work);
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
5734,7 → 5611,7
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
 
cancel_delayed_work(&dev_priv->drrs.work);
// cancel_delayed_work(&dev_priv->drrs.work);
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
5927,7 → 5804,9
}
mutex_unlock(&dev->mode_config.mutex);
 
if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
// intel_dp->edp_notifier.notifier_call = edp_notify_handler;
// register_reboot_notifier(&intel_dp->edp_notifier);
 
/*
* Figure out the current pipe for the initial backlight setup.
5966,7 → 5845,7
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
int type;
int type, ret;
 
intel_dp->pps_pipe = INVALID_PIPE;
 
5973,7 → 5852,7
/* intel_dp vfuncs */
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_VALLEYVIEW(dev))
else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5987,6 → 5866,9
else
intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
 
if (HAS_DDI(dev))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
 
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
6005,8 → 5887,8
intel_encoder->type = INTEL_OUTPUT_EDP;
 
/* eDP only on port B and/or C on vlv/chv */
if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
port != PORT_B && port != PORT_C))
if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
is_edp(intel_dp) && port != PORT_B && port != PORT_C))
return false;
 
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6038,7 → 5920,7
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
6057,7 → 5939,7
if (is_edp(intel_dp)) {
pps_lock(intel_dp);
intel_dp_init_panel_power_timestamps(intel_dp);
if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_initial_power_sequencer_setup(intel_dp);
else
intel_dp_init_panel_power_sequencer(dev, intel_dp);
6064,7 → 5946,9
pps_unlock(intel_dp);
}
 
intel_dp_aux_init(intel_dp, intel_connector);
ret = intel_dp_aux_init(intel_dp, intel_connector);
if (ret)
goto fail;
 
/* init MST on ports that can support it */
if (HAS_DP_MST(dev) &&
6073,21 → 5957,10
intel_connector->base.base.id);
 
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
drm_dp_aux_unregister(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
intel_dp_aux_fini(intel_dp);
intel_dp_mst_encoder_cleanup(intel_dig_port);
goto fail;
}
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
return false;
}
 
intel_dp_add_properties(intel_dp, connector);
 
6103,11 → 5976,27
i915_debugfs_connector_add(connector);
 
return true;
 
fail:
if (is_edp(intel_dp)) {
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
}
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
 
bool intel_dp_init(struct drm_device *dev,
int output_reg,
enum port port)
return false;
}
 
void
intel_dp_init(struct drm_device *dev,
i915_reg_t output_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
6117,7 → 6006,7
 
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return false;
return;
 
intel_connector = intel_connector_alloc();
if (!intel_connector)
6126,8 → 6015,9
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
 
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS, NULL))
goto err_encoder_init;
 
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->disable = intel_disable_dp;
6172,14 → 6062,16
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
 
return true;
return;
 
err_init_connector:
drm_encoder_cleanup(encoder);
err_encoder_init:
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
return false;
 
return;
}
 
void intel_dp_mst_suspend(struct drm_device *dev)