61,25 → 61,26 |
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; |
} |
|
bool intel_psr_is_enabled(struct drm_device *dev) |
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t val; |
|
if (!HAS_PSR(dev)) |
return false; |
|
return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; |
val = I915_READ(VLV_PSRSTAT(pipe)) & |
VLV_EDP_PSR_CURR_STATE_MASK; |
return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) || |
(val == VLV_EDP_PSR_ACTIVE_SF_UPDATE); |
} |
|
static void intel_psr_write_vsc(struct intel_dp *intel_dp, |
struct edp_vsc_psr *vsc_psr) |
const struct edp_vsc_psr *vsc_psr) |
{ |
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = dig_port->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); |
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder); |
u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder); |
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; |
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); |
uint32_t *data = (uint32_t *) vsc_psr; |
unsigned int i; |
|
89,21 → 90,52 |
I915_WRITE(ctl_reg, 0); |
POSTING_READ(ctl_reg); |
|
for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { |
if (i < sizeof(struct edp_vsc_psr)) |
I915_WRITE(data_reg + i, *data++); |
else |
I915_WRITE(data_reg + i, 0); |
for (i = 0; i < sizeof(*vsc_psr); i += 4) { |
I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, |
i >> 2), *data); |
data++; |
} |
for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) |
I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, |
i >> 2), 0); |
|
I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); |
POSTING_READ(ctl_reg); |
} |
|
static void intel_psr_setup_vsc(struct intel_dp *intel_dp) |
/*
 * vlv_psr_setup_vsc - configure VSC SDP generation for PSR on VLV/CHV.
 * @intel_dp: the eDP encoder PSR is being set up for
 *
 * Unlike HSW+, which needs the VSC packet written by software, VLV/CHV
 * hardware auto-generates the VSC SDP; here we only program how often it
 * is sent (every frame) via the per-pipe VLV_VSCSDP register.
 */
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	uint32_t val;

	/* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
	val = I915_READ(VLV_VSCSDP(pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(pipe), val);
}
|
/*
 * skl_psr_setup_su_vsc - write the PSR2 (Selective Update) VSC SDP header.
 * @intel_dp: the eDP encoder PSR2 is being set up for
 *
 * Builds a zeroed VSC packet with only the SDP header bytes filled in for
 * Selective Update operation and hands it to intel_psr_write_vsc() to be
 * programmed into the DIP data registers.
 */
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	/* HB0..HB3 values come straight from the eDP 1.4 table referenced
	 * above (0x7 = VSC packet type, 0x3 = revision, 0xb = valid data
	 * bytes) -- NOTE(review): verify against the spec revision in use. */
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x3;
	psr_vsc.sdp_header.HB3 = 0xb;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}
|
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp) |
{ |
struct edp_vsc_psr psr_vsc; |
|
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ |
memset(&psr_vsc, 0, sizeof(psr_vsc)); |
psr_vsc.sdp_header.HB0 = 0; |
113,14 → 145,20 |
intel_psr_write_vsc(intel_dp, &psr_vsc); |
} |
|
static void intel_psr_enable_sink(struct intel_dp *intel_dp) |
/*
 * vlv_psr_enable_sink - enable PSR in the sink (panel) over DPCD for VLV/CHV.
 * @intel_dp: the eDP encoder whose sink should enter PSR
 *
 * Writes DP_PSR_EN_CFG requesting PSR with the main link kept active,
 * which is the mode the VLV/CHV source-side implementation expects.
 */
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}
|
static void hsw_psr_enable_sink(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = dig_port->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t aux_clock_divider; |
uint32_t aux_data_reg, aux_ctl_reg; |
int precharge = 0x3; |
bool only_standby = false; |
static const uint8_t aux_msg[] = { |
[0] = DP_AUX_NATIVE_WRITE << 4, |
[1] = DP_SET_POWER >> 8, |
134,23 → 172,40 |
|
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); |
|
if (IS_BROADWELL(dev) && dig_port->port != PORT_A) |
only_standby = true; |
|
/* Enable PSR in sink */ |
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) |
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, |
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); |
else |
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, |
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); |
|
/* Enable AUX frame sync at sink */ |
if (dev_priv->psr.aux_frame_sync) |
drm_dp_dpcd_writeb(&intel_dp->aux, |
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, |
DP_AUX_FRAME_SYNC_ENABLE); |
|
aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ? |
DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev); |
aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ? |
DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev); |
|
/* Setup AUX registers */ |
for (i = 0; i < sizeof(aux_msg); i += 4) |
I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i, |
I915_WRITE(aux_data_reg + i, |
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); |
|
I915_WRITE(EDP_PSR_AUX_CTL(dev), |
if (INTEL_INFO(dev)->gen >= 9) { |
uint32_t val; |
|
val = I915_READ(aux_ctl_reg); |
val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK; |
val |= DP_AUX_CH_CTL_TIME_OUT_1600us; |
val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK; |
val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); |
/* Use hardcoded data values for PSR, frame sync and GTC */ |
val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL; |
val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL; |
val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL; |
I915_WRITE(aux_ctl_reg, val); |
} else { |
I915_WRITE(aux_ctl_reg, |
DP_AUX_CH_CTL_TIME_OUT_400us | |
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | |
157,28 → 212,69 |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); |
} |
|
static void intel_psr_enable_source(struct intel_dp *intel_dp) |
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE); |
} |
|
/*
 * vlv_psr_enable_source - enable PSR on the source side for VLV/CHV.
 * @intel_dp: the eDP encoder PSR is being enabled for
 *
 * Programs VLV_PSRCTL to take the hardware from PSR_state 0 (disabled) to
 * PSR_state 1 (inactive). The later transition to the active states is a
 * separate step, done in vlv_psr_activate().
 */
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
	I915_WRITE(VLV_PSRCTL(pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}
|
/*
 * vlv_psr_activate - request the transition to PSR active on VLV/CHV.
 * @intel_dp: the eDP encoder to activate PSR for
 *
 * Sets the ACTIVE_ENTRY bit in VLV_PSRCTL; the hardware then walks the
 * remaining state-machine transitions on its own (see comment below).
 * Must only be called after vlv_psr_enable_source() has put the hardware
 * in PSR_state 1.
 */
static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Let's do the transition from PSR_state 1 to PSR_state 2
	 * that is PSR transition to active - static frame transmission.
	 * Then Hardware is responsible for the transition to PSR_state 3
	 * that is PSR active - no Remote Frame Buffer (RFB) update.
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}
|
static void hsw_psr_enable_source(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = dig_port->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
|
uint32_t max_sleep_time = 0x1f; |
uint32_t idle_frames = 1; |
/* Lately it was identified that depending on panel idle frame count |
* calculated at HW can be off by 1. So let's use what came |
* from VBT + 1. |
* There are also other cases where panel demands at least 4 |
* but VBT is not being set. To cover these 2 cases lets use |
* at least 5 when VBT isn't set to be on the safest side. |
*/ |
uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ? |
dev_priv->vbt.psr.idle_frames + 1 : 5; |
uint32_t val = 0x0; |
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; |
bool only_standby = false; |
|
if (IS_BROADWELL(dev) && dig_port->port != PORT_A) |
only_standby = true; |
|
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) { |
val |= EDP_PSR_LINK_STANDBY; |
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { |
/* It doesn't mean we shouldn't send TPS patters, so let's |
send the minimal TP1 possible and skip TP2. */ |
val |= EDP_PSR_TP1_TIME_100us; |
val |= EDP_PSR_TP2_TP3_TIME_0us; |
val |= EDP_PSR_TP1_TIME_0us; |
val |= EDP_PSR_SKIP_AUX_EXIT; |
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0; |
} else |
val |= EDP_PSR_LINK_DISABLE; |
/* Sink should be able to train with the 5 or 6 idle patterns */ |
idle_frames += 4; |
} |
|
I915_WRITE(EDP_PSR_CTL(dev), val | |
(IS_BROADWELL(dev) ? 0 : link_entry_time) | |
185,6 → 281,10 |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | |
EDP_PSR_ENABLE); |
|
if (dev_priv->psr.psr2_support) |
I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE | |
EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100); |
} |
|
static bool intel_psr_match_conditions(struct intel_dp *intel_dp) |
211,27 → 311,30 |
return false; |
} |
|
/* Below limitations aren't valid for Broadwell */ |
if (IS_BROADWELL(dev)) |
goto out; |
|
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & |
if (IS_HASWELL(dev) && |
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) & |
S3D_ENABLE) { |
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); |
return false; |
} |
|
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { |
if (IS_HASWELL(dev) && |
intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { |
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); |
return false; |
} |
|
out: |
if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) || |
(dig_port->port != PORT_A))) { |
DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n"); |
return false; |
} |
|
dev_priv->psr.source_ok = true; |
return true; |
} |
|
static void intel_psr_do_enable(struct intel_dp *intel_dp) |
static void intel_psr_activate(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
242,7 → 345,14 |
lockdep_assert_held(&dev_priv->psr.lock); |
|
/* Enable/Re-enable PSR on the host */ |
intel_psr_enable_source(intel_dp); |
if (HAS_DDI(dev)) |
/* On HSW+ after we enable PSR on source it will activate it |
* as soon as it match configure idle_frame count. So |
* we just actually enable it here on activation time. |
*/ |
hsw_psr_enable_source(intel_dp); |
else |
vlv_psr_activate(intel_dp); |
|
dev_priv->psr.active = true; |
} |
258,6 → 368,7 |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); |
|
if (!HAS_PSR(dev)) { |
DRM_DEBUG_KMS("PSR not supported on this platform\n"); |
280,38 → 391,80 |
|
dev_priv->psr.busy_frontbuffer_bits = 0; |
|
intel_psr_setup_vsc(intel_dp); |
if (HAS_DDI(dev)) { |
hsw_psr_setup_vsc(intel_dp); |
|
if (dev_priv->psr.psr2_support) { |
/* PSR2 is restricted to work with panel resolutions upto 3200x2000 */ |
if (crtc->config->pipe_src_w > 3200 || |
crtc->config->pipe_src_h > 2000) |
dev_priv->psr.psr2_support = false; |
else |
skl_psr_setup_su_vsc(intel_dp); |
} |
|
/* Avoid continuous PSR exit by masking memup and hpd */ |
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); |
EDP_PSR_DEBUG_MASK_HPD); |
|
/* Enable PSR on the panel */ |
intel_psr_enable_sink(intel_dp); |
hsw_psr_enable_sink(intel_dp); |
|
if (INTEL_INFO(dev)->gen >= 9) |
intel_psr_activate(intel_dp); |
} else { |
vlv_psr_setup_vsc(intel_dp); |
|
/* Enable PSR on the panel */ |
vlv_psr_enable_sink(intel_dp); |
|
/* On HSW+ enable_source also means go to PSR entry/active |
* state as soon as idle_frame achieved and here would be |
* to soon. However on VLV enable_source just enable PSR |
* but let it on inactive state. So we might do this prior |
* to active transition, i.e. here. |
*/ |
vlv_psr_enable_source(intel_dp); |
} |
|
dev_priv->psr.enabled = intel_dp; |
unlock: |
mutex_unlock(&dev_priv->psr.lock); |
} |
|
/** |
* intel_psr_disable - Disable PSR |
* @intel_dp: Intel DP |
* |
* This function needs to be called before disabling pipe. |
*/ |
void intel_psr_disable(struct intel_dp *intel_dp) |
static void vlv_psr_disable(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = |
to_intel_crtc(intel_dig_port->base.base.crtc); |
uint32_t val; |
|
mutex_lock(&dev_priv->psr.lock); |
if (!dev_priv->psr.enabled) { |
mutex_unlock(&dev_priv->psr.lock); |
return; |
if (dev_priv->psr.active) { |
/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */ |
if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) & |
VLV_EDP_PSR_IN_TRANS) == 0, 1)) |
WARN(1, "PSR transition took longer than expected\n"); |
|
val = I915_READ(VLV_PSRCTL(intel_crtc->pipe)); |
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; |
val &= ~VLV_EDP_PSR_ENABLE; |
val &= ~VLV_EDP_PSR_MODE_MASK; |
I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val); |
|
dev_priv->psr.active = false; |
} else { |
WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe)); |
} |
} |
|
static void hsw_psr_disable(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
|
if (dev_priv->psr.active) { |
I915_WRITE(EDP_PSR_CTL(dev), |
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); |
325,7 → 478,31 |
} else { |
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); |
} |
} |
|
/** |
* intel_psr_disable - Disable PSR |
* @intel_dp: Intel DP |
* |
* This function needs to be called before disabling pipe. |
*/ |
void intel_psr_disable(struct intel_dp *intel_dp) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
|
mutex_lock(&dev_priv->psr.lock); |
if (!dev_priv->psr.enabled) { |
mutex_unlock(&dev_priv->psr.lock); |
return; |
} |
|
if (HAS_DDI(dev)) |
hsw_psr_disable(intel_dp); |
else |
vlv_psr_disable(intel_dp); |
|
dev_priv->psr.enabled = NULL; |
mutex_unlock(&dev_priv->psr.lock); |
|
337,6 → 514,8 |
struct drm_i915_private *dev_priv = |
container_of(work, typeof(*dev_priv), psr.work.work); |
struct intel_dp *intel_dp = dev_priv->psr.enabled; |
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; |
enum pipe pipe = to_intel_crtc(crtc)->pipe; |
|
/* We have to make sure PSR is ready for re-enable |
* otherwise it keeps disabled until next full enable/disable cycle. |
343,12 → 522,19 |
* PSR might take some time to get fully disabled |
* and be ready for re-enable. |
*/ |
if (HAS_DDI(dev_priv->dev)) { |
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) & |
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { |
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); |
return; |
} |
|
} else { |
if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) & |
VLV_EDP_PSR_IN_TRANS) == 0, 1)) { |
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); |
return; |
} |
} |
mutex_lock(&dev_priv->psr.lock); |
intel_dp = dev_priv->psr.enabled; |
|
363,7 → 549,7 |
if (dev_priv->psr.busy_frontbuffer_bits) |
goto unlock; |
|
intel_psr_do_enable(intel_dp); |
intel_psr_activate(intel_dp); |
unlock: |
mutex_unlock(&dev_priv->psr.lock); |
} |
371,19 → 557,93 |
static void intel_psr_exit(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_dp *intel_dp = dev_priv->psr.enabled; |
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; |
enum pipe pipe = to_intel_crtc(crtc)->pipe; |
u32 val; |
|
if (dev_priv->psr.active) { |
u32 val = I915_READ(EDP_PSR_CTL(dev)); |
if (!dev_priv->psr.active) |
return; |
|
if (HAS_DDI(dev)) { |
val = I915_READ(EDP_PSR_CTL(dev)); |
|
WARN_ON(!(val & EDP_PSR_ENABLE)); |
|
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); |
} else { |
val = I915_READ(VLV_PSRCTL(pipe)); |
|
/* Here we do the transition from PSR_state 3 to PSR_state 5 |
* directly once PSR State 4 that is active with single frame |
* update can be skipped. PSR_state 5 that is PSR exit then |
* Hardware is responsible to transition back to PSR_state 1 |
* that is PSR inactive. Same state after |
* vlv_edp_psr_enable_source. |
*/ |
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; |
I915_WRITE(VLV_PSRCTL(pipe), val); |
|
/* Send AUX wake up - Spec says after transitioning to PSR |
* active we have to send AUX wake up by writing 01h in DPCD |
* 600h of sink device. |
* XXX: This might slow down the transition, but without this |
* HW doesn't complete the transition to PSR_state 1 and we |
* never get the screen updated. |
*/ |
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, |
DP_SET_POWER_D0); |
} |
|
dev_priv->psr.active = false; |
} |
|
/** |
* intel_psr_single_frame_update - Single Frame Update |
* @dev: DRM device |
* @frontbuffer_bits: frontbuffer plane tracking bits |
* |
* Some platforms support a single frame update feature that is used to |
* send and update only one frame on Remote Frame Buffer. |
* So far it is only implemented for Valleyview and Cherryview because |
* hardware requires this to be done before a page flip. |
*/ |
void intel_psr_single_frame_update(struct drm_device *dev,
				   unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	/* PSR is tracked on the pipe driving the enabled eDP panel; only
	 * frontbuffer bits for that pipe are relevant below. */
	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a flip.
		 * This bit will be self-clear when it gets to the PSR active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}
|
/** |
* intel_psr_invalidate - Invalidade PSR |
* @dev: DRM device |
412,11 → 672,12 |
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; |
pipe = to_intel_crtc(crtc)->pipe; |
|
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); |
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; |
|
if (frontbuffer_bits) |
intel_psr_exit(dev); |
|
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); |
|
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; |
mutex_unlock(&dev_priv->psr.lock); |
} |
|
424,6 → 685,7 |
* intel_psr_flush - Flush PSR |
* @dev: DRM device |
* @frontbuffer_bits: frontbuffer plane tracking bits |
* @origin: which operation caused the flush |
* |
* Since the hardware frontbuffer tracking has gaps we need to integrate |
* with the software frontbuffer tracking. This function gets called every |
433,11 → 695,12 |
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. |
*/ |
void intel_psr_flush(struct drm_device *dev, |
unsigned frontbuffer_bits) |
unsigned frontbuffer_bits, enum fb_op_origin origin) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_crtc *crtc; |
enum pipe pipe; |
int delay_ms = HAS_DDI(dev) ? 100 : 500; |
|
mutex_lock(&dev_priv->psr.lock); |
if (!dev_priv->psr.enabled) { |
447,21 → 710,33 |
|
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; |
pipe = to_intel_crtc(crtc)->pipe; |
|
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); |
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; |
|
if (HAS_DDI(dev)) { |
/* |
* On Haswell sprite plane updates don't result in a psr invalidating |
* signal in the hardware. Which means we need to manually fake this in |
* software for all flushes, not just when we've seen a preceding |
* invalidation through frontbuffer rendering. |
* By definition every flush should mean invalidate + flush, |
* however on core platforms let's minimize the |
* disable/re-enable so we can avoid the invalidate when flip |
* originated the flush. |
*/ |
if (IS_HASWELL(dev) && |
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe))) |
if (frontbuffer_bits && origin != ORIGIN_FLIP) |
intel_psr_exit(dev); |
} else { |
/* |
* On Valleyview and Cherryview we don't use hardware tracking |
* so any plane updates or cursor moves don't result in a PSR |
* invalidating. Which means we need to manually fake this in |
* software for all flushes. |
*/ |
if (frontbuffer_bits) |
intel_psr_exit(dev); |
} |
|
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) |
schedule_delayed_work(&dev_priv->psr.work, |
msecs_to_jiffies(100)); |
msecs_to_jiffies(delay_ms)); |
mutex_unlock(&dev_priv->psr.lock); |
} |
|