/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. The PSR feature allows the display
 * to go to lower standby states when the system is idle but the display is
 * on, as it completely eliminates display refresh requests to DDR memory
 * as long as the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
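
/*
 * An illustrative sketch, not part of this file: the frontbuffer tracking
 * code is expected to bracket a CPU frontbuffer write roughly like
 *
 *	intel_psr_invalidate(dev, frontbuffer_bits);
 *	... CPU rendering into the frontbuffer ...
 *	intel_psr_flush(dev, frontbuffer_bits, ORIGIN_CPU);
 *
 * The exact caller names live in the frontbuffer tracking code, and
 * ORIGIN_CPU is an assumed fb_op_origin value; only ORIGIN_FLIP is
 * actually referenced in this file.
 */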

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static bool is_edp_psr(struct intel_dp *intel_dp)
{
        return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}

static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t val;

        val = I915_READ(VLV_PSRSTAT(pipe)) &
              VLV_EDP_PSR_CURR_STATE_MASK;
        return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
               (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

static void intel_psr_write_vsc(struct intel_dp *intel_dp,
                                const struct edp_vsc_psr *vsc_psr)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
        uint32_t *data = (uint32_t *) vsc_psr;
        unsigned int i;

        /* As per BSpec (Pipe Video Data Island Packet), we need to disable
           the video DIP being updated before programming the video DIP data
           buffer registers for the DIP being updated. */
        I915_WRITE(ctl_reg, 0);
        POSTING_READ(ctl_reg);

        for (i = 0; i < sizeof(*vsc_psr); i += 4) {
                I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
                                                   i >> 2), *data);
                data++;
        }
        for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
                I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
                                                   i >> 2), 0);

        I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
        POSTING_READ(ctl_reg);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        uint32_t val;

        /* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
        val  = I915_READ(VLV_VSCSDP(pipe));
        val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
        val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
        I915_WRITE(VLV_VSCSDP(pipe), val);
}

static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
        struct edp_vsc_psr psr_vsc;

        /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
        memset(&psr_vsc, 0, sizeof(psr_vsc));
        psr_vsc.sdp_header.HB0 = 0;
        psr_vsc.sdp_header.HB1 = 0x7;
        psr_vsc.sdp_header.HB2 = 0x3;
        psr_vsc.sdp_header.HB3 = 0xb;
        intel_psr_write_vsc(intel_dp, &psr_vsc);
}

static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
        struct edp_vsc_psr psr_vsc;

        /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
        memset(&psr_vsc, 0, sizeof(psr_vsc));
        psr_vsc.sdp_header.HB0 = 0;
        psr_vsc.sdp_header.HB1 = 0x7;
        psr_vsc.sdp_header.HB2 = 0x2;
        psr_vsc.sdp_header.HB3 = 0x8;
        intel_psr_write_vsc(intel_dp, &psr_vsc);
}
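
/*
 * A note on the two VSC SDP headers above (our reading of the eDP tables
 * cited in the comments, stated as an aid rather than as spec text):
 * HB0 is the secondary-data packet ID (always 0), HB1 = 0x7 identifies
 * the packet as a VSC SDP, HB2 is the header revision (0x2 for PSR,
 * 0x3 for PSR2/Selective Update) and HB3 gives the number of valid
 * payload bytes (0x8 and 0xb respectively).
 */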

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                           DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t aux_clock_divider;
        uint32_t aux_data_reg, aux_ctl_reg;
        int precharge = 0x3;
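        /*
         * The message below is a pre-programmed native AUX write of one
         * byte that sets DPCD DP_SET_POWER (600h) to D0; byte 3 carries
         * the request length minus one, hence the "1 - 1". As the DOC
         * comment up top notes, the hardware itself takes care of sending
         * the required DP aux message, which is presumably why the message
         * is loaded into the AUX data registers here rather than being
         * transmitted immediately.
         */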
        static const uint8_t aux_msg[] = {
                [0] = DP_AUX_NATIVE_WRITE << 4,
                [1] = DP_SET_POWER >> 8,
                [2] = DP_SET_POWER & 0xff,
                [3] = 1 - 1,
                [4] = DP_SET_POWER_D0,
        };
        int i;

        BUILD_BUG_ON(sizeof(aux_msg) > 20);

        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                           DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);

        /* Enable AUX frame sync at sink */
        if (dev_priv->psr.aux_frame_sync)
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
                                DP_AUX_FRAME_SYNC_ENABLE);

        aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
                                DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
        aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
                                DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);

        /* Setup AUX registers */
        for (i = 0; i < sizeof(aux_msg); i += 4)
                I915_WRITE(aux_data_reg + i,
                           intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

        if (INTEL_INFO(dev)->gen >= 9) {
                uint32_t val;

                val = I915_READ(aux_ctl_reg);
                val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
                val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
                val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
                val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
                /* Use hardcoded data values for PSR, frame sync and GTC */
                val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
                val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
                val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
                I915_WRITE(aux_ctl_reg, val);
        } else {
                I915_WRITE(aux_ctl_reg,
                   DP_AUX_CH_CTL_TIME_OUT_400us |
                   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
                   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
                   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
        }

        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
}

static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;

        /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
        I915_WRITE(VLV_PSRCTL(pipe),
                   VLV_EDP_PSR_MODE_SW_TIMER |
                   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
                   VLV_EDP_PSR_ENABLE);
}

static void vlv_psr_activate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;

        /* Let's do the transition from PSR_state 1 to PSR_state 2, i.e. PSR
         * transition to active - static frame transmission. The hardware is
         * then responsible for the transition to PSR_state 3, i.e. PSR
         * active - no Remote Frame Buffer (RFB) update.
         */
        I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
                   VLV_EDP_PSR_ACTIVE_ENTRY);
}

static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        uint32_t max_sleep_time = 0x1f;
        /* It was recently identified that, depending on the panel, the idle
         * frame count calculated by the hardware can be off by 1. So let's
         * use what came from VBT + 1.
         * There are also cases where the panel demands at least 4 idle
         * frames but VBT is not set. To cover both cases let's use at
         * least 5 when VBT isn't set, to be on the safe side (e.g. a VBT
         * value of 2 yields 3 idle frames; an unset VBT yields 5).
         */
        uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
                               dev_priv->vbt.psr.idle_frames + 1 : 5;
        uint32_t val = 0x0;
        const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
                /* It doesn't mean we shouldn't send TPS patterns, so let's
                   send the minimal TP1 possible and skip TP2. */
                val |= EDP_PSR_TP1_TIME_100us;
                val |= EDP_PSR_TP2_TP3_TIME_0us;
                val |= EDP_PSR_SKIP_AUX_EXIT;
                /* Sink should be able to train with the 5 or 6 idle patterns */
                idle_frames += 4;
        }

        I915_WRITE(EDP_PSR_CTL(dev), val |
                   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
                   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
                   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
                   EDP_PSR_ENABLE);

        if (dev_priv->psr.psr2_support)
                I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
                                EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
}

static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        lockdep_assert_held(&dev_priv->psr.lock);
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

        dev_priv->psr.source_ok = false;

        if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
                DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
                return false;
        }

        if (!i915.enable_psr) {
                DRM_DEBUG_KMS("PSR disabled by flag\n");
                return false;
        }

        if (IS_HASWELL(dev) &&
            I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
                      S3D_ENABLE) {
                DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
                return false;
        }

        if (IS_HASWELL(dev) &&
            intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
                return false;
        }

        if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) ||
                                    (dig_port->port != PORT_A))) {
                DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
                return false;
        }

        dev_priv->psr.source_ok = true;
        return true;
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
        WARN_ON(dev_priv->psr.active);
        lockdep_assert_held(&dev_priv->psr.lock);

        /* Enable/Re-enable PSR on the host */
        if (HAS_DDI(dev))
                /* On HSW+ after we enable PSR on the source it will activate
                 * as soon as it matches the configured idle_frame count. So
                 * we just actually enable it here at activation time.
                 */
                hsw_psr_enable_source(intel_dp);
        else
                vlv_psr_activate(intel_dp);

        dev_priv->psr.active = true;
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);

        if (!HAS_PSR(dev)) {
                DRM_DEBUG_KMS("PSR not supported on this platform\n");
                return;
        }

        if (!is_edp_psr(intel_dp)) {
                DRM_DEBUG_KMS("PSR not supported by this panel\n");
                return;
        }

        mutex_lock(&dev_priv->psr.lock);
        if (dev_priv->psr.enabled) {
                DRM_DEBUG_KMS("PSR already in use\n");
                goto unlock;
        }

        if (!intel_psr_match_conditions(intel_dp))
                goto unlock;

        dev_priv->psr.busy_frontbuffer_bits = 0;

        if (HAS_DDI(dev)) {
                hsw_psr_setup_vsc(intel_dp);

                if (dev_priv->psr.psr2_support) {
                        /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
                        if (crtc->config->pipe_src_w > 3200 ||
                                crtc->config->pipe_src_h > 2000)
                                dev_priv->psr.psr2_support = false;
                        else
                                skl_psr_setup_su_vsc(intel_dp);
                }

                /* Avoid continuous PSR exit by masking memup and hpd */
                I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
                           EDP_PSR_DEBUG_MASK_HPD);

                /* Enable PSR on the panel */
                hsw_psr_enable_sink(intel_dp);

                if (INTEL_INFO(dev)->gen >= 9)
                        intel_psr_activate(intel_dp);
        } else {
                vlv_psr_setup_vsc(intel_dp);

                /* Enable PSR on the panel */
                vlv_psr_enable_sink(intel_dp);

                /* On HSW+ enable_source also means going to the PSR
                 * entry/active state as soon as idle_frame is achieved, and
                 * here that would be too soon. However on VLV enable_source
                 * just enables PSR and leaves it in the inactive state, so
                 * we can do this prior to the active transition, i.e. here.
                 */
                vlv_psr_enable_source(intel_dp);
        }

        dev_priv->psr.enabled = intel_dp;
unlock:
        mutex_unlock(&dev_priv->psr.lock);
}

static void vlv_psr_disable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
        uint32_t val;

        if (dev_priv->psr.active) {
                /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
                if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
                              VLV_EDP_PSR_IN_TRANS) == 0, 1))
                        WARN(1, "PSR transition took longer than expected\n");

                val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
                val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
                val &= ~VLV_EDP_PSR_ENABLE;
                val &= ~VLV_EDP_PSR_MODE_MASK;
                I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);

                dev_priv->psr.active = false;
        } else {
                WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
        }
}

static void hsw_psr_disable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->psr.active) {
                I915_WRITE(EDP_PSR_CTL(dev),
                           I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

                /* Wait till PSR is idle */
                if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
                               EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
                        DRM_ERROR("Timed out waiting for PSR Idle State\n");

                dev_priv->psr.active = false;
        } else {
                WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
        }
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        if (HAS_DDI(dev))
                hsw_psr_disable(intel_dp);
        else
                vlv_psr_disable(intel_dp);

        dev_priv->psr.enabled = NULL;
        mutex_unlock(&dev_priv->psr.lock);

        cancel_delayed_work_sync(&dev_priv->psr.work);
}

static void intel_psr_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), psr.work.work);
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;

        /* We have to make sure PSR is ready for re-enable,
         * otherwise it stays disabled until the next full
         * enable/disable cycle. PSR might take some time to get
         * fully disabled and be ready for re-enable.
         */
        if (HAS_DDI(dev_priv->dev)) {
                if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
                              EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                        return;
                }
        } else {
                if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
                              VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                        return;
                }
        }
        mutex_lock(&dev_priv->psr.lock);
        intel_dp = dev_priv->psr.enabled;

        if (!intel_dp)
                goto unlock;

        /*
         * The delayed work can race with an invalidate hence we need to
         * recheck. Since psr_flush first clears this and then reschedules we
         * won't ever miss a flush when bailing out here.
         */
        if (dev_priv->psr.busy_frontbuffer_bits)
                goto unlock;

        intel_psr_activate(intel_dp);
unlock:
        mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        u32 val;

        if (!dev_priv->psr.active)
                return;

        if (HAS_DDI(dev)) {
                val = I915_READ(EDP_PSR_CTL(dev));

                WARN_ON(!(val & EDP_PSR_ENABLE));

                I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
        } else {
                val = I915_READ(VLV_PSRCTL(pipe));

                /* Here we do the transition from PSR_state 3 to PSR_state 5
                 * directly, since PSR_state 4, i.e. active with single frame
                 * update, can be skipped. After PSR_state 5, i.e. PSR exit,
                 * the hardware is responsible for transitioning back to
                 * PSR_state 1, i.e. PSR inactive - the same state as after
                 * vlv_psr_enable_source.
                 */
                val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
                I915_WRITE(VLV_PSRCTL(pipe), val);

                /* Send AUX wake up - the spec says that after transitioning
                 * to PSR active we have to send an AUX wake up by writing
                 * 01h to DPCD 600h of the sink device.
                 * XXX: This might slow down the transition, but without it
                 * the HW doesn't complete the transition to PSR_state 1 and
                 * we never get the screen updated.
                 */
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                   DP_SET_POWER_D0);
        }

        dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * the hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_device *dev,
                                   unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;
        u32 val;

        /*
         * Single frame update is already supported on BDW+ but it requires
         * many W/A and it isn't really needed.
         */
        if (!IS_VALLEYVIEW(dev))
                return;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
                val = I915_READ(VLV_PSRCTL(pipe));

                /*
                 * We need to set this bit before writing the registers for a
                 * flip. The bit is self-clearing once PSR reaches the active
                 * state.
                 */
                I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
        }
        mutex_unlock(&dev_priv->psr.lock);
}
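
/*
 * Illustrative flip sequence (the caller shown is assumed, not part of
 * this file): the hardware wants the single-frame-update bit set before
 * the flip registers are written, and tagging the flush as ORIGIN_FLIP
 * lets the HAS_DDI path in intel_psr_flush() below skip the PSR exit:
 *
 *	intel_psr_single_frame_update(dev, frontbuffer_bits);
 *	... write the flip registers ...
 *	intel_psr_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 */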

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
                          unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

        if (frontbuffer_bits)
                intel_psr_exit(dev);

        mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
                     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;
        int delay_ms = HAS_DDI(dev) ? 100 : 500;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

        if (HAS_DDI(dev)) {
                /*
                 * By definition every flush should mean invalidate + flush,
                 * however on core platforms let's minimize the
                 * disable/re-enable so we can avoid the invalidate when flip
                 * originated the flush.
                 */
                if (frontbuffer_bits && origin != ORIGIN_FLIP)
                        intel_psr_exit(dev);
        } else {
                /*
                 * On Valleyview and Cherryview we don't use the hardware
                 * tracking, so plane updates and cursor moves don't result
                 * in a PSR invalidation, which means we need to manually
                 * fake this in software for all flushes.
                 */
                if (frontbuffer_bits)
                        intel_psr_exit(dev);
        }

        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->psr.work,
                                      msecs_to_jiffies(delay_ms));
        mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
        mutex_init(&dev_priv->psr.lock);
}