/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. The PSR feature allows the display
 * to go to lower standby states when the system is idle but the display is
 * on, as it completely eliminates display refresh requests to DDR memory as
 * long as the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
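
/*
 * A minimal sketch of that integration, assuming a hypothetical caller in
 * the frontbuffer tracking code (the bits value here is illustrative):
 *
 *	unsigned bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
 *
 *	intel_psr_invalidate(dev, bits);    rendering starts, PSR exits
 *	    ... CPU renders into the frontbuffer ...
 *	intel_psr_flush(dev, bits);         rendering flushed; psr.work will
 *	                                    re-enable PSR after an idle delay
 */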

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static bool is_edp_psr(struct intel_dp *intel_dp)
{
        return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
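
/*
 * Note: psr_dpcd[] is assumed to have been cached from the sink's DPCD PSR
 * capability registers (DP_PSR_SUPPORT, address 0x070, and the bytes that
 * follow) when the panel was probed; byte 0 reports whether the panel
 * implements PSR at all.
 */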

bool intel_psr_is_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_PSR(dev))
                return false;

        return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}

static void intel_psr_write_vsc(struct intel_dp *intel_dp,
                                struct edp_vsc_psr *vsc_psr)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
        u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
        uint32_t *data = (uint32_t *) vsc_psr;
        unsigned int i;

        /* As per BSpec (Pipe Video Data Island Packet), we need to disable
         * the video DIP before programming the video DIP data buffer
         * registers for the DIP being updated. */
        I915_WRITE(ctl_reg, 0);
        POSTING_READ(ctl_reg);

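        /*
         * Copy the VSC packet into the DIP data registers, zero-padding any
         * remainder of the DIP buffer; the register granularity is 32 bits,
         * hence the stride of 4 bytes.
         */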
        for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
                if (i < sizeof(struct edp_vsc_psr))
                        I915_WRITE(data_reg + i, *data++);
                else
                        I915_WRITE(data_reg + i, 0);
        }

        I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
        POSTING_READ(ctl_reg);
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
{
        struct edp_vsc_psr psr_vsc;

        /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
        memset(&psr_vsc, 0, sizeof(psr_vsc));
        psr_vsc.sdp_header.HB0 = 0;   /* secondary data packet ID */
        psr_vsc.sdp_header.HB1 = 0x7; /* packet type: VSC */
        psr_vsc.sdp_header.HB2 = 0x2; /* revision: VSC SDP with PSR support */
        psr_vsc.sdp_header.HB3 = 0x8; /* number of valid data bytes */
        intel_psr_write_vsc(intel_dp, &psr_vsc);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t aux_clock_divider;
        int precharge = 0x3;
        bool only_standby = false;
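        /*
         * A raw DP AUX native write, framed the way the hardware transmits
         * it (a sketch of the layout, per the DP spec): byte 0 carries the
         * command in its high nibble (address bits 19:16 are zero here),
         * bytes 1-2 carry the remaining address bits of DP_SET_POWER
         * (0x600), byte 3 is the payload length minus one (hence the
         * "1 - 1" for a single byte), and byte 4 is the payload,
         * DP_SET_POWER_D0.
         */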
        static const uint8_t aux_msg[] = {
                [0] = DP_AUX_NATIVE_WRITE << 4,
                [1] = DP_SET_POWER >> 8,
                [2] = DP_SET_POWER & 0xff,
                [3] = 1 - 1,
                [4] = DP_SET_POWER_D0,
        };
        int i;

        BUILD_BUG_ON(sizeof(aux_msg) > 20);

        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

        if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
                only_standby = true;

        /* Enable PSR in sink */
        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
        else
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

        /* Setup AUX registers */
        for (i = 0; i < sizeof(aux_msg); i += 4)
                I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
                           intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

        I915_WRITE(EDP_PSR_AUX_CTL(dev),
                   DP_AUX_CH_CTL_TIME_OUT_400us |
                   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
                   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
                   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}

static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t max_sleep_time = 0x1f;
        uint32_t idle_frames = 1;
        uint32_t val = 0x0;
        const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
        bool only_standby = false;

        if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
                only_standby = true;

        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
                val |= EDP_PSR_LINK_STANDBY;
                val |= EDP_PSR_TP2_TP3_TIME_0us;
                val |= EDP_PSR_TP1_TIME_0us;
                val |= EDP_PSR_SKIP_AUX_EXIT;
                val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
        } else
                val |= EDP_PSR_LINK_DISABLE;

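        /*
         * A hedged summary of the fields programmed below (paraphrased, not
         * quoted from BSpec): idle_frames is how many unchanged frames the
         * hardware waits for before entering self-refresh, max_sleep_time
         * bounds how long it may stay there, EDP_PSR_LINK_STANDBY keeps the
         * main link alive in standby so exit needs no retraining, while
         * EDP_PSR_LINK_DISABLE powers the link down completely instead.
         */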
        I915_WRITE(EDP_PSR_CTL(dev), val |
                   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
                   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
                   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
                   EDP_PSR_ENABLE);
}

static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        lockdep_assert_held(&dev_priv->psr.lock);
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

        dev_priv->psr.source_ok = false;

        if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
                DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
                return false;
        }

        if (!i915.enable_psr) {
                DRM_DEBUG_KMS("PSR disabled by flag\n");
                return false;
        }

        /* Below limitations aren't valid for Broadwell */
        if (IS_BROADWELL(dev))
                goto out;

        if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
            S3D_ENABLE) {
                DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
                return false;
        }

        if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
                return false;
        }

 out:
        dev_priv->psr.source_ok = true;
        return true;
}

static void intel_psr_do_enable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
        WARN_ON(dev_priv->psr.active);
        lockdep_assert_held(&dev_priv->psr.lock);

        /* Enable/Re-enable PSR on the host */
        intel_psr_enable_source(intel_dp);

        dev_priv->psr.active = true;
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_PSR(dev)) {
                DRM_DEBUG_KMS("PSR not supported on this platform\n");
                return;
        }

        if (!is_edp_psr(intel_dp)) {
                DRM_DEBUG_KMS("PSR not supported by this panel\n");
                return;
        }

        mutex_lock(&dev_priv->psr.lock);
        if (dev_priv->psr.enabled) {
                DRM_DEBUG_KMS("PSR already in use\n");
                goto unlock;
        }

        if (!intel_psr_match_conditions(intel_dp))
                goto unlock;

        dev_priv->psr.busy_frontbuffer_bits = 0;

        intel_psr_setup_vsc(intel_dp);

        /* Avoid continuous PSR exit by masking memup and hpd */
        I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
                   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

        /* Enable PSR on the panel */
        intel_psr_enable_sink(intel_dp);

        dev_priv->psr.enabled = intel_dp;
unlock:
        mutex_unlock(&dev_priv->psr.lock);
}
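
/*
 * A sketch of the calling order this pair expects from the modeset path
 * (hypothetical caller; the enable/disable hook names are illustrative):
 *
 *	enable_output(...);              pipe trained and running
 *	intel_psr_enable(intel_dp);      only now is PSR safe to enable
 *	    ...
 *	intel_psr_disable(intel_dp);     must happen before the pipe goes down
 *	disable_output(...);
 */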

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        if (dev_priv->psr.active) {
                I915_WRITE(EDP_PSR_CTL(dev),
                           I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

                /* Wait till PSR is idle */
                if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
                               EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
                        DRM_ERROR("Timed out waiting for PSR Idle State\n");

                dev_priv->psr.active = false;
        } else {
                WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
        }

        dev_priv->psr.enabled = NULL;
        mutex_unlock(&dev_priv->psr.lock);

        cancel_delayed_work_sync(&dev_priv->psr.work);
}

static void intel_psr_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), psr.work.work);
        struct intel_dp *intel_dp = dev_priv->psr.enabled;

        /* We have to make sure PSR is ready for re-enable,
         * otherwise it stays disabled until the next full enable/disable
         * cycle. PSR might take some time to get fully disabled and be
         * ready for re-enable.
         */
        if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
                      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
                DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                return;
        }

        mutex_lock(&dev_priv->psr.lock);
        intel_dp = dev_priv->psr.enabled;

        if (!intel_dp)
                goto unlock;

        /*
         * The delayed work can race with an invalidate hence we need to
         * recheck. Since psr_flush first clears this and then reschedules we
         * won't ever miss a flush when bailing out here.
         */
        if (dev_priv->psr.busy_frontbuffer_bits)
                goto unlock;

        intel_psr_do_enable(intel_dp);
unlock:
        mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->psr.active) {
                u32 val = I915_READ(EDP_PSR_CTL(dev));

                WARN_ON(!(val & EDP_PSR_ENABLE));

                I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);

                dev_priv->psr.active = false;
        }
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
                          unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        intel_psr_exit(dev);

        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

        dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
        mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
                     unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
                mutex_unlock(&dev_priv->psr.lock);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
        dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

        /*
         * On Haswell sprite plane updates don't result in a psr invalidating
         * signal in the hardware, which means we need to manually fake this
         * in software for all flushes, not just when we've seen a preceding
         * invalidation through frontbuffer rendering.
         */
        if (IS_HASWELL(dev) &&
            (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
                intel_psr_exit(dev);

        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->psr.work,
                                      msecs_to_jiffies(100));
        mutex_unlock(&dev_priv->psr.lock);
}
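
/*
 * Putting invalidate and flush together, a plausible timeline for a single
 * drawing operation looks like this (illustrative; the 100 ms delay matches
 * the schedule_delayed_work() call above):
 *
 *	t0:          intel_psr_invalidate()  PSR exits, busy bits set
 *	t1:          intel_psr_flush()       busy bits cleared, work queued
 *	t1 + 100ms:  intel_psr_work()        still idle? then
 *	                                     intel_psr_do_enable()
 */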

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
        mutex_init(&dev_priv->psr.lock);
}