Subversion Repositories Kolibri OS

Rev

Rev 6296 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2012-2014 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
  25.  *    Daniel Vetter <daniel.vetter@ffwll.ch>
  26.  *
  27.  */
  28.  
  29. //#include <linux/pm_runtime.h>
  30.  
  31. #include "i915_drv.h"
  32. #include "intel_drv.h"
  33. #include <drm/i915_powerwell.h>
  34. #include <linux/vgaarb.h>
  35.  
  36. /**
  37.  * DOC: runtime pm
  38.  *
  39.  * The i915 driver supports dynamic enabling and disabling of entire hardware
  40.  * blocks at runtime. This is especially important on the display side where
  41.  * software is supposed to control many power gates manually on recent hardware,
  42.  * since on the GT side a lot of the power management is done by the hardware.
  43.  * But even there some manual control at the device level is required.
  44.  *
  45.  * Since i915 supports a diverse set of platforms with a unified codebase and
  46.  * hardware engineers just love to shuffle functionality around between power
  47.  * domains there's a sizeable amount of indirection required. This file provides
  48.  * generic functions to the driver for grabbing and releasing references for
  49.  * abstract power domains. It then maps those to the actual power wells
  50.  * present for a given platform.
  51.  */
  52.  
  53. static struct i915_power_domains *hsw_pwr;
  54.  
/*
 * Iterate forward over the power wells of @power_domains whose domain bitmask
 * intersects @domain_mask.  The && in the loop condition both bounds the
 * index and loads @power_well before the trailing if-filter runs; note the
 * macro arguments are evaluated on every iteration.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)  \
        for (i = 0;                                                     \
             i < (power_domains)->power_well_count &&                   \
                 ((power_well) = &(power_domains)->power_wells[i]);     \
             i++)                                                       \
                if ((power_well)->domains & (domain_mask))
  61.  
/*
 * Same as for_each_power_well() but walking the wells in reverse order,
 * which is the order required when dropping references / powering down.
 */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
        for (i = (power_domains)->power_well_count - 1;                  \
             i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
             i--)                                                        \
                if ((power_well)->domains & (domain_mask))
  67.  
  68. /*
  69.  * We should only use the power well if we explicitly asked the hardware to
  70.  * enable it, so check if it's enabled and also check if we've requested it to
  71.  * be enabled.
  72.  */
  73. static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
  74.                                    struct i915_power_well *power_well)
  75. {
  76.         return I915_READ(HSW_PWR_WELL_DRIVER) ==
  77.                      (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
  78. }
  79.  
  80. /**
  81.  * __intel_display_power_is_enabled - unlocked check for a power domain
  82.  * @dev_priv: i915 device instance
  83.  * @domain: power domain to check
  84.  *
  85.  * This is the unlocked version of intel_display_power_is_enabled() and should
  86.  * only be used from error capture and recovery code where deadlocks are
  87.  * possible.
  88.  *
  89.  * Returns:
  90.  * True when the power domain is enabled, false otherwise.
  91.  */
  92. bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
  93.                                       enum intel_display_power_domain domain)
  94. {
  95.         struct i915_power_domains *power_domains;
  96.         struct i915_power_well *power_well;
  97.         bool is_enabled;
  98.         int i;
  99.  
  100.         if (dev_priv->pm.suspended)
  101.                 return false;
  102.  
  103.         power_domains = &dev_priv->power_domains;
  104.  
  105.         is_enabled = true;
  106.  
  107.         for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
  108.                 if (power_well->always_on)
  109.                         continue;
  110.  
  111.                 if (!power_well->hw_enabled) {
  112.                         is_enabled = false;
  113.                         break;
  114.                 }
  115.         }
  116.  
  117.         return is_enabled;
  118. }
  119.  
  120. /**
  121.  * intel_display_power_is_enabled - unlocked check for a power domain
  122.  * @dev_priv: i915 device instance
  123.  * @domain: power domain to check
  124.  *
  125.  * This function can be used to check the hw power domain state. It is mostly
  126.  * used in hardware state readout functions. Everywhere else code should rely
  127.  * upon explicit power domain reference counting to ensure that the hardware
  128.  * block is powered up before accessing it.
  129.  *
  130.  * Callers must hold the relevant modesetting locks to ensure that concurrent
  131.  * threads can't disable the power well while the caller tries to read a few
  132.  * registers.
  133.  *
  134.  * Returns:
  135.  * True when the power domain is enabled, false otherwise.
  136.  */
  137. bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
  138.                                     enum intel_display_power_domain domain)
  139. {
  140.         struct i915_power_domains *power_domains;
  141.         bool ret;
  142.  
  143.         power_domains = &dev_priv->power_domains;
  144.  
  145.         mutex_lock(&power_domains->lock);
  146.         ret = __intel_display_power_is_enabled(dev_priv, domain);
  147.         mutex_unlock(&power_domains->lock);
  148.  
  149.         return ret;
  150. }
  151.  
  152. /**
  153.  * intel_display_set_init_power - set the initial power domain state
  154.  * @dev_priv: i915 device instance
  155.  * @enable: whether to enable or disable the initial power domain state
  156.  *
  157.  * For simplicity our driver load/unload and system suspend/resume code assumes
  158.  * that all power domains are always enabled. This functions controls the state
  159.  * of this little hack. While the initial power domain state is enabled runtime
  160.  * pm is effectively disabled.
  161.  */
  162. void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  163.                                   bool enable)
  164. {
  165.         if (dev_priv->power_domains.init_power_on == enable)
  166.                 return;
  167.  
  168.         if (enable)
  169.                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
  170.         else
  171.                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
  172.  
  173.         dev_priv->power_domains.init_power_on = enable;
  174. }
  175.  
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
/* Fixups that must run right after the HSW/BDW power well comes back up. */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        /*
         * After we re-enable the power well, if we touch VGA register 0x3d5
         * we'll get unclaimed register interrupts. This stops after we write
         * anything to the VGA MSR register. The vgacon module uses this
         * register all the time, so if we unbind our driver and, as a
         * consequence, bind vgacon, we'll get stuck in an infinite loop at
         * console_unlock(). So make here we touch the VGA MSR register, making
         * sure vgacon can keep working normally without triggering interrupts
         * and error messages.
         */
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
        outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

        /* BDW+ also needs its display interrupt state re-initialized. */
        if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
                gen8_irq_power_well_post_enable(dev_priv);
}
  203.  
/*
 * Request the HSW/BDW display power well on or off via the driver's request
 * register and, when enabling, wait for the hardware state bit to latch.
 * Disabling only drops our request; the well actually powers down once no
 * requester (BIOS/driver/KVMr/debug) is left asking for it.
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
{
        bool is_enabled, enable_requested;
        uint32_t tmp;

        tmp = I915_READ(HSW_PWR_WELL_DRIVER);
        is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
        enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

        if (enable) {
                if (!enable_requested)
                        I915_WRITE(HSW_PWR_WELL_DRIVER,
                                   HSW_PWR_WELL_ENABLE_REQUEST);

                if (!is_enabled) {
                        DRM_DEBUG_KMS("Enabling power well\n");
                        /* Wait up to 20ms for the state bit to reflect on. */
                        if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
                                      HSW_PWR_WELL_STATE_ENABLED), 20))
                                DRM_ERROR("Timeout enabling power well\n");
                        hsw_power_well_post_enable(dev_priv);
                }

        } else {
                if (enable_requested) {
                        I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
                        POSTING_READ(HSW_PWR_WELL_DRIVER);
                        DRM_DEBUG_KMS("Requesting to disable the power well\n");
                }
        }
}
  235.  
  236. static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
  237.                                    struct i915_power_well *power_well)
  238. {
  239.         hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
  240.  
  241.         /*
  242.          * We're taking over the BIOS, so clear any requests made by it since
  243.          * the driver is in charge now.
  244.          */
  245.         if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
  246.                 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
  247. }
  248.  
/* power_well ops ->enable hook for the HSW/BDW display power well. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        hsw_set_power_well(dev_priv, power_well, true);
}
  254.  
/* power_well ops ->disable hook for the HSW/BDW display power well. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        hsw_set_power_well(dev_priv, power_well, false);
}
  260.  
/* No-op enable/disable/sync_hw hook for always-on wells: nothing to toggle. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
}
  265.  
/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
                                             struct i915_power_well *power_well)
{
        return true;
}
  271.  
  272. static void vlv_set_power_well(struct drm_i915_private *dev_priv,
  273.                                struct i915_power_well *power_well, bool enable)
  274. {
  275.         enum punit_power_well power_well_id = power_well->data;
  276.         u32 mask;
  277.         u32 state;
  278.         u32 ctrl;
  279.  
  280.         mask = PUNIT_PWRGT_MASK(power_well_id);
  281.         state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
  282.                          PUNIT_PWRGT_PWR_GATE(power_well_id);
  283.  
  284.         mutex_lock(&dev_priv->rps.hw_lock);
  285.  
  286. #define COND \
  287.         ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
  288.  
  289.         if (COND)
  290.                 goto out;
  291.  
  292.         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
  293.         ctrl &= ~mask;
  294.         ctrl |= state;
  295.         vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
  296.  
  297.         if (wait_for(COND, 100))
  298.                 DRM_ERROR("timout setting power well state %08x (%08x)\n",
  299.                           state,
  300.                           vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
  301.  
  302. #undef COND
  303.  
  304. out:
  305.         mutex_unlock(&dev_priv->rps.hw_lock);
  306. }
  307.  
  308. static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
  309.                                    struct i915_power_well *power_well)
  310. {
  311.         vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
  312. }
  313.  
/* power_well ops ->enable hook for generic VLV Punit wells. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, true);
}
  319.  
/* power_well ops ->disable hook for generic VLV Punit wells. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, false);
}
  325.  
/*
 * Read back whether a VLV Punit well is powered on, warning if the status
 * or control registers show a state we never programmed.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        int power_well_id = power_well->data;
        bool enabled = false;
        u32 mask;
        u32 state;
        u32 ctrl;

        mask = PUNIT_PWRGT_MASK(power_well_id);
        ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

        mutex_lock(&dev_priv->rps.hw_lock);

        state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
                state != PUNIT_PWRGT_PWR_GATE(power_well_id));
        if (state == ctrl)
                enabled = true;

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
        WARN_ON(ctrl != state);

        mutex_unlock(&dev_priv->rps.hw_lock);

        return enabled;
}
  361.  
/*
 * Enable the VLV DISP2D well, then restore the display interrupt, hotplug
 * and VGA state that lives inside it.  The irq enable must follow the well
 * power-up, mirroring the reverse order on disable.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

        vlv_set_power_well(dev_priv, power_well, true);

        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_enable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        /*
         * During driver initialization/resume we can avoid restoring the
         * part of the HW/SW state that will be inited anyway explicitly.
         */
        if (dev_priv->power_domains.initializing)
                return;

        intel_hpd_init(dev_priv);

        i915_redisable_vga_power_on(dev_priv->dev);
}
  384.  
/*
 * Disable the VLV DISP2D well: quiesce display interrupts first, power the
 * well down, then reset the panel power sequencer bookkeeping that the well
 * loss invalidates.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_disable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        vlv_set_power_well(dev_priv, power_well, false);

        vlv_power_sequencer_reset(dev_priv);
}
  398.  
/*
 * Enable the VLV DPIO common lane well, following the documented sequence:
 * turn on the CRI clock source, power the well, then de-assert cmn_reset.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

        /*
         * Enable the CRI clock source so we can get at the
         * display and the reference clock for VGA
         * hotplug / manual detection.
         */
        I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
                   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

        vlv_set_power_well(dev_priv, power_well, true);

        /*
         * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
         *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
         *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
         *   b. The other bits such as sfr settings / modesel may all
         *      be set to 0.
         *
         * This should only be done on init and resume from S3 with
         * both PLLs disabled, or we risk losing DPIO and PLL
         * synchronization.
         */
        I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
  428.  
/*
 * Disable the VLV DPIO common lane well: verify all PLLs are already off,
 * assert cmn_reset, then gate the well.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum pipe pipe;

        WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

        for_each_pipe(dev_priv, pipe)
                assert_pll_disabled(dev_priv, pipe);

        /* Assert common reset */
        I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

        vlv_set_power_well(dev_priv, power_well, false);
}
  444.  
/*
 * Enable one of the two CHV DPIO common lane wells (PHY0 for ports B/C,
 * PHY1 for port D): enable the CRI clock, power the well, poll for the
 * PHY power-good signal and finally release the common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        enum dpio_phy phy;

        WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
                     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

        /*
         * Enable the CRI clock source so we can get at the
         * display and the reference clock for VGA
         * hotplug / manual detection.
         */
        if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
                phy = DPIO_PHY0;
                I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
                           DPLL_REFA_CLK_ENABLE_VLV);
                I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
                           DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
        } else {
                phy = DPIO_PHY1;
                I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
                           DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
        }
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
        vlv_set_power_well(dev_priv, power_well, true);

        /* Poll for phypwrgood signal */
        if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
                DRM_ERROR("Display PHY %d is not power up\n", phy);

        I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
                   PHY_COM_LANE_RESET_DEASSERT(phy));
}
  479.  
/*
 * Disable a CHV DPIO common lane well: check the PLLs feeding the PHY are
 * off, re-assert the common lane reset, then gate the well.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum dpio_phy phy;

        WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
                     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

        if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
                phy = DPIO_PHY0;
                assert_pll_disabled(dev_priv, PIPE_A);
                assert_pll_disabled(dev_priv, PIPE_B);
        } else {
                phy = DPIO_PHY1;
                assert_pll_disabled(dev_priv, PIPE_C);
        }

        I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
                   ~PHY_COM_LANE_RESET_DEASSERT(phy));

        vlv_set_power_well(dev_priv, power_well, false);
}
  502.  
/*
 * Read back whether a CHV per-pipe power well is on, cross-checking that the
 * Punit status (SSS) and control (SSC) fields agree and hold only states we
 * ever program.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        enum pipe pipe = power_well->data;
        bool enabled;
        u32 state, ctrl;

        mutex_lock(&dev_priv->rps.hw_lock);

        state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
        enabled = state == DP_SSS_PWR_ON(pipe);

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
        WARN_ON(ctrl << 16 != state);

        mutex_unlock(&dev_priv->rps.hw_lock);

        return enabled;
}
  531.  
  532. static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
  533.                                     struct i915_power_well *power_well,
  534.                                     bool enable)
  535. {
  536.         enum pipe pipe = power_well->data;
  537.         u32 state;
  538.         u32 ctrl;
  539.  
  540.         state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
  541.  
  542.         mutex_lock(&dev_priv->rps.hw_lock);
  543.  
  544. #define COND \
  545.         ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
  546.  
  547.         if (COND)
  548.                 goto out;
  549.  
  550.         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
  551.         ctrl &= ~DP_SSC_MASK(pipe);
  552.         ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
  553.         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
  554.  
  555.         if (wait_for(COND, 100))
  556.                 DRM_ERROR("timout setting power well state %08x (%08x)\n",
  557.                           state,
  558.                           vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
  559.  
  560. #undef COND
  561.  
  562. out:
  563.         mutex_unlock(&dev_priv->rps.hw_lock);
  564. }
  565.  
  566. static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
  567.                                         struct i915_power_well *power_well)
  568. {
  569.         chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
  570. }
  571.  
/*
 * Enable a CHV per-pipe well.  Pipe A doubles as the display-wide well, so
 * enabling it also restores display interrupts, hotplug and VGA state.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
{
        WARN_ON_ONCE(power_well->data != PIPE_A &&
                     power_well->data != PIPE_B &&
                     power_well->data != PIPE_C);

        chv_set_pipe_power_well(dev_priv, power_well, true);

        if (power_well->data == PIPE_A) {
                spin_lock_irq(&dev_priv->irq_lock);
                valleyview_enable_display_irqs(dev_priv);
                spin_unlock_irq(&dev_priv->irq_lock);

                /*
                 * During driver initialization/resume we can avoid restoring the
                 * part of the HW/SW state that will be inited anyway explicitly.
                 */
                if (dev_priv->power_domains.initializing)
                        return;

                intel_hpd_init(dev_priv);

                i915_redisable_vga_power_on(dev_priv->dev);
        }
}
  598.  
/*
 * Disable a CHV per-pipe well.  For pipe A (the display-wide well) display
 * interrupts are quiesced first and the panel power sequencer bookkeeping is
 * reset afterwards.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        WARN_ON_ONCE(power_well->data != PIPE_A &&
                     power_well->data != PIPE_B &&
                     power_well->data != PIPE_C);

        if (power_well->data == PIPE_A) {
                spin_lock_irq(&dev_priv->irq_lock);
                valleyview_disable_display_irqs(dev_priv);
                spin_unlock_irq(&dev_priv->irq_lock);
        }

        chv_set_pipe_power_well(dev_priv, power_well, false);

        if (power_well->data == PIPE_A)
                vlv_power_sequencer_reset(dev_priv);
}
  617.  
  618. static void check_power_well_state(struct drm_i915_private *dev_priv,
  619.                                    struct i915_power_well *power_well)
  620. {
  621.         bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
  622.  
  623.         if (power_well->always_on || !i915.disable_power_well) {
  624.                 if (!enabled)
  625.                         goto mismatch;
  626.  
  627.                 return;
  628.         }
  629.  
  630.         if (enabled != (power_well->count > 0))
  631.                 goto mismatch;
  632.  
  633.         return;
  634.  
  635. mismatch:
  636.         WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
  637.                   power_well->name, power_well->always_on, enabled,
  638.                   power_well->count, i915.disable_power_well);
  639. }
  640.  
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        int i;

        /* Hold a runtime pm reference for as long as the domain ref lives. */
        intel_runtime_pm_get(dev_priv);

        power_domains = &dev_priv->power_domains;

        mutex_lock(&power_domains->lock);

        /* Walk forward (power-up order), enabling wells on 0 -> 1. */
        for_each_power_well(i, power_well, BIT(domain), power_domains) {
                if (!power_well->count++) {
                        DRM_DEBUG_KMS("enabling %s\n", power_well->name);
                        power_well->ops->enable(dev_priv, power_well);
                        power_well->hw_enabled = true;
                }

                check_power_well_state(dev_priv, power_well);
        }

        power_domains->domain_use_count[domain]++;

        mutex_unlock(&power_domains->lock);
}
  680.  
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        int i;

        power_domains = &dev_priv->power_domains;

        mutex_lock(&power_domains->lock);

        WARN_ON(!power_domains->domain_use_count[domain]);
        power_domains->domain_use_count[domain]--;

        /*
         * Walk in reverse (power-down order); only disable on 1 -> 0 and only
         * when the disable_power_well modparam allows powering wells down.
         */
        for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
                WARN_ON(!power_well->count);

                if (!--power_well->count && i915.disable_power_well) {
                        DRM_DEBUG_KMS("disabling %s\n", power_well->name);
                        power_well->hw_enabled = false;
                        power_well->ops->disable(dev_priv, power_well);
                }

                check_power_well_state(dev_priv, power_well);
        }

        mutex_unlock(&power_domains->lock);

        /* Drop the runtime pm reference taken in intel_display_power_get(). */
        intel_runtime_pm_put(dev_priv);
}
  720.  
/* Mask covering every defined power domain. */
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

/*
 * HSW: the domains listed here sit outside the "display" power well, so
 * they are modelled by the always-on well; everything else (the
 * complement below) is gated by the display well.
 */
#define HSW_ALWAYS_ON_POWER_DOMAINS (                   \
	BIT(POWER_DOMAIN_PIPE_A) |                      \
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
	BIT(POWER_DOMAIN_PORT_CRT) |                    \
	BIT(POWER_DOMAIN_PLLS) |                        \
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (                             \
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
	BIT(POWER_DOMAIN_INIT))

/* BDW: like HSW, but pipe A's panel fitter is also always-on. */
#define BDW_ALWAYS_ON_POWER_DOMAINS (                   \
	HSW_ALWAYS_ON_POWER_DOMAINS |                   \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (                             \
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
	BIT(POWER_DOMAIN_INIT))

/* VLV: only INIT is always-on; the DISP2D well gates every domain. */
#define VLV_ALWAYS_ON_POWER_DOMAINS     BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS       POWER_DOMAIN_MASK

/* VLV DPIO common lane well: shared by the B and C DDI ports and CRT. */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
	BIT(POWER_DOMAIN_PORT_CRT) |            \
	BIT(POWER_DOMAIN_INIT))

/* VLV per-TX-lane-pair wells for ports B and C. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
	BIT(POWER_DOMAIN_INIT))

/* CHV has a power well per pipe. */
#define CHV_PIPE_A_POWER_DOMAINS (      \
	BIT(POWER_DOMAIN_PIPE_A) |      \
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (      \
	BIT(POWER_DOMAIN_PIPE_B) |      \
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (      \
	BIT(POWER_DOMAIN_PIPE_C) |      \
	BIT(POWER_DOMAIN_INIT))

/* CHV DPIO common lane wells: one for ports B/C, one for port D. */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (         \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (          \
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
	BIT(POWER_DOMAIN_INIT))

/* CHV per-TX-lane-pair wells for port D. */
#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (  \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
	BIT(POWER_DOMAIN_INIT))
  809.  
/* No-op well ops for domains that are permanently powered. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV per-pipe power well ops. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV DPIO common lane ops; sync_hw/is_enabled are shared with VLV. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Fallback for platforms without controllable display power wells:
 * a single always-on well covering every domain. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
  839.  
/* HSW/BDW "display" power well ops. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* HSW: one always-on well plus the single "display" well gating the rest. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* BDW: same layout as HSW, only the domain split differs. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* VLV DISP2D well ops. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common lane ops (extra PHY work around the punit toggling). */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic VLV punit-controlled well ops (used by the DPIO TX wells). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
  895.  
/*
 * VLV power well list.  Note that every dpio-tx-* well lists the union of
 * all B and C lane domains, so using any lane keeps all four TX wells
 * powered at once.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
  952.  
/*
 * CHV power well list.  Several wells (display, pipes B/C, the per-lane
 * TX wells) are compiled out with #if 0 in this port; only the pipe A
 * and DPIO common wells are actually used.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
#endif
	{
		.name = "pipe-a",
		/*
		 * FIXME: pipe A power well seems to be the new disp2d well.
		 * At least all registers seem to be housed there. Figure
		 * out if this is a temporary situation in pre-production
		 * hardware or a permanent state of affairs.
		 */
		.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
#if 0
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
  1061.  
  1062. static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
  1063.                                                  enum punit_power_well power_well_id)
  1064. {
  1065.         struct i915_power_domains *power_domains = &dev_priv->power_domains;
  1066.         struct i915_power_well *power_well;
  1067.         int i;
  1068.  
  1069.         for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
  1070.                 if (power_well->data == power_well_id)
  1071.                         return power_well;
  1072.         }
  1073.  
  1074.         return NULL;
  1075. }
  1076.  
/* Install a platform's power well array (pointer + element count) into
 * the given i915_power_domains.  Must be passed a real array so that
 * ARRAY_SIZE() yields the element count. */
#define set_power_wells(power_domains, __power_wells) ({                \
	(power_domains)->power_wells = (__power_wells);                 \
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
})
  1081.  
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 *
 * Returns 0 (no failure paths).
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		/* Export the domains to the audio driver interface
		 * (i915_request_power_well() and friends). */
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		/* CHV must be checked before VLV: IS_VALLEYVIEW() is also
		 * true on CHV (cf. the combined check in
		 * intel_power_domains_init_hw()). */
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
  1115.  
/*
 * Disable runtime pm at driver teardown, taking a sync reference first so
 * the device is awake (and stays powered) when pm is disabled.
 */
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Without RC6, runtime pm was never enabled in the first place
	 * (see intel_runtime_pm_enable()), so there is nothing to undo. */
	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
  1131.  
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);

	/* Unregister from the audio driver power well interface
	 * (i915_request_power_well() etc. return -ENODEV from now on). */
	hsw_pwr = NULL;
}
  1151.  
  1152. static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
  1153. {
  1154.         struct i915_power_domains *power_domains = &dev_priv->power_domains;
  1155.         struct i915_power_well *power_well;
  1156.         int i;
  1157.  
  1158.         mutex_lock(&power_domains->lock);
  1159.         for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
  1160.                 power_well->ops->sync_hw(dev_priv, power_well);
  1161.                 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
  1162.                                                                      power_well);
  1163.         }
  1164.         mutex_unlock(&power_domains->lock);
  1165. }
  1166.  
/*
 * VLV init workaround: make sure the display PHY common lane reset gets
 * asserted (by gating the common lane well) so the PHY later comes up
 * from a known state.  Skipped when the display is already up and running
 * with cmnreset de-asserted.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* NOTE(review): lookup_power_well() can return NULL; this relies on
	 * both wells being present in vlv_power_wells, i.e. on being called
	 * only for VLV (see intel_power_domains_init_hw()) — confirm. */

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
  1194.  
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* The cmnlane workaround applies only to real VLV, not to CHV
	 * (which also passes IS_VALLEYVIEW()). */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
  1220.  
/**
 * intel_aux_display_runtime_get - grab an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a power domain reference for the auxiliary power domain
 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
 * parents are powered up. Therefore users should only grab a reference to the
 * innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_aux_display_runtime_put() to release the reference again.
 */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	/* Currently just forwards to the device-level runtime pm get. */
	intel_runtime_pm_get(dev_priv);
}
  1237.  
/**
 * intel_aux_display_runtime_put - release an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function drops the auxiliary power domain reference obtained by
 * intel_aux_display_runtime_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	/* Currently just forwards to the device-level runtime pm put. */
	intel_runtime_pm_put(dev_priv);
}
  1250.  
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	/* The sync get must have woken the device by the time it returns. */
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
  1272.  
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this functions from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * NOTE: in this port the actual pm_runtime_get_noresume() call is stubbed
 * out, so only the suspended-state check remains.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
//   pm_runtime_get_noresume(device);
}
  1301.  
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * NOTE: in this port the pm_runtime calls are stubbed out, so no reference
 * is actually dropped and the device never autosuspends.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

//   pm_runtime_mark_last_busy(device);
//   pm_runtime_put_autosuspend(device);
}
  1321.  
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 *
 * NOTE: in this port the autosuspend setup and the final put are stubbed
 * out, so the device is marked active but never actually autosuspends.
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

//   pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
//   pm_runtime_mark_last_busy(device);
//   pm_runtime_use_autosuspend(device);

//   pm_runtime_put_autosuspend(device);
}
  1357.  
  1358. /* Display audio driver power well request */
  1359. int i915_request_power_well(void)
  1360. {
  1361.         struct drm_i915_private *dev_priv;
  1362.  
  1363.         if (!hsw_pwr)
  1364.                 return -ENODEV;
  1365.  
  1366.         dev_priv = container_of(hsw_pwr, struct drm_i915_private,
  1367.                                 power_domains);
  1368.         intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
  1369.         return 0;
  1370. }
  1371. EXPORT_SYMBOL_GPL(i915_request_power_well);
  1372.  
  1373. /* Display audio driver power well release */
  1374. int i915_release_power_well(void)
  1375. {
  1376.         struct drm_i915_private *dev_priv;
  1377.  
  1378.         if (!hsw_pwr)
  1379.                 return -ENODEV;
  1380.  
  1381.         dev_priv = container_of(hsw_pwr, struct drm_i915_private,
  1382.                                 power_domains);
  1383.         intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
  1384.         return 0;
  1385. }
  1386. EXPORT_SYMBOL_GPL(i915_release_power_well);
  1387.  
  1388. /*
  1389.  * Private interface for the audio driver to get CDCLK in kHz.
  1390.  *
  1391.  * Caller must request power well using i915_request_power_well() prior to
  1392.  * making the call.
  1393.  */
  1394. int i915_get_cdclk_freq(void)
  1395. {
  1396.         struct drm_i915_private *dev_priv;
  1397.  
  1398.         if (!hsw_pwr)
  1399.                 return -ENODEV;
  1400.  
  1401.         dev_priv = container_of(hsw_pwr, struct drm_i915_private,
  1402.                                 power_domains);
  1403.  
  1404.         return intel_ddi_get_cdclk_freq(dev_priv);
  1405. }
  1406. EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
  1407.