Subversion Repositories Kolibri OS

Rev

Rev 5354 | Rev 6084 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2008 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Keith Packard <keithp@keithp.com>
  25.  *
  26.  */
  27.  
  28. #include <linux/i2c.h>
  29. #include <linux/slab.h>
  30. #include <linux/export.h>
  31. #include <drm/drmP.h>
  32. #include <drm/drm_crtc.h>
  33. #include <drm/drm_crtc_helper.h>
  34. #include <drm/drm_edid.h>
  35. #include "intel_drv.h"
  36. #include <drm/i915_drm.h>
  37. #include "i915_drv.h"
  38.  
  39. #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
  40.  
/* Pairs a DP link bandwidth code with the DPLL divider settings that
 * produce that link rate on a given platform. */
struct dp_link_dpll {
        int link_bw;            /* DP_LINK_BW_* code */
        struct dpll dpll;       /* divider values for this rate */
};
  45.  
/* Gen4 DPLL divider values for the supported DP link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
  52.  
/* PCH DPLL divider values for the supported DP link rates. */
static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
  59.  
/* VLV DPLL divider values for the supported DP link rates. */
static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
  66.  
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires to program fractional division for m2.
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
  84.  
  85. /**
  86.  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  87.  * @intel_dp: DP struct
  88.  *
  89.  * If a CPU or PCH DP output is attached to an eDP panel, this function
  90.  * will return true, and false otherwise.
  91.  */
  92. static bool is_edp(struct intel_dp *intel_dp)
  93. {
  94.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  95.  
  96.         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
  97. }
  98.  
  99. static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
  100. {
  101.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  102.  
  103.         return intel_dig_port->base.base.dev;
  104. }
  105.  
  106. static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
  107. {
  108.         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
  109. }
  110.  
  111. static void intel_dp_link_down(struct intel_dp *intel_dp);
  112. static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
  113. static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
  114. static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
  115. static void vlv_steal_power_sequencer(struct drm_device *dev,
  116.                                       enum pipe pipe);
  117.  
  118. int
  119. intel_dp_max_link_bw(struct intel_dp *intel_dp)
  120. {
  121.         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
  122.         struct drm_device *dev = intel_dp->attached_connector->base.dev;
  123.  
  124.         switch (max_link_bw) {
  125.         case DP_LINK_BW_1_62:
  126.         case DP_LINK_BW_2_7:
  127.                 break;
  128.         case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
  129.                 if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
  130.                      INTEL_INFO(dev)->gen >= 8) &&
  131.                     intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
  132.                         max_link_bw = DP_LINK_BW_5_4;
  133.                 else
  134.                         max_link_bw = DP_LINK_BW_2_7;
  135.                 break;
  136.         default:
  137.                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
  138.                      max_link_bw);
  139.                 max_link_bw = DP_LINK_BW_1_62;
  140.                 break;
  141.         }
  142.         return max_link_bw;
  143. }
  144.  
  145. static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
  146. {
  147.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  148.         struct drm_device *dev = intel_dig_port->base.base.dev;
  149.         u8 source_max, sink_max;
  150.  
  151.         source_max = 4;
  152.         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
  153.             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
  154.                 source_max = 2;
  155.  
  156.         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
  157.  
  158.         return min(source_max, sink_max);
  159. }
  160.  
  161. /*
  162.  * The units on the numbers in the next two are... bizarre.  Examples will
  163.  * make it clearer; this one parallels an example in the eDP spec.
  164.  *
  165.  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
  166.  *
  167.  *     270000 * 1 * 8 / 10 == 216000
  168.  *
  169.  * The actual data capacity of that configuration is 2.16Gbit/s, so the
  170.  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
  171.  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
  172.  * 119000.  At 18bpp that's 2142000 kilobits per second.
  173.  *
  174.  * Thus the strange-looking division by 10 in intel_dp_link_required, to
  175.  * get the result in decakilobits instead of kilobits.
  176.  */
  177.  
  178. static int
  179. intel_dp_link_required(int pixel_clock, int bpp)
  180. {
  181.         return (pixel_clock * bpp + 9) / 10;
  182. }
  183.  
  184. static int
  185. intel_dp_max_data_rate(int max_link_clock, int max_lanes)
  186. {
  187.         return (max_link_clock * max_lanes * 8) / 10;
  188. }
  189.  
  190. static enum drm_mode_status
  191. intel_dp_mode_valid(struct drm_connector *connector,
  192.                     struct drm_display_mode *mode)
  193. {
  194.         struct intel_dp *intel_dp = intel_attached_dp(connector);
  195.         struct intel_connector *intel_connector = to_intel_connector(connector);
  196.         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
  197.         int target_clock = mode->clock;
  198.         int max_rate, mode_rate, max_lanes, max_link_clock;
  199.  
  200.         if (is_edp(intel_dp) && fixed_mode) {
  201.                 if (mode->hdisplay > fixed_mode->hdisplay)
  202.                         return MODE_PANEL;
  203.  
  204.                 if (mode->vdisplay > fixed_mode->vdisplay)
  205.                         return MODE_PANEL;
  206.  
  207.                 target_clock = fixed_mode->clock;
  208.         }
  209.  
  210.         max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
  211.         max_lanes = intel_dp_max_lane_count(intel_dp);
  212.  
  213.         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
  214.         mode_rate = intel_dp_link_required(target_clock, 18);
  215.  
  216.         if (mode_rate > max_rate)
  217.                 return MODE_CLOCK_HIGH;
  218.  
  219.         if (mode->clock < 10000)
  220.                 return MODE_CLOCK_LOW;
  221.  
  222.         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
  223.                 return MODE_H_ILLEGAL;
  224.  
  225.         return MODE_OK;
  226. }
  227.  
  228. uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
  229. {
  230.         int     i;
  231.         uint32_t v = 0;
  232.  
  233.         if (src_bytes > 4)
  234.                 src_bytes = 4;
  235.         for (i = 0; i < src_bytes; i++)
  236.                 v |= ((uint32_t) src[i]) << ((3-i) * 8);
  237.         return v;
  238. }
  239.  
  240. void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
  241. {
  242.         int i;
  243.         if (dst_bytes > 4)
  244.                 dst_bytes = 4;
  245.         for (i = 0; i < dst_bytes; i++)
  246.                 dst[i] = src >> ((3-i) * 8);
  247. }
  248.  
  249. /* hrawclock is 1/4 the FSB frequency */
  250. static int
  251. intel_hrawclk(struct drm_device *dev)
  252. {
  253.         struct drm_i915_private *dev_priv = dev->dev_private;
  254.         uint32_t clkcfg;
  255.  
  256.         /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
  257.         if (IS_VALLEYVIEW(dev))
  258.                 return 200;
  259.  
  260.         clkcfg = I915_READ(CLKCFG);
  261.         switch (clkcfg & CLKCFG_FSB_MASK) {
  262.         case CLKCFG_FSB_400:
  263.                 return 100;
  264.         case CLKCFG_FSB_533:
  265.                 return 133;
  266.         case CLKCFG_FSB_667:
  267.                 return 166;
  268.         case CLKCFG_FSB_800:
  269.                 return 200;
  270.         case CLKCFG_FSB_1067:
  271.                 return 266;
  272.         case CLKCFG_FSB_1333:
  273.                 return 333;
  274.         /* these two are just a guess; one of them might be right */
  275.         case CLKCFG_FSB_1600:
  276.         case CLKCFG_FSB_1600_ALT:
  277.                 return 400;
  278.         default:
  279.                 return 133;
  280.         }
  281. }
  282.  
  283. static void
  284. intel_dp_init_panel_power_sequencer(struct drm_device *dev,
  285.                                     struct intel_dp *intel_dp);
  286. static void
  287. intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
  288.                                               struct intel_dp *intel_dp);
  289.  
  290. static void pps_lock(struct intel_dp *intel_dp)
  291. {
  292.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  293.         struct intel_encoder *encoder = &intel_dig_port->base;
  294.         struct drm_device *dev = encoder->base.dev;
  295.         struct drm_i915_private *dev_priv = dev->dev_private;
  296.         enum intel_display_power_domain power_domain;
  297.  
  298.         /*
  299.          * See vlv_power_sequencer_reset() why we need
  300.          * a power domain reference here.
  301.          */
  302.         power_domain = intel_display_port_power_domain(encoder);
  303.         intel_display_power_get(dev_priv, power_domain);
  304.  
  305.         mutex_lock(&dev_priv->pps_mutex);
  306. }
  307.  
  308. static void pps_unlock(struct intel_dp *intel_dp)
  309. {
  310.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  311.         struct intel_encoder *encoder = &intel_dig_port->base;
  312.         struct drm_device *dev = encoder->base.dev;
  313.         struct drm_i915_private *dev_priv = dev->dev_private;
  314.         enum intel_display_power_domain power_domain;
  315.  
  316.         mutex_unlock(&dev_priv->pps_mutex);
  317.  
  318.         power_domain = intel_display_port_power_domain(encoder);
  319.         intel_display_power_put(dev_priv, power_domain);
  320. }
  321.  
/*
 * Briefly enable and disable the DP port so the power sequencer on
 * intel_dp->pps_pipe locks onto this port; without this kick even the
 * VDD force bit has no effect (see comment below). Temporarily forces
 * the pipe's PLL on if it isn't already running.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled;
        uint32_t DP;

        /* refuse to kick while the port is live; it would disrupt output */
        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        /* route the port to the pipe whose sequencer we are claiming */
        if (IS_CHERRYVIEW(dev))
                DP |= DP_PIPE_SELECT_CHV(pipe);
        else if (pipe == PIPE_B)
                DP |= DP_PIPEB_SELECT;

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable temporarily it if it's not already enabled.
         */
        if (!pll_enabled)
                vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
                                 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power seqeuencer lock onto the port.
         * Otherwise even VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        /* restore the PLL to the state we found it in */
        if (!pll_enabled)
                vlv_force_pll_off(dev, pipe);
}
  381.  
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning
 * one (and kicking it so it locks onto the port) if none is assigned
 * yet. Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                /* mask out pipes whose sequencer is already claimed */
                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
  445.  
  446. typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
  447.                                enum pipe pipe);
  448.  
  449. static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
  450.                                enum pipe pipe)
  451. {
  452.         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
  453. }
  454.  
  455. static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
  456.                                 enum pipe pipe)
  457. {
  458.         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
  459. }
  460.  
/* vlv_pipe_check: unconditionally accept every pipe. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
  466.  
  467. static enum pipe
  468. vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
  469.                      enum port port,
  470.                      vlv_pipe_check pipe_check)
  471. {
  472.         enum pipe pipe;
  473.  
  474.         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
  475.                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
  476.                         PANEL_PORT_SELECT_MASK;
  477.  
  478.                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
  479.                         continue;
  480.  
  481.                 if (!pipe_check(dev_priv, pipe))
  482.                         continue;
  483.  
  484.                         return pipe;
  485.         }
  486.  
  487.         return INVALID_PIPE;
  488. }
  489.  
  490. static void
  491. vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
  492. {
  493.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  494.         struct drm_device *dev = intel_dig_port->base.base.dev;
  495.         struct drm_i915_private *dev_priv = dev->dev_private;
  496.         enum port port = intel_dig_port->port;
  497.  
  498.         lockdep_assert_held(&dev_priv->pps_mutex);
  499.  
  500.         /* try to find a pipe with this port selected */
  501.         /* first pick one where the panel is on */
  502.         intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
  503.                                                   vlv_pipe_has_pp_on);
  504.         /* didn't find one? pick one where vdd is on */
  505.         if (intel_dp->pps_pipe == INVALID_PIPE)
  506.                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
  507.                                                           vlv_pipe_has_vdd_on);
  508.         /* didn't find one? pick one with just the correct port */
  509.         if (intel_dp->pps_pipe == INVALID_PIPE)
  510.                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
  511.                                                           vlv_pipe_any);
  512.  
  513.         /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
  514.         if (intel_dp->pps_pipe == INVALID_PIPE) {
  515.                 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
  516.                               port_name(port));
  517.                 return;
  518.         }
  519.  
  520.         DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
  521.                       port_name(port), pipe_name(intel_dp->pps_pipe));
  522.  
  523.         intel_dp_init_panel_power_sequencer(dev, intel_dp);
  524.         intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
  525. }
  526.  
  527. void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
  528. {
  529.         struct drm_device *dev = dev_priv->dev;
  530.         struct intel_encoder *encoder;
  531.  
  532.         if (WARN_ON(!IS_VALLEYVIEW(dev)))
  533.                 return;
  534.  
  535.         /*
  536.          * We can't grab pps_mutex here due to deadlock with power_domain
  537.          * mutex when power_domain functions are called while holding pps_mutex.
  538.          * That also means that in order to use pps_pipe the code needs to
  539.          * hold both a power domain reference and pps_mutex, and the power domain
  540.          * reference get/put must be done while _not_ holding pps_mutex.
  541.          * pps_{lock,unlock}() do these steps in the correct order, so one
  542.          * should use them always.
  543.          */
  544.  
  545.         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
  546.                 struct intel_dp *intel_dp;
  547.  
  548.                 if (encoder->type != INTEL_OUTPUT_EDP)
  549.                         continue;
  550.  
  551.                 intel_dp = enc_to_intel_dp(&encoder->base);
  552.                 intel_dp->pps_pipe = INVALID_PIPE;
  553.         }
  554. }
  555.  
  556. static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
  557. {
  558.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  559.  
  560.         if (HAS_PCH_SPLIT(dev))
  561.                 return PCH_PP_CONTROL;
  562.         else
  563.                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
  564. }
  565.  
  566. static u32 _pp_stat_reg(struct intel_dp *intel_dp)
  567. {
  568.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  569.  
  570.         if (HAS_PCH_SPLIT(dev))
  571.                 return PCH_PP_STATUS;
  572.         else
  573.                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
  574. }
  575.  
#if 0
/* NOTE: compiled out in this port (no reboot-notifier support here). */
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        /* only act on eDP ports, and only for a system restart */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
#endif
  613.  
  614. static bool edp_have_panel_power(struct intel_dp *intel_dp)
  615. {
  616.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  617.         struct drm_i915_private *dev_priv = dev->dev_private;
  618.  
  619.         lockdep_assert_held(&dev_priv->pps_mutex);
  620.  
  621.         if (IS_VALLEYVIEW(dev) &&
  622.             intel_dp->pps_pipe == INVALID_PIPE)
  623.                 return false;
  624.  
  625.         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
  626. }
  627.  
  628. static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
  629. {
  630.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  631.         struct drm_i915_private *dev_priv = dev->dev_private;
  632.  
  633.         lockdep_assert_held(&dev_priv->pps_mutex);
  634.  
  635.         if (IS_VALLEYVIEW(dev) &&
  636.             intel_dp->pps_pipe == INVALID_PIPE)
  637.                 return false;
  638.  
  639.         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
  640. }
  641.  
  642. static void
  643. intel_dp_check_edp(struct intel_dp *intel_dp)
  644. {
  645.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  646.         struct drm_i915_private *dev_priv = dev->dev_private;
  647.  
  648.         if (!is_edp(intel_dp))
  649.                 return;
  650.  
  651.         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
  652.                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
  653.                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
  654.                               I915_READ(_pp_stat_reg(intel_dp)),
  655.                               I915_READ(_pp_ctrl_reg(intel_dp)));
  656.         }
  657. }
  658.  
/*
 * Wait (up to 10ms) for the AUX channel to clear its SEND_BUSY bit and
 * return the last value read from the channel control register. With
 * @has_aux_irq the wait sleeps on gmbus_wait_queue; otherwise it polls
 * atomically. Logs an error if the hardware never signals completion.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C re-reads the register on every evaluation, updating 'status' as a
 * side effect, so 'status' always holds the most recent value. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
  682.  
  683. static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  684. {
  685.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  686.         struct drm_device *dev = intel_dig_port->base.base.dev;
  687.  
  688.         /*
  689.          * The clock divider is based off the hrawclk, and would like to run at
  690.          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
  691.          */
  692.         return index ? 0 : intel_hrawclk(dev) / 2;
  693. }
  694.  
  695. static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  696. {
  697.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  698.         struct drm_device *dev = intel_dig_port->base.base.dev;
  699.  
  700.         if (index)
  701.                 return 0;
  702.  
  703.         if (intel_dig_port->port == PORT_A) {
  704.                 if (IS_GEN6(dev) || IS_GEN7(dev))
  705.                         return 200; /* SNB & IVB eDP input clock at 400Mhz */
  706.                 else
  707.                         return 225; /* eDP input clock at 450Mhz */
  708.         } else {
  709.                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
  710.         }
  711. }
  712.  
  713. static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  714. {
  715.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  716.         struct drm_device *dev = intel_dig_port->base.base.dev;
  717.         struct drm_i915_private *dev_priv = dev->dev_private;
  718.  
  719.         if (intel_dig_port->port == PORT_A) {
  720.                 if (index)
  721.                         return 0;
  722.                 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
  723.         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
  724.                 /* Workaround for non-ULT HSW */
  725.                 switch (index) {
  726.                 case 0: return 63;
  727.                 case 1: return 72;
  728.                 default: return 0;
  729.                 }
  730.         } else  {
  731.                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
  732.         }
  733. }
  734.  
  735. static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  736. {
  737.         return index ? 0 : 100;
  738. }
  739.  
  740. static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  741. {
  742.         /*
  743.          * SKL doesn't need us to program the AUX clock divider (Hardware will
  744.          * derive the clock from CDCLK automatically). We still implement the
  745.          * get_aux_clock_divider vfunc to plug-in into the existing code.
  746.          */
  747.         return index ? 0 : 1;
  748. }
  749.  
  750. static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
  751.                                       bool has_aux_irq,
  752.                                       int send_bytes,
  753.                                       uint32_t aux_clock_divider)
  754. {
  755.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  756.         struct drm_device *dev = intel_dig_port->base.base.dev;
  757.         uint32_t precharge, timeout;
  758.  
  759.         if (IS_GEN6(dev))
  760.                 precharge = 3;
  761.         else
  762.                 precharge = 5;
  763.  
  764.         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
  765.                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
  766.         else
  767.                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
  768.  
  769.         return DP_AUX_CH_CTL_SEND_BUSY |
  770.                DP_AUX_CH_CTL_DONE |
  771.                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
  772.                DP_AUX_CH_CTL_TIME_OUT_ERROR |
  773.                timeout |
  774.                DP_AUX_CH_CTL_RECEIVE_ERROR |
  775.                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
  776.                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
  777.                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
  778. }
  779.  
  780. static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
  781.                                       bool has_aux_irq,
  782.                                       int send_bytes,
  783.                                       uint32_t unused)
  784. {
  785.         return DP_AUX_CH_CTL_SEND_BUSY |
  786.                DP_AUX_CH_CTL_DONE |
  787.                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
  788.                DP_AUX_CH_CTL_TIME_OUT_ERROR |
  789.                DP_AUX_CH_CTL_TIME_OUT_1600us |
  790.                DP_AUX_CH_CTL_RECEIVE_ERROR |
  791.                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
  792.                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
  793. }
  794.  
/*
 * Perform one raw DP AUX channel transaction.
 *
 * Writes @send_bytes bytes from @send and reads back up to @recv_size
 * bytes into @recv.  Both directions are limited to 20 bytes (the
 * hardware has only 5 32-bit data registers).
 *
 * Returns the number of bytes received on success, or a negative errno:
 *  -EBUSY     channel never went idle, or no attempt completed
 *  -E2BIG     buffer exceeds the 20-byte hardware limit
 *  -EIO       receive error reported by the hardware
 *  -ETIMEDOUT sink did not reply (normal when nothing is connected)
 *
 * Takes pps_lock and (for eDP) VDD itself; VDD is only dropped on exit
 * if it was not already held by an upper layer.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	/* Data registers immediately follow the control register. */
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	/* Channel still busy after 3 attempts: give up. */
	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transfer once per available AUX clock divider;
	 * the platform vfunc returns 0 when the dividers are exhausted. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

		/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		/* Timeout / receive error: retry this divider. */
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}
		/* Success with this divider: stop trying others. */
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
//	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
  933.  
  934. #define BARE_ADDRESS_SIZE       3
  935. #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
  936. static ssize_t
  937. intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
  938. {
  939.         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
  940.         uint8_t txbuf[20], rxbuf[20];
  941.         size_t txsize, rxsize;
  942.         int ret;
  943.  
  944.         txbuf[0] = msg->request << 4;
  945.         txbuf[1] = msg->address >> 8;
  946.         txbuf[2] = msg->address & 0xff;
  947.         txbuf[3] = msg->size - 1;
  948.  
  949.         switch (msg->request & ~DP_AUX_I2C_MOT) {
  950.         case DP_AUX_NATIVE_WRITE:
  951.         case DP_AUX_I2C_WRITE:
  952.                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
  953.                 rxsize = 1;
  954.  
  955.                 if (WARN_ON(txsize > 20))
  956.                 return -E2BIG;
  957.  
  958.                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
  959.  
  960.                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
  961.                 if (ret > 0) {
  962.                         msg->reply = rxbuf[0] >> 4;
  963.  
  964.                         /* Return payload size. */
  965.                         ret = msg->size;
  966.                 }
  967.                         break;
  968.  
  969.         case DP_AUX_NATIVE_READ:
  970.         case DP_AUX_I2C_READ:
  971.                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
  972.                 rxsize = msg->size + 1;
  973.  
  974.                 if (WARN_ON(rxsize > 20))
  975.                 return -E2BIG;
  976.  
  977.                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
  978.                 if (ret > 0) {
  979.                         msg->reply = rxbuf[0] >> 4;
  980.                         /*
  981.                          * Assume happy day, and copy the data. The caller is
  982.                          * expected to check msg->reply before touching it.
  983.                          *
  984.                          * Return payload size.
  985.                          */
  986.                         ret--;
  987.                         memcpy(msg->buffer, rxbuf + 1, ret);
  988.                 }
  989.                 break;
  990.  
  991.         default:
  992.                 ret = -EINVAL;
  993.                 break;
  994.         }
  995.  
  996.                         return ret;
  997. }
  998.  
  999. static void
  1000. intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
  1001. {
  1002.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1003.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  1004.         enum port port = intel_dig_port->port;
  1005.         const char *name = NULL;
  1006.         int ret;
  1007.  
  1008.         switch (port) {
  1009.         case PORT_A:
  1010.                 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
  1011.                 name = "DPDDC-A";
  1012.                 break;
  1013.         case PORT_B:
  1014.                 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
  1015.                 name = "DPDDC-B";
  1016.                 break;
  1017.         case PORT_C:
  1018.                 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
  1019.                 name = "DPDDC-C";
  1020.                 break;
  1021.         case PORT_D:
  1022.                 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
  1023.                 name = "DPDDC-D";
  1024.                 break;
  1025.         default:
  1026.                 BUG();
  1027.         }
  1028.  
  1029.         /*
  1030.          * The AUX_CTL register is usually DP_CTL + 0x10.
  1031.          *
  1032.          * On Haswell and Broadwell though:
  1033.          *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
  1034.          *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
  1035.          *
  1036.          * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
  1037.          */
  1038.         if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
  1039.                 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
  1040.  
  1041.         intel_dp->aux.name = name;
  1042.         intel_dp->aux.dev = dev->dev;
  1043.         intel_dp->aux.transfer = intel_dp_aux_transfer;
  1044.  
  1045.         DRM_DEBUG_KMS("registering %s bus\n", name);
  1046.  
  1047.         ret = drm_dp_aux_register(&intel_dp->aux);
  1048.                 if (ret < 0) {
  1049.                 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
  1050.                           name, ret);
  1051.                 return;
  1052.         }
  1053. }
  1054.  
  1055. static void
  1056. intel_dp_connector_unregister(struct intel_connector *intel_connector)
  1057. {
  1058.         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
  1059.  
  1060.         intel_connector_unregister(intel_connector);
  1061. }
  1062.  
  1063. static void
  1064. skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
  1065. {
  1066.         u32 ctrl1;
  1067.  
  1068.         pipe_config->ddi_pll_sel = SKL_DPLL0;
  1069.         pipe_config->dpll_hw_state.cfgcr1 = 0;
  1070.         pipe_config->dpll_hw_state.cfgcr2 = 0;
  1071.  
  1072.         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
  1073.         switch (link_bw) {
  1074.         case DP_LINK_BW_1_62:
  1075.                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
  1076.                                               SKL_DPLL0);
  1077.                 break;
  1078.         case DP_LINK_BW_2_7:
  1079.                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
  1080.                                               SKL_DPLL0);
  1081.                 break;
  1082.         case DP_LINK_BW_5_4:
  1083.                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
  1084.                                               SKL_DPLL0);
  1085.                 break;
  1086.         }
  1087.         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
  1088. }
  1089.  
  1090. static void
  1091. hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
  1092. {
  1093.         switch (link_bw) {
  1094.         case DP_LINK_BW_1_62:
  1095.                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
  1096.                 break;
  1097.         case DP_LINK_BW_2_7:
  1098.                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
  1099.                 break;
  1100.         case DP_LINK_BW_5_4:
  1101.                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
  1102.                 break;
  1103.         }
  1104. }
  1105.  
  1106. static void
  1107. intel_dp_set_clock(struct intel_encoder *encoder,
  1108.                    struct intel_crtc_config *pipe_config, int link_bw)
  1109. {
  1110.         struct drm_device *dev = encoder->base.dev;
  1111.         const struct dp_link_dpll *divisor = NULL;
  1112.         int i, count = 0;
  1113.  
  1114.         if (IS_G4X(dev)) {
  1115.                 divisor = gen4_dpll;
  1116.                 count = ARRAY_SIZE(gen4_dpll);
  1117.         } else if (HAS_PCH_SPLIT(dev)) {
  1118.                 divisor = pch_dpll;
  1119.                 count = ARRAY_SIZE(pch_dpll);
  1120.         } else if (IS_CHERRYVIEW(dev)) {
  1121.                 divisor = chv_dpll;
  1122.                 count = ARRAY_SIZE(chv_dpll);
  1123.         } else if (IS_VALLEYVIEW(dev)) {
  1124.                 divisor = vlv_dpll;
  1125.                 count = ARRAY_SIZE(vlv_dpll);
  1126.         }
  1127.  
  1128.         if (divisor && count) {
  1129.                 for (i = 0; i < count; i++) {
  1130.                         if (link_bw == divisor[i].link_bw) {
  1131.                                 pipe_config->dpll = divisor[i].dpll;
  1132.                                 pipe_config->clock_set = true;
  1133.                                 break;
  1134.                         }
  1135.                 }
  1136.         }
  1137. }
  1138.  
/*
 * Compute the DP link configuration (bpp, lane count, link rate) for
 * the given pipe config.
 *
 * Walks bpp downward from the requested pipe bpp to 6 bpc in 2-bpc
 * steps, and for each bpp tries every link clock x lane count
 * combination (lowest first) until the mode's required data rate fits
 * the link's available bandwidth.  For eDP the search is pinned to the
 * panel's maximum clock and lane count.
 *
 * Returns false if the mode needs double-clocking or no combination
 * carries the mode; returns true with pipe_config filled in otherwise.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
	int bpp, mode_rate;
	static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
	int link_avail, link_clock;

	/* Non-DDI PCH platforms drive ports B/C/D through a PCH encoder. */
	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	/* eDP: force the panel's fixed mode and apply panel fitting. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	/* Double-clocked modes are not supported on DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		/* Respect a lower bpp requested by the VBT for this panel. */
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
			      dev_priv->vbt.edp_bpp);
		bpp = dev_priv->vbt.edp_bpp;
	}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
			min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/* Search: bpp (high to low) x link clock x lane count. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
		for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

		return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	/* Record the first (lowest) combination that fit the mode. */
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second set of M/N values for the seamless-DRRS downclock mode. */
	if (intel_connector->panel.downclock_mode != NULL &&
		intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	/* Platform-specific PLL selection for the chosen link rate. */
	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
  1276.  
  1277. static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
  1278. {
  1279.         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  1280.         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
  1281.         struct drm_device *dev = crtc->base.dev;
  1282.         struct drm_i915_private *dev_priv = dev->dev_private;
  1283.         u32 dpa_ctl;
  1284.  
  1285.         DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
  1286.         dpa_ctl = I915_READ(DP_A);
  1287.         dpa_ctl &= ~DP_PLL_FREQ_MASK;
  1288.  
  1289.         if (crtc->config.port_clock == 162000) {
  1290.                 /* For a long time we've carried around a ILK-DevA w/a for the
  1291.                  * 160MHz clock. If we're really unlucky, it's still required.
  1292.                  */
  1293.                 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
  1294.                 dpa_ctl |= DP_PLL_FREQ_160MHZ;
  1295.                 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
  1296.         } else {
  1297.                 dpa_ctl |= DP_PLL_FREQ_270MHZ;
  1298.                 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
  1299.         }
  1300.  
  1301.         I915_WRITE(DP_A, dpa_ctl);
  1302.  
  1303.         POSTING_READ(DP_A);
  1304.         udelay(500);
  1305. }
  1306.  
/*
 * Build the DP port register value for this encoder and cache it in
 * intel_dp->DP.  Only the BIOS "detected" bit is read back from the
 * hardware here; no register write happens in this function.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config.has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	/* Gen7 non-VLV CPU eDP (port A): CPT-style link-train bits and
	 * a pipe-select field at bit 29. */
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX PCH / CPU layout: sync polarity, color range and
		 * pipe select live in the port register itself. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
		intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		intel_dp->DP |= DP_ENHANCED_FRAMING;

		/* CHV has a wider pipe-select encoding than older parts. */
		if (!IS_CHERRYVIEW(dev)) {
		if (crtc->pipe == 1)
		intel_dp->DP |= DP_PIPEB_SELECT;
	} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		/* CPT PCH: most configuration lives in TRANS_DP_CTL;
		 * only the link-train-off state is set here. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
  1381.  
  1382. #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
  1383. #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
  1384.  
  1385. #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
  1386. #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
  1387.  
  1388. #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
  1389. #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
  1390.  
  1391. static void wait_panel_status(struct intel_dp *intel_dp,
  1392.                                        u32 mask,
  1393.                                        u32 value)
  1394. {
  1395.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1396.         struct drm_i915_private *dev_priv = dev->dev_private;
  1397.         u32 pp_stat_reg, pp_ctrl_reg;
  1398.  
  1399.         lockdep_assert_held(&dev_priv->pps_mutex);
  1400.  
  1401.         pp_stat_reg = _pp_stat_reg(intel_dp);
  1402.         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1403.  
  1404.         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
  1405.                       mask, value,
  1406.                         I915_READ(pp_stat_reg),
  1407.                         I915_READ(pp_ctrl_reg));
  1408.  
  1409.         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
  1410.                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
  1411.                                 I915_READ(pp_stat_reg),
  1412.                                 I915_READ(pp_ctrl_reg));
  1413.         }
  1414.  
  1415.         DRM_DEBUG_KMS("Wait complete\n");
  1416. }
  1417.  
/* Block until the power sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
  1423.  
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
  1429.  
/*
 * Wait out the panel power-cycle delay: first the remainder of the
 * software-tracked delay since the last power cycle, then until the
 * power sequencer reports the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
  1441.  
/* Wait out the remaining panel-power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
  1447.  
/* Wait out the remaining backlight-off delay since the last disable. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
  1453.  
  1454. /* Read the current pp_control value, unlocking the register if it
  1455.  * is locked
  1456.  */
  1457.  
  1458. static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
  1459. {
  1460.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1461.         struct drm_i915_private *dev_priv = dev->dev_private;
  1462.         u32 control;
  1463.  
  1464.         lockdep_assert_held(&dev_priv->pps_mutex);
  1465.  
  1466.         control = I915_READ(_pp_ctrl_reg(intel_dp));
  1467.         control &= ~PANEL_UNLOCK_MASK;
  1468.         control |= PANEL_UNLOCK_REGS;
  1469.         return control;
  1470. }
  1471.  
  1472. /*
  1473.  * Must be paired with edp_panel_vdd_off().
  1474.  * Must hold pps_mutex around the whole on/off sequence.
  1475.  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
  1476.  */
/*
 * Force the panel VDD rail on so the AUX channel can be used even while
 * the panel itself is off.  Returns true if VDD was previously off,
 * i.e. the caller now owns a reference it must balance with
 * edp_panel_vdd_off(); returns false for non-eDP or nested calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        /* true iff no one had requested VDD before this call */
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return false;

        intel_dp->want_panel_vdd = true;

        /* Hardware already has VDD forced on: nothing more to program. */
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        /* Hold the port's power domain for as long as VDD is forced;
         * released again in edp_panel_vdd_off_sync(). */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->port));

        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}
  1528.  
  1529. /*
  1530.  * Must be paired with intel_edp_panel_vdd_off() or
  1531.  * intel_edp_panel_off().
  1532.  * Nested calls to these functions are not allowed since
  1533.  * we drop the lock. Caller must use some higher level
  1534.  * locking to prevent nested calls from other threads.
  1535.  */
/*
 * Public, locking wrapper around edp_panel_vdd_on().  Warns if VDD was
 * already requested, since these top-level calls must not be nested.
 * No-op for non-eDP ports.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        bool vdd;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        vdd = edp_panel_vdd_on(intel_dp);
        pps_unlock(intel_dp);

        WARN(!vdd, "eDP port %c VDD already requested on\n",
             port_name(dp_to_dig_port(intel_dp)->port));
}
  1550.  
  1551. static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
  1552. {
  1553.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1554.         struct drm_i915_private *dev_priv = dev->dev_private;
  1555.         struct intel_digital_port *intel_dig_port =
  1556.                 dp_to_dig_port(intel_dp);
  1557.         struct intel_encoder *intel_encoder = &intel_dig_port->base;
  1558.         enum intel_display_power_domain power_domain;
  1559.         u32 pp;
  1560.         u32 pp_stat_reg, pp_ctrl_reg;
  1561.  
  1562.         lockdep_assert_held(&dev_priv->pps_mutex);
  1563.  
  1564.         WARN_ON(intel_dp->want_panel_vdd);
  1565.  
  1566.         if (!edp_have_panel_vdd(intel_dp))
  1567.                 return;
  1568.  
  1569.         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
  1570.                       port_name(intel_dig_port->port));
  1571.  
  1572.                 pp = ironlake_get_pp_control(intel_dp);
  1573.         pp &= ~EDP_FORCE_VDD;
  1574.  
  1575.                 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1576.                 pp_stat_reg = _pp_stat_reg(intel_dp);
  1577.  
  1578.                 I915_WRITE(pp_ctrl_reg, pp);
  1579.                 POSTING_READ(pp_ctrl_reg);
  1580.  
  1581.         /* Make sure sequencer is idle before allowing subsequent activity */
  1582.                 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
  1583.                 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
  1584.  
  1585.                 if ((pp & POWER_TARGET_ON) == 0)
  1586.                         intel_dp->last_power_cycle = jiffies;
  1587.  
  1588.                 power_domain = intel_display_port_power_domain(intel_encoder);
  1589.                 intel_display_power_put(dev_priv, power_domain);
  1590. }
  1591.  
  1592. static void edp_panel_vdd_work(struct work_struct *__work)
  1593. {
  1594.         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
  1595.                                                  struct intel_dp, panel_vdd_work);
  1596.  
  1597.         pps_lock(intel_dp);
  1598.         if (!intel_dp->want_panel_vdd)
  1599.         edp_panel_vdd_off_sync(intel_dp);
  1600.         pps_unlock(intel_dp);
  1601. }
  1602.  
/*
 * Schedule deferred release of the forced panel VDD.
 */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
        unsigned long delay;

        /*
         * Queue the timer to fire a long time from now (relative to the power
         * down delay) to keep the panel power up across a sequence of
         * operations.
         */
        delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
        /* NOTE(review): the delayed work is commented out in this port, so
         * 'delay' is computed but unused and VDD effectively stays forced
         * until a synchronous off — presumably intentional for KolibriOS;
         * confirm the port has no workqueue support before re-enabling. */
//   schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
  1615.  
  1616. /*
  1617.  * Must be paired with edp_panel_vdd_on().
  1618.  * Must hold pps_mutex around the whole on/off sequence.
  1619.  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
  1620.  */
/*
 * Release a VDD reference taken with edp_panel_vdd_on().  With @sync the
 * VDD is dropped immediately; otherwise the release is deferred via
 * edp_panel_vdd_schedule_off().  Warns if VDD was never requested.
 * Caller must hold pps_mutex; no-op for non-eDP ports.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        struct drm_i915_private *dev_priv =
                intel_dp_to_dev(intel_dp)->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
             port_name(dp_to_dig_port(intel_dp)->port));

        intel_dp->want_panel_vdd = false;

        if (sync)
                edp_panel_vdd_off_sync(intel_dp);
        else
                edp_panel_vdd_schedule_off(intel_dp);
}
  1641.  
  1642. static void edp_panel_on(struct intel_dp *intel_dp)
  1643. {
  1644.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1645.         struct drm_i915_private *dev_priv = dev->dev_private;
  1646.         u32 pp;
  1647.         u32 pp_ctrl_reg;
  1648.  
  1649.         lockdep_assert_held(&dev_priv->pps_mutex);
  1650.  
  1651.         if (!is_edp(intel_dp))
  1652.                 return;
  1653.  
  1654.         DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
  1655.                       port_name(dp_to_dig_port(intel_dp)->port));
  1656.  
  1657.         if (WARN(edp_have_panel_power(intel_dp),
  1658.                  "eDP port %c panel power already on\n",
  1659.                  port_name(dp_to_dig_port(intel_dp)->port)))
  1660.                 return;
  1661.  
  1662.         wait_panel_power_cycle(intel_dp);
  1663.  
  1664.         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1665.         pp = ironlake_get_pp_control(intel_dp);
  1666.         if (IS_GEN5(dev)) {
  1667.         /* ILK workaround: disable reset around power sequence */
  1668.         pp &= ~PANEL_POWER_RESET;
  1669.                 I915_WRITE(pp_ctrl_reg, pp);
  1670.                 POSTING_READ(pp_ctrl_reg);
  1671.         }
  1672.  
  1673.         pp |= POWER_TARGET_ON;
  1674.         if (!IS_GEN5(dev))
  1675.                 pp |= PANEL_POWER_RESET;
  1676.  
  1677.         I915_WRITE(pp_ctrl_reg, pp);
  1678.         POSTING_READ(pp_ctrl_reg);
  1679.  
  1680.         wait_panel_on(intel_dp);
  1681.         intel_dp->last_power_on = jiffies;
  1682.  
  1683.         if (IS_GEN5(dev)) {
  1684.         pp |= PANEL_POWER_RESET; /* restore panel reset bit */
  1685.                 I915_WRITE(pp_ctrl_reg, pp);
  1686.                 POSTING_READ(pp_ctrl_reg);
  1687.         }
  1688. }
  1689.  
/* Public, locking wrapper around edp_panel_on().  No-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
  1699.  
  1700.  
/*
 * Turn the eDP panel power off.  Panel power and the VDD override are
 * dropped in the same register write (some panels misbehave otherwise),
 * then the function waits for the sequencer to report panel-off and
 * releases the power-domain reference taken when VDD was enabled.
 * Caller must hold pps_mutex and must have VDD on (warned below).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        /* VDD goes off together with panel power in the write below. */
        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Start of the power-cycle window for the next power-on. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
  1742.  
/* Public, locking wrapper around edp_panel_off().  No-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
  1752.  
/* Enable backlight in the panel power control.
 *
 * Waits out the panel-power-on -> backlight-on delay first, then sets
 * EDP_BLC_ENABLE in the PPS control register under pps_lock.
 */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
  1782.  
/* Enable backlight PWM and backlight PP control.
 *
 * PWM first, then the PPS backlight-enable bit — the reverse order of
 * intel_edp_backlight_off().  No-op for non-eDP ports.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
  1794.  
/* Disable backlight in the panel power control.
 *
 * Clears EDP_BLC_ENABLE under pps_lock, records the off timestamp, and
 * then waits out the backlight-off delay before returning.
 */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
  1821.  
/* Disable backlight PP control and backlight PWM.
 *
 * PPS backlight-enable bit first, then PWM — the reverse order of
 * intel_edp_backlight_on().  No-op for non-eDP ports.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
  1833.  
  1834. /*
  1835.  * Hook for controlling the panel power control backlight through the bl_power
  1836.  * sysfs attribute. Take care to handle multiple calls.
  1837.  */
  1838. static void intel_edp_backlight_power(struct intel_connector *connector,
  1839.                                       bool enable)
  1840. {
  1841.         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
  1842.         bool is_enabled;
  1843.  
  1844.         pps_lock(intel_dp);
  1845.         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
  1846.         pps_unlock(intel_dp);
  1847.  
  1848.         if (is_enabled == enable)
  1849.                 return;
  1850.  
  1851.         DRM_DEBUG_KMS("panel power control backlight %s\n",
  1852.                       enable ? "enable" : "disable");
  1853.  
  1854.         if (enable)
  1855.                 _intel_edp_backlight_on(intel_dp);
  1856.         else
  1857.                 _intel_edp_backlight_off(intel_dp);
  1858. }
  1859.  
/*
 * Enable the dedicated eDP PLL via DP_A.  The pipe must be disabled and
 * both the PLL and the port must currently be off (asserted/warned).
 * The 200us delay after the write lets the PLL lock.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200);
}
  1885.  
/*
 * Disable the dedicated eDP PLL via DP_A.  The pipe must be disabled;
 * the PLL is expected to be on and the port off (warned otherwise).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        udelay(200);
}
  1910.  
/* If the sink supports it, try to set the power state appropriately.
 *
 * Writes DP_SET_POWER over AUX: D3 when turning off, D0 (with up to
 * three 1ms-spaced retries, since a waking sink may NAK the first
 * writes) when turning on.  Sinks below DPCD 1.1 lack this register.
 */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                  DP_SET_POWER_D3);
        } else {
                /*
                 * When turning on, we need to retry for 1ms to give the sink
                 * time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                          DP_SET_POWER_D0);
                        if (ret == 1)
                                break;
                        msleep(1);
                }
        }

        /* drm_dp_dpcd_writeb() returns 1 on a successful 1-byte write. */
        if (ret != 1)
                DRM_DEBUG_KMS("failed to %s sink power state\n",
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
  1941.  
/*
 * Read back whether this DP port is enabled in hardware and, if so,
 * which pipe drives it (*pipe).  Returns false if the port's power
 * domain or the port itself is off.  On CPT PCH platforms the pipe has
 * to be found by scanning the transcoder DP control registers for the
 * matching port-select value.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        /* Pipe encoding in the port register differs per platform. */
        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                /* CPT: the pipe<->port mapping lives in TRANS_DP_CTL. */
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        return true;
                }

                for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}
  2000.  
/*
 * Read back the current DP pipe configuration from hardware into
 * @pipe_config: audio enable, sync polarities (port register on
 * non-CPT/port A, transcoder register on CPT), limited color range,
 * link M/N values, port clock (for port A from the eDP PLL frequency)
 * and the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_config *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);
        if (tmp & DP_AUDIO_OUTPUT_ENABLE)
                pipe_config->has_audio = true;

        /* Sync polarity lives in the port register, except on CPT where
         * it is in the transcoder DP control register. */
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A derives its link rate from the eDP PLL frequency bits. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
  2084.  
/*
 * Encoder disable hook: stop audio, then shut the panel down in the
 * required order (VDD on -> backlight off -> sink to D3 -> panel off).
 * On pre-gen5 (g4x) the port itself must also go down before the pipe.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config.has_audio)
                intel_audio_codec_disable(encoder);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
  2105.  
  2106. static void ilk_post_disable_dp(struct intel_encoder *encoder)
  2107. {
  2108.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2109.         enum port port = dp_to_dig_port(intel_dp)->port;
  2110.  
  2111.         intel_dp_link_down(intel_dp);
  2112.         if (port == PORT_A)
  2113.         ironlake_edp_pll_off(intel_dp);
  2114. }
  2115.  
  2116. static void vlv_post_disable_dp(struct intel_encoder *encoder)
  2117. {
  2118.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2119.  
  2120.         intel_dp_link_down(intel_dp);
  2121. }
  2122.  
  2123. static void chv_post_disable_dp(struct intel_encoder *encoder)
  2124. {
  2125.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2126.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2127.         struct drm_device *dev = encoder->base.dev;
  2128.         struct drm_i915_private *dev_priv = dev->dev_private;
  2129.         struct intel_crtc *intel_crtc =
  2130.                 to_intel_crtc(encoder->base.crtc);
  2131.         enum dpio_channel ch = vlv_dport_to_channel(dport);
  2132.         enum pipe pipe = intel_crtc->pipe;
  2133.         u32 val;
  2134.  
  2135.                 intel_dp_link_down(intel_dp);
  2136.  
  2137.         mutex_lock(&dev_priv->dpio_lock);
  2138.  
  2139.         /* Propagate soft reset to data lane reset */
  2140.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
  2141.         val |= CHV_PCS_REQ_SOFTRESET_EN;
  2142.         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
  2143.  
  2144.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
  2145.         val |= CHV_PCS_REQ_SOFTRESET_EN;
  2146.         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
  2147.  
  2148.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
  2149.         val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  2150.         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
  2151.  
  2152.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
  2153.         val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  2154.         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
  2155.  
  2156.         mutex_unlock(&dev_priv->dpio_lock);
  2157. }
  2158.  
/*
 * Encode the requested DP training pattern (and scrambling disable) for
 * this platform.  DDI platforms program DP_TP_CTL directly; all others
 * only update *DP — the caller is responsible for writing it to the
 * port register.  Pattern 3 is only supported on DDI and CHV; elsewhere
 * it logs an error and falls back to pattern 2.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                /* DDI: training pattern lives in DP_TP_CTL, written here. */
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
                /* CPT: pattern encoded in the port register's CPT field. */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* not available in the CPT encoding */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                /* Legacy / VLV / CHV: pattern in the port register. */
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
  2241.  
/*
 * Write the link configuration to the port and then enable it.
 *
 * The register is deliberately written twice: once fully configured but
 * with DP_PORT_EN still clear, and once more with DP_PORT_EN set (see the
 * comment below) — do not collapse the two writes into one.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        /* First write: full configuration, port still disabled. */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        /* Second write: same configuration plus the enable bit. */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
  2265.  
  2266. static void intel_enable_dp(struct intel_encoder *encoder)
  2267. {
  2268.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2269.         struct drm_device *dev = encoder->base.dev;
  2270.         struct drm_i915_private *dev_priv = dev->dev_private;
  2271.         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
  2272.         uint32_t dp_reg = I915_READ(intel_dp->output_reg);
  2273.  
  2274.         if (WARN_ON(dp_reg & DP_PORT_EN))
  2275.                 return;
  2276.  
  2277.         pps_lock(intel_dp);
  2278.  
  2279.         if (IS_VALLEYVIEW(dev))
  2280.                 vlv_init_panel_power_sequencer(intel_dp);
  2281.  
  2282.         intel_dp_enable_port(intel_dp);
  2283.  
  2284.         edp_panel_vdd_on(intel_dp);
  2285.         edp_panel_on(intel_dp);
  2286.         edp_panel_vdd_off(intel_dp, true);
  2287.  
  2288.         pps_unlock(intel_dp);
  2289.  
  2290.         if (IS_VALLEYVIEW(dev))
  2291.                 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
  2292.  
  2293.         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
  2294.                         intel_dp_start_link_train(intel_dp);
  2295.                         intel_dp_complete_link_train(intel_dp);
  2296.         intel_dp_stop_link_train(intel_dp);
  2297.  
  2298.         if (crtc->config.has_audio) {
  2299.                 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
  2300.                                  pipe_name(crtc->pipe));
  2301.                 intel_audio_codec_enable(encoder);
  2302.         }
  2303. }
  2304.  
  2305. static void g4x_enable_dp(struct intel_encoder *encoder)
  2306. {
  2307.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2308.  
  2309.         intel_enable_dp(encoder);
  2310.         intel_edp_backlight_on(intel_dp);
  2311. }
  2312.  
  2313. static void vlv_enable_dp(struct intel_encoder *encoder)
  2314. {
  2315.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2316.  
  2317.         intel_edp_backlight_on(intel_dp);
  2318. }
  2319.  
  2320. static void g4x_pre_enable_dp(struct intel_encoder *encoder)
  2321. {
  2322.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2323.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2324.  
  2325.         intel_dp_prepare(encoder);
  2326.  
  2327.         /* Only ilk+ has port A */
  2328.         if (dport->port == PORT_A) {
  2329.                 ironlake_set_pll_cpu_edp(intel_dp);
  2330.                 ironlake_edp_pll_on(intel_dp);
  2331.         }
  2332. }
  2333.  
/*
 * Disconnect this eDP port from the power sequencer it currently owns.
 * Must be called with vdd state under control (pps_mutex held by callers
 * of the init/steal paths).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        /* Make sure vdd is fully off before we release the sequencer. */
        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        /* Writing 0 clears the port select for this sequencer. */
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        /* This port no longer owns any power sequencer. */
        intel_dp->pps_pipe = INVALID_PIPE;
}
  2359.  
  2360. static void vlv_steal_power_sequencer(struct drm_device *dev,
  2361.                                       enum pipe pipe)
  2362. {
  2363.         struct drm_i915_private *dev_priv = dev->dev_private;
  2364.         struct intel_encoder *encoder;
  2365.  
  2366.         lockdep_assert_held(&dev_priv->pps_mutex);
  2367.  
  2368.         if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
  2369.                 return;
  2370.  
  2371.         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
  2372.                             base.head) {
  2373.                 struct intel_dp *intel_dp;
  2374.                 enum port port;
  2375.  
  2376.                 if (encoder->type != INTEL_OUTPUT_EDP)
  2377.                         continue;
  2378.  
  2379.                 intel_dp = enc_to_intel_dp(&encoder->base);
  2380.                 port = dp_to_dig_port(intel_dp)->port;
  2381.  
  2382.                 if (intel_dp->pps_pipe != pipe)
  2383.                         continue;
  2384.  
  2385.                 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
  2386.                               pipe_name(pipe), port_name(port));
  2387.  
  2388.                 WARN(encoder->connectors_active,
  2389.                      "stealing pipe %c power sequencer from active eDP port %c\n",
  2390.                      pipe_name(pipe), port_name(port));
  2391.  
  2392.                 /* make sure vdd is off before we steal it */
  2393.                 vlv_detach_power_sequencer(intel_dp);
  2394.         }
  2395. }
  2396.  
/*
 * Bind this eDP port to the power sequencer of the pipe it is about to
 * drive, detaching any previous owner first. The order of operations
 * (detach own old sequencer, steal from others, then claim and program)
 * is deliberate — see the comments below.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Only eDP ports use a panel power sequencer. */
        if (!is_edp(intel_dp))
                return;

        /* Already bound to the right pipe's sequencer — nothing to do. */
        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
  2437.  
  2438. static void vlv_pre_enable_dp(struct intel_encoder *encoder)
  2439. {
  2440.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2441.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2442.         struct drm_device *dev = encoder->base.dev;
  2443.         struct drm_i915_private *dev_priv = dev->dev_private;
  2444.         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
  2445.         enum dpio_channel port = vlv_dport_to_channel(dport);
  2446.                 int pipe = intel_crtc->pipe;
  2447.                 u32 val;
  2448.  
  2449.         mutex_lock(&dev_priv->dpio_lock);
  2450.  
  2451.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
  2452.                 val = 0;
  2453.                 if (pipe)
  2454.                         val |= (1<<21);
  2455.                 else
  2456.                         val &= ~(1<<21);
  2457.                 val |= 0x001000c4;
  2458.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
  2459.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
  2460.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
  2461.  
  2462.         mutex_unlock(&dev_priv->dpio_lock);
  2463.  
  2464.         intel_enable_dp(encoder);
  2465. }
  2466.  
  2467. static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
  2468. {
  2469.         struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  2470.         struct drm_device *dev = encoder->base.dev;
  2471.         struct drm_i915_private *dev_priv = dev->dev_private;
  2472.         struct intel_crtc *intel_crtc =
  2473.                 to_intel_crtc(encoder->base.crtc);
  2474.         enum dpio_channel port = vlv_dport_to_channel(dport);
  2475.         int pipe = intel_crtc->pipe;
  2476.  
  2477.         intel_dp_prepare(encoder);
  2478.  
  2479.         /* Program Tx lane resets to default */
  2480.         mutex_lock(&dev_priv->dpio_lock);
  2481.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
  2482.                          DPIO_PCS_TX_LANE2_RESET |
  2483.                          DPIO_PCS_TX_LANE1_RESET);
  2484.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
  2485.                          DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
  2486.                          DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
  2487.                          (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
  2488.                                  DPIO_PCS_CLK_SOFT_RESET);
  2489.  
  2490.         /* Fix up inter-pair skew failure */
  2491.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
  2492.         vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
  2493.         vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
  2494.         mutex_unlock(&dev_priv->dpio_lock);
  2495. }
  2496.  
/*
 * CHV pre-enable: take the data lanes out of reset, program per-lane
 * latency/upar settings, then run the common DP enable sequence. The
 * register sequence and ordering follow the PHY programming model; do
 * not reorder the writes.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i;
        u32 val;

        mutex_lock(&dev_priv->dpio_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

        /* Deassert soft data lane reset*/
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        /* Program Tx lane latency optimal setting*/
        for (i = 0; i < 4; i++) {
                /* Set the latency optimal bit (lane 1 uses a different value) */
                data = (i == 1) ? 0x0 : 0x6;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
                                data << DPIO_FRC_LATENCY_SHFIT);

                /* Set the upar bit */
                data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming */
        /* FIXME: Fix up value only after power analysis */

        mutex_unlock(&dev_priv->dpio_lock);

        intel_enable_dp(encoder);
}
  2558.  
/*
 * CHV pre-PLL hook: prepare the port and program the common-lane clock
 * distribution and clock-channel usage for this PHY channel before the
 * PLL comes up. The pipe (not the port) selects which buffer enables
 * and clock channel are used, except for CHV_CMN_DW19 — see below.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_prepare(encoder);

        mutex_lock(&dev_priv->dpio_lock);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
  2624.  
  2625. /*
  2626.  * Native read with retry for link status and receiver capability reads for
  2627.  * cases where the sink may still be asleep.
  2628.  *
  2629.  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
  2630.  * supposed to retry 3 times per the spec.
  2631.  */
  2632. static ssize_t
  2633. intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
  2634.                         void *buffer, size_t size)
  2635. {
  2636.         ssize_t ret;
  2637.         int i;
  2638.  
  2639.         /*
  2640.          * Sometime we just get the same incorrect byte repeated
  2641.          * over the entire buffer. Doing just one throw away read
  2642.          * initially seems to "solve" it.
  2643.          */
  2644.         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
  2645.  
  2646.         for (i = 0; i < 3; i++) {
  2647.                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
  2648.                 if (ret == size)
  2649.                         return ret;
  2650.                 msleep(1);
  2651.         }
  2652.  
  2653.         return ret;
  2654. }
  2655.  
  2656. /*
  2657.  * Fetch AUX CH registers 0x202 - 0x207 which contain
  2658.  * link status information
  2659.  */
  2660. static bool
  2661. intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
  2662. {
  2663.         return intel_dp_dpcd_read_wake(&intel_dp->aux,
  2664.                                               DP_LANE0_1_STATUS,
  2665.                                               link_status,
  2666.                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
  2667. }
  2668.  
  2669. /* These are source-specific values. */
  2670. static uint8_t
  2671. intel_dp_voltage_max(struct intel_dp *intel_dp)
  2672. {
  2673.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  2674.         enum port port = dp_to_dig_port(intel_dp)->port;
  2675.  
  2676.         if (INTEL_INFO(dev)->gen >= 9)
  2677.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  2678.         else if (IS_VALLEYVIEW(dev))
  2679.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
  2680.         else if (IS_GEN7(dev) && port == PORT_A)
  2681.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  2682.         else if (HAS_PCH_CPT(dev) && port != PORT_A)
  2683.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
  2684.         else
  2685.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  2686. }
  2687.  
  2688. static uint8_t
  2689. intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
  2690. {
  2691.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  2692.         enum port port = dp_to_dig_port(intel_dp)->port;
  2693.  
  2694.         if (INTEL_INFO(dev)->gen >= 9) {
  2695.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2696.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2697.                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
  2698.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2699.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2700.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2701.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2702.                 default:
  2703.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2704.                 }
  2705.         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  2706.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2707.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2708.                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
  2709.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2710.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2711.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2712.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2713.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2714.                 default:
  2715.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2716.                 }
  2717.         } else if (IS_VALLEYVIEW(dev)) {
  2718.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2719.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2720.                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
  2721.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2722.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2723.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2724.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2725.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2726.                 default:
  2727.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2728.                 }
  2729.         } else if (IS_GEN7(dev) && port == PORT_A) {
  2730.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2731.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2732.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2733.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2734.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2735.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2736.                 default:
  2737.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2738.                 }
  2739.         } else {
  2740.         switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2741.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2742.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2743.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2744.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2745.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2746.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2747.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2748.         default:
  2749.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2750.         }
  2751.         }
  2752. }
  2753.  
  2754. static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
  2755. {
  2756.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  2757.         struct drm_i915_private *dev_priv = dev->dev_private;
  2758.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2759.         struct intel_crtc *intel_crtc =
  2760.                 to_intel_crtc(dport->base.base.crtc);
  2761.         unsigned long demph_reg_value, preemph_reg_value,
  2762.                 uniqtranscale_reg_value;
  2763.         uint8_t train_set = intel_dp->train_set[0];
  2764.         enum dpio_channel port = vlv_dport_to_channel(dport);
  2765.         int pipe = intel_crtc->pipe;
  2766.  
  2767.         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
  2768.         case DP_TRAIN_PRE_EMPH_LEVEL_0:
  2769.                 preemph_reg_value = 0x0004000;
  2770.                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2771.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2772.                         demph_reg_value = 0x2B405555;
  2773.                         uniqtranscale_reg_value = 0x552AB83A;
  2774.                         break;
  2775.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2776.                         demph_reg_value = 0x2B404040;
  2777.                         uniqtranscale_reg_value = 0x5548B83A;
  2778.                         break;
  2779.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2780.                         demph_reg_value = 0x2B245555;
  2781.                         uniqtranscale_reg_value = 0x5560B83A;
  2782.                         break;
  2783.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2784.                         demph_reg_value = 0x2B405555;
  2785.                         uniqtranscale_reg_value = 0x5598DA3A;
  2786.                         break;
  2787.                 default:
  2788.                         return 0;
  2789.                 }
  2790.                 break;
  2791.         case DP_TRAIN_PRE_EMPH_LEVEL_1:
  2792.                 preemph_reg_value = 0x0002000;
  2793.                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2794.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2795.                         demph_reg_value = 0x2B404040;
  2796.                         uniqtranscale_reg_value = 0x5552B83A;
  2797.                         break;
  2798.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2799.                         demph_reg_value = 0x2B404848;
  2800.                         uniqtranscale_reg_value = 0x5580B83A;
  2801.                         break;
  2802.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2803.                         demph_reg_value = 0x2B404040;
  2804.                         uniqtranscale_reg_value = 0x55ADDA3A;
  2805.                         break;
  2806.                 default:
  2807.                         return 0;
  2808.                 }
  2809.                 break;
  2810.         case DP_TRAIN_PRE_EMPH_LEVEL_2:
  2811.                 preemph_reg_value = 0x0000000;
  2812.                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2813.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2814.                         demph_reg_value = 0x2B305555;
  2815.                         uniqtranscale_reg_value = 0x5570B83A;
  2816.                         break;
  2817.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2818.                         demph_reg_value = 0x2B2B4040;
  2819.                         uniqtranscale_reg_value = 0x55ADDA3A;
  2820.                         break;
  2821.                 default:
  2822.                         return 0;
  2823.                 }
  2824.                 break;
  2825.         case DP_TRAIN_PRE_EMPH_LEVEL_3:
  2826.                 preemph_reg_value = 0x0006000;
  2827.                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2828.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2829.                         demph_reg_value = 0x1B405555;
  2830.                         uniqtranscale_reg_value = 0x55ADDA3A;
  2831.                         break;
  2832.                 default:
  2833.                         return 0;
  2834.                 }
  2835.                 break;
  2836.         default:
  2837.                 return 0;
  2838.         }
  2839.