Subversion Repositories Kolibri OS

Rev

Rev 5354 | Rev 6084 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2008 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Keith Packard <keithp@keithp.com>
  25.  *
  26.  */
  27.  
  28. #include <linux/i2c.h>
  29. #include <linux/slab.h>
  30. #include <linux/export.h>
  31. #include <drm/drmP.h>
  32. #include <drm/drm_crtc.h>
  33. #include <drm/drm_crtc_helper.h>
  34. #include <drm/drm_edid.h>
  35. #include "intel_drv.h"
  36. #include <drm/i915_drm.h>
  37. #include "i915_drv.h"
  38.  
#define DP_LINK_CHECK_TIMEOUT   (10 * 1000)     /* 10*1000 — NOTE(review): units (ms vs us) not evident here, confirm against callers */
  40.  
/*
 * Maps a DP link bandwidth code to the DPLL divider settings that
 * produce that link rate on a given platform.
 */
struct dp_link_dpll {
	int link_bw;		/* DP_LINK_BW_* code */
	struct dpll dpll;	/* PLL dividers (n, m1, m2, p1, p2) */
};
  45.  
/* DPLL divider settings for the two standard DP link rates on gen4. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
  52.  
/* DPLL divider settings for the two standard DP link rates on PCH platforms. */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
  59.  
/* DPLL divider settings for the two standard DP link rates on Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
  66.  
/*
 * CHV supports eDP 1.4 which has more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
  84.  
  85. /**
  86.  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  87.  * @intel_dp: DP struct
  88.  *
  89.  * If a CPU or PCH DP output is attached to an eDP panel, this function
  90.  * will return true, and false otherwise.
  91.  */
  92. static bool is_edp(struct intel_dp *intel_dp)
  93. {
  94.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  95.  
  96.         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
  97. }
  98.  
  99. static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
  100. {
  101.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  102.  
  103.         return intel_dig_port->base.base.dev;
  104. }
  105.  
/* Return the intel_dp behind the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
  110.  
/* Forward declarations for link teardown and panel power sequencer (PPS)
 * helpers defined later in this file. */
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);
  117.  
  118. int
  119. intel_dp_max_link_bw(struct intel_dp *intel_dp)
  120. {
  121.         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
  122.         struct drm_device *dev = intel_dp->attached_connector->base.dev;
  123.  
  124.         switch (max_link_bw) {
  125.         case DP_LINK_BW_1_62:
  126.         case DP_LINK_BW_2_7:
  127.                 break;
  128.         case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
  129.                 if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
  130.                      INTEL_INFO(dev)->gen >= 8) &&
  131.                     intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
  132.                         max_link_bw = DP_LINK_BW_5_4;
  133.                 else
  134.                         max_link_bw = DP_LINK_BW_2_7;
  135.                 break;
  136.         default:
  137.                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
  138.                      max_link_bw);
  139.                 max_link_bw = DP_LINK_BW_1_62;
  140.                 break;
  141.         }
  142.         return max_link_bw;
  143. }
  144.  
  145. static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
  146. {
  147.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  148.         struct drm_device *dev = intel_dig_port->base.base.dev;
  149.         u8 source_max, sink_max;
  150.  
  151.         source_max = 4;
  152.         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
  153.             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
  154.                 source_max = 2;
  155.  
  156.         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
  157.  
  158.         return min(source_max, sink_max);
  159. }
  160.  
  161. /*
  162.  * The units on the numbers in the next two are... bizarre.  Examples will
  163.  * make it clearer; this one parallels an example in the eDP spec.
  164.  *
  165.  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
  166.  *
  167.  *     270000 * 1 * 8 / 10 == 216000
  168.  *
  169.  * The actual data capacity of that configuration is 2.16Gbit/s, so the
  170.  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
  171.  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
  172.  * 119000.  At 18bpp that's 2142000 kilobits per second.
  173.  *
  174.  * Thus the strange-looking division by 10 in intel_dp_link_required, to
  175.  * get the result in decakilobits instead of kilobits.
  176.  */
  177.  
  178. static int
  179. intel_dp_link_required(int pixel_clock, int bpp)
  180. {
  181.         return (pixel_clock * bpp + 9) / 10;
  182. }
  183.  
  184. static int
  185. intel_dp_max_data_rate(int max_link_clock, int max_lanes)
  186. {
  187.         return (max_link_clock * max_lanes * 8) / 10;
  188. }
  189.  
  190. static enum drm_mode_status
  191. intel_dp_mode_valid(struct drm_connector *connector,
  192.                     struct drm_display_mode *mode)
  193. {
  194.         struct intel_dp *intel_dp = intel_attached_dp(connector);
  195.         struct intel_connector *intel_connector = to_intel_connector(connector);
  196.         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
  197.         int target_clock = mode->clock;
  198.         int max_rate, mode_rate, max_lanes, max_link_clock;
  199.  
  200.         if (is_edp(intel_dp) && fixed_mode) {
  201.                 if (mode->hdisplay > fixed_mode->hdisplay)
  202.                         return MODE_PANEL;
  203.  
  204.                 if (mode->vdisplay > fixed_mode->vdisplay)
  205.                         return MODE_PANEL;
  206.  
  207.                 target_clock = fixed_mode->clock;
  208.         }
  209.  
  210.         max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
  211.         max_lanes = intel_dp_max_lane_count(intel_dp);
  212.  
  213.         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
  214.         mode_rate = intel_dp_link_required(target_clock, 18);
  215.  
  216.         if (mode_rate > max_rate)
  217.                 return MODE_CLOCK_HIGH;
  218.  
  219.         if (mode->clock < 10000)
  220.                 return MODE_CLOCK_LOW;
  221.  
  222.         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
  223.                 return MODE_H_ILLEGAL;
  224.  
  225.         return MODE_OK;
  226. }
  227.  
  228. uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
  229. {
  230.         int     i;
  231.         uint32_t v = 0;
  232.  
  233.         if (src_bytes > 4)
  234.                 src_bytes = 4;
  235.         for (i = 0; i < src_bytes; i++)
  236.                 v |= ((uint32_t) src[i]) << ((3-i) * 8);
  237.         return v;
  238. }
  239.  
  240. void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
  241. {
  242.         int i;
  243.         if (dst_bytes > 4)
  244.                 dst_bytes = 4;
  245.         for (i = 0; i < dst_bytes; i++)
  246.                 dst[i] = src >> ((3-i) * 8);
  247. }
  248.  
  249. /* hrawclock is 1/4 the FSB frequency */
  250. static int
  251. intel_hrawclk(struct drm_device *dev)
  252. {
  253.         struct drm_i915_private *dev_priv = dev->dev_private;
  254.         uint32_t clkcfg;
  255.  
  256.         /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
  257.         if (IS_VALLEYVIEW(dev))
  258.                 return 200;
  259.  
  260.         clkcfg = I915_READ(CLKCFG);
  261.         switch (clkcfg & CLKCFG_FSB_MASK) {
  262.         case CLKCFG_FSB_400:
  263.                 return 100;
  264.         case CLKCFG_FSB_533:
  265.                 return 133;
  266.         case CLKCFG_FSB_667:
  267.                 return 166;
  268.         case CLKCFG_FSB_800:
  269.                 return 200;
  270.         case CLKCFG_FSB_1067:
  271.                 return 266;
  272.         case CLKCFG_FSB_1333:
  273.                 return 333;
  274.         /* these two are just a guess; one of them might be right */
  275.         case CLKCFG_FSB_1600:
  276.         case CLKCFG_FSB_1600_ALT:
  277.                 return 400;
  278.         default:
  279.                 return 133;
  280.         }
  281. }
  282.  
/* Forward declarations for the PPS init helpers used by the lock helpers
 * and pipe-selection code below. */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);
  289.  
  290. static void pps_lock(struct intel_dp *intel_dp)
  291. {
  292.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  293.         struct intel_encoder *encoder = &intel_dig_port->base;
  294.         struct drm_device *dev = encoder->base.dev;
  295.         struct drm_i915_private *dev_priv = dev->dev_private;
  296.         enum intel_display_power_domain power_domain;
  297.  
  298.         /*
  299.          * See vlv_power_sequencer_reset() why we need
  300.          * a power domain reference here.
  301.          */
  302.         power_domain = intel_display_port_power_domain(encoder);
  303.         intel_display_power_get(dev_priv, power_domain);
  304.  
  305.         mutex_lock(&dev_priv->pps_mutex);
  306. }
  307.  
  308. static void pps_unlock(struct intel_dp *intel_dp)
  309. {
  310.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  311.         struct intel_encoder *encoder = &intel_dig_port->base;
  312.         struct drm_device *dev = encoder->base.dev;
  313.         struct drm_i915_private *dev_priv = dev->dev_private;
  314.         enum intel_display_power_domain power_domain;
  315.  
  316.         mutex_unlock(&dev_priv->pps_mutex);
  317.  
  318.         power_domain = intel_display_port_power_domain(encoder);
  319.         intel_display_power_put(dev_priv, power_domain);
  320. }
  321.  
/*
 * Make the freshly assigned power sequencer pipe actually latch onto the
 * port by briefly enabling and disabling the port with a minimal 1-lane
 * configuration. Bails out (with a warning) if the port is already active.
 * The port's pipe DPLL is force-enabled around the trick if needed.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	/* Minimal harmless config: lowest drive levels, 1 lane, pattern 1. */
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	/* Route the port to the pipe whose sequencer we want to claim. */
	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}
  381.  
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning
 * one (and initializing + kicking it) if none is assigned yet.
 * Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	/* bitmask of candidate pipes; only A and B have sequencers */
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* Already assigned? Just reuse it. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;	/* lowest free pipe */

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
  445.  
/* Predicate used to filter candidate PPS pipes in vlv_initial_pps_pipe(). */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);
  448.  
  449. static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
  450.                                enum pipe pipe)
  451. {
  452.         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
  453. }
  454.  
  455. static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
  456.                                 enum pipe pipe)
  457. {
  458.         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
  459. }
  460.  
/* Pipe check that accepts any pipe; used as the last-resort filter when
 * looking for the initial power sequencer assignment. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
  466.  
  467. static enum pipe
  468. vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
  469.                      enum port port,
  470.                      vlv_pipe_check pipe_check)
  471. {
  472.         enum pipe pipe;
  473.  
  474.         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
  475.                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
  476.                         PANEL_PORT_SELECT_MASK;
  477.  
  478.                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
  479.                         continue;
  480.  
  481.                 if (!pipe_check(dev_priv, pipe))
  482.                         continue;
  483.  
  484.                         return pipe;
  485.         }
  486.  
  487.         return INVALID_PIPE;
  488. }
  489.  
/*
 * At driver init, try to recover which power sequencer pipe the BIOS left
 * driving this eDP port, preferring the strongest evidence first (panel
 * power on > VDD on > port merely selected). If nothing matches, leave
 * pps_pipe INVALID and let vlv_power_sequencer_pipe() assign one later.
 * Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
  526.  
/*
 * Invalidate every eDP port's cached power sequencer pipe assignment
 * (e.g. across a power well disable). VLV-only; warns and bails on
 * other platforms.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
  555.  
  556. static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
  557. {
  558.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  559.  
  560.         if (HAS_PCH_SPLIT(dev))
  561.                 return PCH_PP_CONTROL;
  562.         else
  563.                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
  564. }
  565.  
  566. static u32 _pp_stat_reg(struct intel_dp *intel_dp)
  567. {
  568.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  569.  
  570.         if (HAS_PCH_SPLIT(dev))
  571.                 return PCH_PP_STATUS;
  572.         else
  573.                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
  574. }
  575.  
#if 0
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked.
   NOTE: compiled out in this port (no reboot-notifier infrastructure). */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	/* Only act for eDP panels, and only on restart. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Wait out the panel power cycle before allowing reboot. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
#endif
  613.  
  614. static bool edp_have_panel_power(struct intel_dp *intel_dp)
  615. {
  616.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  617.         struct drm_i915_private *dev_priv = dev->dev_private;
  618.  
  619.         lockdep_assert_held(&dev_priv->pps_mutex);
  620.  
  621.         if (IS_VALLEYVIEW(dev) &&
  622.             intel_dp->pps_pipe == INVALID_PIPE)
  623.                 return false;
  624.  
  625.         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
  626. }
  627.  
  628. static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
  629. {
  630.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  631.         struct drm_i915_private *dev_priv = dev->dev_private;
  632.  
  633.         lockdep_assert_held(&dev_priv->pps_mutex);
  634.  
  635.         if (IS_VALLEYVIEW(dev) &&
  636.             intel_dp->pps_pipe == INVALID_PIPE)
  637.                 return false;
  638.  
  639.         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
  640. }
  641.  
/*
 * Sanity check before an AUX transaction on eDP: either panel power or
 * forced VDD must be up, otherwise warn and dump the PPS registers.
 * No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
  658.  
/*
 * Wait for an in-flight AUX transfer to complete, either by sleeping on
 * the GMBUS wait queue (interrupt-driven) or by atomic polling (10ms cap
 * in both cases). Returns the last-read AUX channel control register value.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C is true once SEND_BUSY clears; as a side effect each evaluation
 * latches the current register value into 'status'. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
  682.  
  683. static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  684. {
  685.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  686.         struct drm_device *dev = intel_dig_port->base.base.dev;
  687.  
  688.         /*
  689.          * The clock divider is based off the hrawclk, and would like to run at
  690.          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
  691.          */
  692.         return index ? 0 : intel_hrawclk(dev) / 2;
  693. }
  694.  
  695. static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  696. {
  697.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  698.         struct drm_device *dev = intel_dig_port->base.base.dev;
  699.  
  700.         if (index)
  701.                 return 0;
  702.  
  703.         if (intel_dig_port->port == PORT_A) {
  704.                 if (IS_GEN6(dev) || IS_GEN7(dev))
  705.                         return 200; /* SNB & IVB eDP input clock at 400Mhz */
  706.                 else
  707.                         return 225; /* eDP input clock at 450Mhz */
  708.         } else {
  709.                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
  710.         }
  711. }
  712.  
  713. static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  714. {
  715.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  716.         struct drm_device *dev = intel_dig_port->base.base.dev;
  717.         struct drm_i915_private *dev_priv = dev->dev_private;
  718.  
  719.         if (intel_dig_port->port == PORT_A) {
  720.                 if (index)
  721.                         return 0;
  722.                 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
  723.         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
  724.                 /* Workaround for non-ULT HSW */
  725.                 switch (index) {
  726.                 case 0: return 63;
  727.                 case 1: return 72;
  728.                 default: return 0;
  729.                 }
  730.         } else  {
  731.                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
  732.         }
  733. }
  734.  
  735. static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  736. {
  737.         return index ? 0 : 100;
  738. }
  739.  
  740. static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  741. {
  742.         /*
  743.          * SKL doesn't need us to program the AUX clock divider (Hardware will
  744.          * derive the clock from CDCLK automatically). We still implement the
  745.          * get_aux_clock_divider vfunc to plug-in into the existing code.
  746.          */
  747.         return index ? 0 : 1;
  748. }
  749.  
  750. static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
  751.                                       bool has_aux_irq,
  752.                                       int send_bytes,
  753.                                       uint32_t aux_clock_divider)
  754. {
  755.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  756.         struct drm_device *dev = intel_dig_port->base.base.dev;
  757.         uint32_t precharge, timeout;
  758.  
  759.         if (IS_GEN6(dev))
  760.                 precharge = 3;
  761.         else
  762.                 precharge = 5;
  763.  
  764.         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
  765.                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
  766.         else
  767.                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
  768.  
  769.         return DP_AUX_CH_CTL_SEND_BUSY |
  770.                DP_AUX_CH_CTL_DONE |
  771.                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
  772.                DP_AUX_CH_CTL_TIME_OUT_ERROR |
  773.                timeout |
  774.                DP_AUX_CH_CTL_RECEIVE_ERROR |
  775.                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
  776.                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
  777.                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
  778. }
  779.  
  780. static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
  781.                                       bool has_aux_irq,
  782.                                       int send_bytes,
  783.                                       uint32_t unused)
  784. {
  785.         return DP_AUX_CH_CTL_SEND_BUSY |
  786.                DP_AUX_CH_CTL_DONE |
  787.                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
  788.                DP_AUX_CH_CTL_TIME_OUT_ERROR |
  789.                DP_AUX_CH_CTL_TIME_OUT_1600us |
  790.                DP_AUX_CH_CTL_RECEIVE_ERROR |
  791.                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
  792.                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
  793. }
  794.  
/*
 * intel_dp_aux_ch - perform one raw DP AUX channel transaction.
 * @intel_dp:   port to transact on
 * @send:       bytes to transmit (AUX header plus payload, pre-packed)
 * @send_bytes: length of @send; must be <= 20 (only 5 data registers)
 * @recv:       buffer receiving the reply bytes
 * @recv_size:  capacity of @recv; must be <= 20
 *
 * Returns the number of bytes received, or a negative errno:
 * -EBUSY (channel stuck busy or transfer never completed), -E2BIG
 * (buffer limits exceeded), -EIO (receive error), -ETIMEDOUT (sink
 * did not answer; normal when nothing is connected).
 *
 * The whole transaction runs under pps_lock() with panel VDD forced
 * on; both are dropped on exit.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        /* Data registers immediately follow the control register. */
        uint32_t ch_data = ch_ctl + 4;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;

        pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for eg. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */

        intel_dp_check_edp(intel_dp);

        intel_aux_display_runtime_get(dev_priv);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        /* Channel still busy after ~3ms: give up rather than clobber it. */
        if (try == 3) {
                WARN(1, "dp_aux_ch not started status 0x%08x\n",
                     I915_READ(ch_ctl));
                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        /*
         * Outer loop walks the platform's list of AUX clock dividers
         * (terminated by 0); the transfer is retried with each divider
         * until it completes.
         */
        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);

        /* Must try at least 3 times according to DP spec */
        for (try = 0; try < 5; try++) {
                /* Load the send data into the aux channel data registers */
                for (i = 0; i < send_bytes; i += 4)
                        I915_WRITE(ch_data + i,
                                   intel_dp_pack_aux(send + i,
                                                     send_bytes - i));

                /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);

                status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                /* Clear done status and any errors */
                I915_WRITE(ch_ctl,
                           status |
                           DP_AUX_CH_CTL_DONE |
                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                           DP_AUX_CH_CTL_RECEIVE_ERROR);

                /* On timeout/receive error, retry with the same divider. */
                if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
                              DP_AUX_CH_CTL_RECEIVE_ERROR))
                        continue;
                if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }
                /* Done with this divider: stop trying further dividers. */
                if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
        /* Clamp to the caller's buffer; extra bytes are dropped. */
        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(I915_READ(ch_data + i),
                           recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
//      pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
        intel_aux_display_runtime_put(dev_priv);

        /* Only drop VDD if we turned it on ourselves above. */
        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp);

        return ret;
}
  933.  
  934. #define BARE_ADDRESS_SIZE       3
  935. #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
  936. static ssize_t
  937. intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
  938. {
  939.         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
  940.         uint8_t txbuf[20], rxbuf[20];
  941.         size_t txsize, rxsize;
  942.         int ret;
  943.  
  944.         txbuf[0] = msg->request << 4;
  945.         txbuf[1] = msg->address >> 8;
  946.         txbuf[2] = msg->address & 0xff;
  947.         txbuf[3] = msg->size - 1;
  948.  
  949.         switch (msg->request & ~DP_AUX_I2C_MOT) {
  950.         case DP_AUX_NATIVE_WRITE:
  951.         case DP_AUX_I2C_WRITE:
  952.                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
  953.                 rxsize = 1;
  954.  
  955.                 if (WARN_ON(txsize > 20))
  956.                 return -E2BIG;
  957.  
  958.                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
  959.  
  960.                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
  961.                 if (ret > 0) {
  962.                         msg->reply = rxbuf[0] >> 4;
  963.  
  964.                         /* Return payload size. */
  965.                         ret = msg->size;
  966.                 }
  967.                         break;
  968.  
  969.         case DP_AUX_NATIVE_READ:
  970.         case DP_AUX_I2C_READ:
  971.                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
  972.                 rxsize = msg->size + 1;
  973.  
  974.                 if (WARN_ON(rxsize > 20))
  975.                 return -E2BIG;
  976.  
  977.                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
  978.                 if (ret > 0) {
  979.                         msg->reply = rxbuf[0] >> 4;
  980.                         /*
  981.                          * Assume happy day, and copy the data. The caller is
  982.                          * expected to check msg->reply before touching it.
  983.                          *
  984.                          * Return payload size.
  985.                          */
  986.                         ret--;
  987.                         memcpy(msg->buffer, rxbuf + 1, ret);
  988.                 }
  989.                 break;
  990.  
  991.         default:
  992.                 ret = -EINVAL;
  993.                 break;
  994.         }
  995.  
  996.                         return ret;
  997. }
  998.  
  999. static void
  1000. intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
  1001. {
  1002.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1003.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  1004.         enum port port = intel_dig_port->port;
  1005.         const char *name = NULL;
  1006.         int ret;
  1007.  
  1008.         switch (port) {
  1009.         case PORT_A:
  1010.                 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
  1011.                 name = "DPDDC-A";
  1012.                 break;
  1013.         case PORT_B:
  1014.                 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
  1015.                 name = "DPDDC-B";
  1016.                 break;
  1017.         case PORT_C:
  1018.                 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
  1019.                 name = "DPDDC-C";
  1020.                 break;
  1021.         case PORT_D:
  1022.                 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
  1023.                 name = "DPDDC-D";
  1024.                 break;
  1025.         default:
  1026.                 BUG();
  1027.         }
  1028.  
  1029.         /*
  1030.          * The AUX_CTL register is usually DP_CTL + 0x10.
  1031.          *
  1032.          * On Haswell and Broadwell though:
  1033.          *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
  1034.          *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
  1035.          *
  1036.          * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
  1037.          */
  1038.         if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
  1039.                 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
  1040.  
  1041.         intel_dp->aux.name = name;
  1042.         intel_dp->aux.dev = dev->dev;
  1043.         intel_dp->aux.transfer = intel_dp_aux_transfer;
  1044.  
  1045.         DRM_DEBUG_KMS("registering %s bus\n", name);
  1046.  
  1047.         ret = drm_dp_aux_register(&intel_dp->aux);
  1048.                 if (ret < 0) {
  1049.                 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
  1050.                           name, ret);
  1051.                 return;
  1052.         }
  1053. }
  1054.  
  1055. static void
  1056. intel_dp_connector_unregister(struct intel_connector *intel_connector)
  1057. {
  1058.         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
  1059.  
  1060.         intel_connector_unregister(intel_connector);
  1061. }
  1062.  
  1063. static void
  1064. skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
  1065. {
  1066.         u32 ctrl1;
  1067.  
  1068.         pipe_config->ddi_pll_sel = SKL_DPLL0;
  1069.         pipe_config->dpll_hw_state.cfgcr1 = 0;
  1070.         pipe_config->dpll_hw_state.cfgcr2 = 0;
  1071.  
  1072.         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
  1073.         switch (link_bw) {
  1074.         case DP_LINK_BW_1_62:
  1075.                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
  1076.                                               SKL_DPLL0);
  1077.                 break;
  1078.         case DP_LINK_BW_2_7:
  1079.                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
  1080.                                               SKL_DPLL0);
  1081.                 break;
  1082.         case DP_LINK_BW_5_4:
  1083.                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
  1084.                                               SKL_DPLL0);
  1085.                 break;
  1086.         }
  1087.         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
  1088. }
  1089.  
  1090. static void
  1091. hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
  1092. {
  1093.         switch (link_bw) {
  1094.         case DP_LINK_BW_1_62:
  1095.                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
  1096.                 break;
  1097.         case DP_LINK_BW_2_7:
  1098.                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
  1099.                 break;
  1100.         case DP_LINK_BW_5_4:
  1101.                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
  1102.                 break;
  1103.         }
  1104. }
  1105.  
  1106. static void
  1107. intel_dp_set_clock(struct intel_encoder *encoder,
  1108.                    struct intel_crtc_config *pipe_config, int link_bw)
  1109. {
  1110.         struct drm_device *dev = encoder->base.dev;
  1111.         const struct dp_link_dpll *divisor = NULL;
  1112.         int i, count = 0;
  1113.  
  1114.         if (IS_G4X(dev)) {
  1115.                 divisor = gen4_dpll;
  1116.                 count = ARRAY_SIZE(gen4_dpll);
  1117.         } else if (HAS_PCH_SPLIT(dev)) {
  1118.                 divisor = pch_dpll;
  1119.                 count = ARRAY_SIZE(pch_dpll);
  1120.         } else if (IS_CHERRYVIEW(dev)) {
  1121.                 divisor = chv_dpll;
  1122.                 count = ARRAY_SIZE(chv_dpll);
  1123.         } else if (IS_VALLEYVIEW(dev)) {
  1124.                 divisor = vlv_dpll;
  1125.                 count = ARRAY_SIZE(vlv_dpll);
  1126.         }
  1127.  
  1128.         if (divisor && count) {
  1129.                 for (i = 0; i < count; i++) {
  1130.                         if (link_bw == divisor[i].link_bw) {
  1131.                                 pipe_config->dpll = divisor[i].dpll;
  1132.                                 pipe_config->clock_set = true;
  1133.                                 break;
  1134.                         }
  1135.                 }
  1136.         }
  1137. }
  1138.  
/*
 * intel_dp_compute_config - derive the DP link configuration for a mode.
 *
 * Searches for the cheapest (bpp, link clock, lane count) combination
 * whose data rate can carry the adjusted mode, then fills in the pipe
 * config (bpp, port clock, M/N values, DRRS, PLL selection).
 *
 * Returns true on success, false if the mode cannot be carried on any
 * supported link configuration (or uses double-clock flags).
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_config *pipe_config)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *intel_crtc = encoder->new_crtc;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int min_lane_count = 1;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        /* Conveniently, the link BW constants become indices with a shift...*/
        int min_clock = 0;
        int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
        int bpp, mode_rate;
        static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
        int link_avail, link_clock;

        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
                pipe_config->has_pch_encoder = true;

        pipe_config->has_dp_encoder = true;
        pipe_config->has_drrs = false;
        pipe_config->has_audio = intel_dp->has_audio;

        /* eDP panels use their fixed native mode plus panel fitting. */
        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);
                if (!HAS_PCH_SPLIT(dev))
                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                                 intel_connector->panel.fitting_mode);
                else
                        intel_pch_panel_fitting(intel_crtc, pipe_config,
                                                intel_connector->panel.fitting_mode);
        }

        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return false;

        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %02x pixel clock %iKHz\n",
                      max_lane_count, bws[max_clock],
                      adjusted_mode->crtc_clock);

        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
        if (is_edp(intel_dp)) {
                if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
                DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
                              dev_priv->vbt.edp_bpp);
                bpp = dev_priv->vbt.edp_bpp;
        }

                /*
                 * Use the maximum clock and number of lanes the eDP panel
                 * advertizes being capable of. The panels are generally
                 * designed to support only a single clock and lane
                 * configuration, and typically these values correspond to the
                 * native resolution of the panel.
                 */
                        min_lane_count = max_lane_count;
                min_clock = max_clock;
        }

        /*
         * Search highest bpp first; within a bpp prefer the lowest link
         * clock, then the fewest lanes that still carry the mode rate.
         */
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);

                for (clock = min_clock; clock <= max_clock; clock++) {
                for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);

                                if (mode_rate <= link_avail) {
                                        goto found;
                                }
                        }
                }
        }

                return false;

/* Here bpp, clock and lane_count hold a workable configuration. */
found:
        if (intel_dp->color_range_auto) {
                /*
                 * See:
                 * CEA-861-E - 5.1 Default Encoding Parameters
                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
                 */
                if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
                        intel_dp->color_range = DP_COLOR_RANGE_16_235;
                else
                        intel_dp->color_range = 0;
        }

        if (intel_dp->color_range)
                pipe_config->limited_color_range = true;

                                intel_dp->link_bw = bws[clock];
                                intel_dp->lane_count = lane_count;
        pipe_config->pipe_bpp = bpp;
        pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

        DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
                                       intel_dp->link_bw, intel_dp->lane_count,
                      pipe_config->port_clock, bpp);
                                DRM_DEBUG_KMS("DP link bw required %i available %i\n",
                                              mode_rate, link_avail);

        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n);

        /* Seamless DRRS: also precompute M/N for the downclocked mode. */
        if (intel_connector->panel.downclock_mode != NULL &&
                intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
                        pipe_config->has_drrs = true;
                        intel_link_compute_m_n(bpp, lane_count,
                                intel_connector->panel.downclock_mode->clock,
                                pipe_config->port_clock,
                                &pipe_config->dp_m2_n2);
        }

        /* Platform-specific PLL selection for the chosen link rate. */
        if (IS_SKYLAKE(dev) && is_edp(intel_dp))
                skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
        else
        intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

        return true;
}
  1276.  
  1277. static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
  1278. {
  1279.         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  1280.         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
  1281.         struct drm_device *dev = crtc->base.dev;
  1282.         struct drm_i915_private *dev_priv = dev->dev_private;
  1283.         u32 dpa_ctl;
  1284.  
  1285.         DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
  1286.         dpa_ctl = I915_READ(DP_A);
  1287.         dpa_ctl &= ~DP_PLL_FREQ_MASK;
  1288.  
  1289.         if (crtc->config.port_clock == 162000) {
  1290.                 /* For a long time we've carried around a ILK-DevA w/a for the
  1291.                  * 160MHz clock. If we're really unlucky, it's still required.
  1292.                  */
  1293.                 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
  1294.                 dpa_ctl |= DP_PLL_FREQ_160MHZ;
  1295.                 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
  1296.         } else {
  1297.                 dpa_ctl |= DP_PLL_FREQ_270MHZ;
  1298.                 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
  1299.         }
  1300.  
  1301.         I915_WRITE(DP_A, dpa_ctl);
  1302.  
  1303.         POSTING_READ(DP_A);
  1304.         udelay(500);
  1305. }
  1306.  
  1307. static void intel_dp_prepare(struct intel_encoder *encoder)
  1308. {
  1309.         struct drm_device *dev = encoder->base.dev;
  1310.         struct drm_i915_private *dev_priv = dev->dev_private;
  1311.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  1312.         enum port port = dp_to_dig_port(intel_dp)->port;
  1313.         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
  1314.         struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
  1315.  
  1316.         /*
  1317.          * There are four kinds of DP registers:
  1318.          *
  1319.          *      IBX PCH
  1320.          *      SNB CPU
  1321.          *      IVB CPU
  1322.          *      CPT PCH
  1323.          *
  1324.          * IBX PCH and CPU are the same for almost everything,
  1325.          * except that the CPU DP PLL is configured in this
  1326.          * register
  1327.          *
  1328.          * CPT PCH is quite different, having many bits moved
  1329.          * to the TRANS_DP_CTL register instead. That
  1330.          * configuration happens (oddly) in ironlake_pch_enable
  1331.          */
  1332.  
  1333.         /* Preserve the BIOS-computed detected bit. This is
  1334.          * supposed to be read-only.
  1335.          */
  1336.         intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
  1337.  
  1338.         /* Handle DP bits in common between all three register formats */
  1339.         intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
  1340.         intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
  1341.  
  1342.         if (crtc->config.has_audio)
  1343.                 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
  1344.  
  1345.         /* Split out the IBX/CPU vs CPT settings */
  1346.  
  1347.         if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
  1348.                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
  1349.                         intel_dp->DP |= DP_SYNC_HS_HIGH;
  1350.                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
  1351.                         intel_dp->DP |= DP_SYNC_VS_HIGH;
  1352.                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
  1353.  
  1354.                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
  1355.                         intel_dp->DP |= DP_ENHANCED_FRAMING;
  1356.  
  1357.                 intel_dp->DP |= crtc->pipe << 29;
  1358.         } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
  1359.                 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
  1360.                 intel_dp->DP |= intel_dp->color_range;
  1361.  
  1362.                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
  1363.                         intel_dp->DP |= DP_SYNC_HS_HIGH;
  1364.                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
  1365.                         intel_dp->DP |= DP_SYNC_VS_HIGH;
  1366.                 intel_dp->DP |= DP_LINK_TRAIN_OFF;
  1367.  
  1368.                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
  1369.                 intel_dp->DP |= DP_ENHANCED_FRAMING;
  1370.  
  1371.                 if (!IS_CHERRYVIEW(dev)) {
  1372.                 if (crtc->pipe == 1)
  1373.                 intel_dp->DP |= DP_PIPEB_SELECT;
  1374.         } else {
  1375.                         intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
  1376.                 }
  1377.         } else {
  1378.                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
  1379.         }
  1380. }
  1381.  
  1382. #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
  1383. #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
  1384.  
  1385. #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
  1386. #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
  1387.  
  1388. #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
  1389. #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
  1390.  
  1391. static void wait_panel_status(struct intel_dp *intel_dp,
  1392.                                        u32 mask,
  1393.                                        u32 value)
  1394. {
  1395.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1396.         struct drm_i915_private *dev_priv = dev->dev_private;
  1397.         u32 pp_stat_reg, pp_ctrl_reg;
  1398.  
  1399.         lockdep_assert_held(&dev_priv->pps_mutex);
  1400.  
  1401.         pp_stat_reg = _pp_stat_reg(intel_dp);
  1402.         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1403.  
  1404.         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
  1405.                       mask, value,
  1406.                         I915_READ(pp_stat_reg),
  1407.                         I915_READ(pp_ctrl_reg));
  1408.  
  1409.         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
  1410.                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
  1411.                                 I915_READ(pp_stat_reg),
  1412.                                 I915_READ(pp_ctrl_reg));
  1413.         }
  1414.  
  1415.         DRM_DEBUG_KMS("Wait complete\n");
  1416. }
  1417.  
/* Block until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
  1423.  
/* Block until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
  1429.  
/*
 * Block until the panel's mandatory power-cycle delay has elapsed and
 * the sequencer is idle, so the panel may be powered on again.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
  1441.  
/* Honour the panel's power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
  1447.  
/* Honour the panel's backlight-off -> power-down delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
  1453.  
  1454. /* Read the current pp_control value, unlocking the register if it
  1455.  * is locked
  1456.  */
  1457.  
  1458. static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
  1459. {
  1460.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1461.         struct drm_i915_private *dev_priv = dev->dev_private;
  1462.         u32 control;
  1463.  
  1464.         lockdep_assert_held(&dev_priv->pps_mutex);
  1465.  
  1466.         control = I915_READ(_pp_ctrl_reg(intel_dp));
  1467.         control &= ~PANEL_UNLOCK_MASK;
  1468.         control |= PANEL_UNLOCK_REGS;
  1469.         return control;
  1470. }
  1471.  
  1472. /*
  1473.  * Must be paired with edp_panel_vdd_off().
  1474.  * Must hold pps_mutex around the whole on/off sequence.
  1475.  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
  1476.  */
/*
 * Force the panel VDD rail on so the AUX channel can be used before the
 * panel itself is powered up.  Records the request in want_panel_vdd and,
 * when the hardware wasn't already forcing VDD, takes a display power
 * domain reference (released again in edp_panel_vdd_off_sync()).
 *
 * Returns true iff this call is the one that turned VDD on, i.e. the
 * caller must balance it with a VDD-off; returns false for non-eDP or
 * when VDD was already requested.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        /* Sampled before we set the flag below: were we first? */
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return false;

        intel_dp->want_panel_vdd = true;

        /* Hardware already has VDD forced on: nothing more to program. */
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->port));

        /* Respect the mandated power-cycle delay before re-powering. */
        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}
  1528.  
  1529. /*
  1530.  * Must be paired with intel_edp_panel_vdd_off() or
  1531.  * intel_edp_panel_off().
  1532.  * Nested calls to these functions are not allowed since
  1533.  * we drop the lock. Caller must use some higher level
  1534.  * locking to prevent nested calls from other threads.
  1535.  */
  1536. void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
  1537. {
  1538.         bool vdd;
  1539.  
  1540.         if (!is_edp(intel_dp))
  1541.                 return;
  1542.  
  1543.         pps_lock(intel_dp);
  1544.         vdd = edp_panel_vdd_on(intel_dp);
  1545.         pps_unlock(intel_dp);
  1546.  
  1547.         WARN(!vdd, "eDP port %c VDD already requested on\n",
  1548.              port_name(dp_to_dig_port(intel_dp)->port));
  1549. }
  1550.  
  1551. static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
  1552. {
  1553.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1554.         struct drm_i915_private *dev_priv = dev->dev_private;
  1555.         struct intel_digital_port *intel_dig_port =
  1556.                 dp_to_dig_port(intel_dp);
  1557.         struct intel_encoder *intel_encoder = &intel_dig_port->base;
  1558.         enum intel_display_power_domain power_domain;
  1559.         u32 pp;
  1560.         u32 pp_stat_reg, pp_ctrl_reg;
  1561.  
  1562.         lockdep_assert_held(&dev_priv->pps_mutex);
  1563.  
  1564.         WARN_ON(intel_dp->want_panel_vdd);
  1565.  
  1566.         if (!edp_have_panel_vdd(intel_dp))
  1567.                 return;
  1568.  
  1569.         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
  1570.                       port_name(intel_dig_port->port));
  1571.  
  1572.                 pp = ironlake_get_pp_control(intel_dp);
  1573.         pp &= ~EDP_FORCE_VDD;
  1574.  
  1575.                 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1576.                 pp_stat_reg = _pp_stat_reg(intel_dp);
  1577.  
  1578.                 I915_WRITE(pp_ctrl_reg, pp);
  1579.                 POSTING_READ(pp_ctrl_reg);
  1580.  
  1581.         /* Make sure sequencer is idle before allowing subsequent activity */
  1582.                 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
  1583.                 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
  1584.  
  1585.                 if ((pp & POWER_TARGET_ON) == 0)
  1586.                         intel_dp->last_power_cycle = jiffies;
  1587.  
  1588.                 power_domain = intel_display_port_power_domain(intel_encoder);
  1589.                 intel_display_power_put(dev_priv, power_domain);
  1590. }
  1591.  
  1592. static void edp_panel_vdd_work(struct work_struct *__work)
  1593. {
  1594.         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
  1595.                                                  struct intel_dp, panel_vdd_work);
  1596.  
  1597.         pps_lock(intel_dp);
  1598.         if (!intel_dp->want_panel_vdd)
  1599.         edp_panel_vdd_off_sync(intel_dp);
  1600.         pps_unlock(intel_dp);
  1601. }
  1602.  
/*
 * Lazily turn VDD off via edp_panel_vdd_work().
 *
 * NOTE(review): the schedule_delayed_work() call is commented out in this
 * port, so `delay` is computed but unused and VDD is never dropped by the
 * deferred path — presumably a deliberate KolibriOS workaround for the
 * missing workqueue infrastructure; confirm before re-enabling.
 */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
        unsigned long delay;

        /*
         * Queue the timer to fire a long time from now (relative to the power
         * down delay) to keep the panel power up across a sequence of
         * operations.
         */
        delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
//   schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
  1615.  
  1616. /*
  1617.  * Must be paired with edp_panel_vdd_on().
  1618.  * Must hold pps_mutex around the whole on/off sequence.
  1619.  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
  1620.  */
  1621. static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
  1622. {
  1623.         struct drm_i915_private *dev_priv =
  1624.                 intel_dp_to_dev(intel_dp)->dev_private;
  1625.  
  1626.         lockdep_assert_held(&dev_priv->pps_mutex);
  1627.  
  1628.         if (!is_edp(intel_dp))
  1629.                 return;
  1630.  
  1631.         WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
  1632.              port_name(dp_to_dig_port(intel_dp)->port));
  1633.  
  1634.         intel_dp->want_panel_vdd = false;
  1635.  
  1636.         if (sync)
  1637.                 edp_panel_vdd_off_sync(intel_dp);
  1638.         else
  1639.                 edp_panel_vdd_schedule_off(intel_dp);
  1640. }
  1641.  
  1642. static void edp_panel_on(struct intel_dp *intel_dp)
  1643. {
  1644.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1645.         struct drm_i915_private *dev_priv = dev->dev_private;
  1646.         u32 pp;
  1647.         u32 pp_ctrl_reg;
  1648.  
  1649.         lockdep_assert_held(&dev_priv->pps_mutex);
  1650.  
  1651.         if (!is_edp(intel_dp))
  1652.                 return;
  1653.  
  1654.         DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
  1655.                       port_name(dp_to_dig_port(intel_dp)->port));
  1656.  
  1657.         if (WARN(edp_have_panel_power(intel_dp),
  1658.                  "eDP port %c panel power already on\n",
  1659.                  port_name(dp_to_dig_port(intel_dp)->port)))
  1660.                 return;
  1661.  
  1662.         wait_panel_power_cycle(intel_dp);
  1663.  
  1664.         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1665.         pp = ironlake_get_pp_control(intel_dp);
  1666.         if (IS_GEN5(dev)) {
  1667.         /* ILK workaround: disable reset around power sequence */
  1668.         pp &= ~PANEL_POWER_RESET;
  1669.                 I915_WRITE(pp_ctrl_reg, pp);
  1670.                 POSTING_READ(pp_ctrl_reg);
  1671.         }
  1672.  
  1673.         pp |= POWER_TARGET_ON;
  1674.         if (!IS_GEN5(dev))
  1675.                 pp |= PANEL_POWER_RESET;
  1676.  
  1677.         I915_WRITE(pp_ctrl_reg, pp);
  1678.         POSTING_READ(pp_ctrl_reg);
  1679.  
  1680.         wait_panel_on(intel_dp);
  1681.         intel_dp->last_power_on = jiffies;
  1682.  
  1683.         if (IS_GEN5(dev)) {
  1684.         pp |= PANEL_POWER_RESET; /* restore panel reset bit */
  1685.                 I915_WRITE(pp_ctrl_reg, pp);
  1686.                 POSTING_READ(pp_ctrl_reg);
  1687.         }
  1688. }
  1689.  
/* Public, locking wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
  1699.  
  1700.  
/*
 * Turn the eDP panel power off.  Requires the caller to hold a VDD
 * request (WARN below) because the sequence clears both the power and the
 * VDD-force bits in one write; also releases the power domain reference
 * that VDD-on took.  Caller holds pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        /* The single write below also drops VDD, so clear the request flag. */
        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Start the power-cycle clock, then wait for the sequencer. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
  1742.  
/* Public, locking wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
  1752.  
  1753. /* Enable backlight in the panel power control. */
  1754. static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
  1755. {
  1756.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  1757.         struct drm_device *dev = intel_dig_port->base.base.dev;
  1758.         struct drm_i915_private *dev_priv = dev->dev_private;
  1759.         u32 pp;
  1760.         u32 pp_ctrl_reg;
  1761.  
  1762.         /*
  1763.          * If we enable the backlight right away following a panel power
  1764.          * on, we may see slight flicker as the panel syncs with the eDP
  1765.          * link.  So delay a bit to make sure the image is solid before
  1766.          * allowing it to appear.
  1767.          */
  1768.         wait_backlight_on(intel_dp);
  1769.  
  1770.         pps_lock(intel_dp);
  1771.  
  1772.         pp = ironlake_get_pp_control(intel_dp);
  1773.         pp |= EDP_BLC_ENABLE;
  1774.  
  1775.         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1776.  
  1777.         I915_WRITE(pp_ctrl_reg, pp);
  1778.         POSTING_READ(pp_ctrl_reg);
  1779.  
  1780.         pps_unlock(intel_dp);
  1781. }
  1782.  
  1783. /* Enable backlight PWM and backlight PP control. */
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* PWM first, then the sequencer's BLC enable bit. */
        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
  1794.  
  1795. /* Disable backlight in the panel power control. */
  1796. static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
  1797. {
  1798.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1799.         struct drm_i915_private *dev_priv = dev->dev_private;
  1800.         u32 pp;
  1801.         u32 pp_ctrl_reg;
  1802.  
  1803.         if (!is_edp(intel_dp))
  1804.                 return;
  1805.  
  1806.         pps_lock(intel_dp);
  1807.  
  1808.         pp = ironlake_get_pp_control(intel_dp);
  1809.         pp &= ~EDP_BLC_ENABLE;
  1810.  
  1811.         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1812.  
  1813.         I915_WRITE(pp_ctrl_reg, pp);
  1814.         POSTING_READ(pp_ctrl_reg);
  1815.  
  1816.         pps_unlock(intel_dp);
  1817.  
  1818.         intel_dp->last_backlight_off = jiffies;
  1819.         edp_wait_backlight_off(intel_dp);
  1820. }
  1821.  
  1822. /* Disable backlight PP control and backlight PWM. */
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* Reverse of intel_edp_backlight_on(): sequencer bit first, then PWM. */
        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
  1833.  
  1834. /*
  1835.  * Hook for controlling the panel power control backlight through the bl_power
  1836.  * sysfs attribute. Take care to handle multiple calls.
  1837.  */
  1838. static void intel_edp_backlight_power(struct intel_connector *connector,
  1839.                                       bool enable)
  1840. {
  1841.         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
  1842.         bool is_enabled;
  1843.  
  1844.         pps_lock(intel_dp);
  1845.         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
  1846.         pps_unlock(intel_dp);
  1847.  
  1848.         if (is_enabled == enable)
  1849.                 return;
  1850.  
  1851.         DRM_DEBUG_KMS("panel power control backlight %s\n",
  1852.                       enable ? "enable" : "disable");
  1853.  
  1854.         if (enable)
  1855.                 _intel_edp_backlight_on(intel_dp);
  1856.         else
  1857.                 _intel_edp_backlight_off(intel_dp);
  1858. }
  1859.  
/*
 * Enable the dedicated eDP PLL via DP_A.  Must run with the pipe disabled
 * (asserted below); WARNs if the PLL or port is already enabled.  The
 * trailing udelay gives the PLL time to lock — exact requirement comes
 * from the hardware spec, not visible here.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200);
}
  1885.  
/*
 * Disable the dedicated eDP PLL via DP_A.  Must run with the pipe
 * disabled; WARNs if the PLL is already off or the port is still enabled.
 * Operates on the live register value rather than intel_dp->DP (see the
 * comment below about link retraining).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        udelay(200);
}
  1910.  
  1911. /* If the sink supports it, try to set the power state appropriately */
  1912. void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
  1913. {
  1914.         int ret, i;
  1915.  
  1916.         /* Should have a valid DPCD by this point */
  1917.         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
  1918.                 return;
  1919.  
  1920.         if (mode != DRM_MODE_DPMS_ON) {
  1921.                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
  1922.                                                   DP_SET_POWER_D3);
  1923.         } else {
  1924.                 /*
  1925.                  * When turning on, we need to retry for 1ms to give the sink
  1926.                  * time to wake up.
  1927.                  */
  1928.                 for (i = 0; i < 3; i++) {
  1929.                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
  1930.                                                           DP_SET_POWER_D0);
  1931.                         if (ret == 1)
  1932.                                 break;
  1933.                         msleep(1);
  1934.                 }
  1935.         }
  1936.  
  1937.         if (ret != 1)
  1938.                 DRM_DEBUG_KMS("failed to %s sink power state\n",
  1939.                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
  1940. }
  1941.  
/*
 * Read back whether this DP encoder is enabled and, if so, which pipe it
 * drives.  Returns false when the power domain is down or DP_PORT_EN is
 * clear; otherwise returns true and fills *pipe using the platform's
 * pipe-select encoding (CPT PCH parts require scanning the transcoder
 * DP-control registers for the matching port-select field).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                /* CPT: pipe selection lives in TRANS_DP_CTL, not the port. */
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        /* Unknown register: report enabled, *pipe untouched. */
                        return true;
                }

                for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}
  2000.  
/*
 * Fill *pipe_config from the current hardware state: audio enable, sync
 * polarity flags (read from the port register, or from TRANS_DP_CTL on
 * CPT PCH parts), color range, link m/n values, port clock and the
 * derived dotclock.  Also applies the eDP VBT bpp override hack described
 * inline below.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_config *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);
        if (tmp & DP_AUDIO_OUTPUT_ENABLE)
                pipe_config->has_audio = true;

        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                /* Sync polarity is encoded in the port register itself. */
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                /* CPT: sync polarity lives in the transcoder DP control reg. */
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A's clock comes from the eDP PLL frequency select in DP_A. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
  2084.  
/*
 * Encoder disable hook: tear down audio, then run the eDP power-down
 * sequence (VDD on -> backlight off -> sink D3 -> panel off), and on
 * pre-ILK hardware also take the link down here since the port must be
 * disabled before the pipe.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config.has_audio)
                intel_audio_codec_disable(encoder);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
  2105.  
  2106. static void ilk_post_disable_dp(struct intel_encoder *encoder)
  2107. {
  2108.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2109.         enum port port = dp_to_dig_port(intel_dp)->port;
  2110.  
  2111.         intel_dp_link_down(intel_dp);
  2112.         if (port == PORT_A)
  2113.         ironlake_edp_pll_off(intel_dp);
  2114. }
  2115.  
  2116. static void vlv_post_disable_dp(struct intel_encoder *encoder)
  2117. {
  2118.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2119.  
  2120.         intel_dp_link_down(intel_dp);
  2121. }
  2122.  
  2123. static void chv_post_disable_dp(struct intel_encoder *encoder)
  2124. {
  2125.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2126.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2127.         struct drm_device *dev = encoder->base.dev;
  2128.         struct drm_i915_private *dev_priv = dev->dev_private;
  2129.         struct intel_crtc *intel_crtc =
  2130.                 to_intel_crtc(encoder->base.crtc);
  2131.         enum dpio_channel ch = vlv_dport_to_channel(dport);
  2132.         enum pipe pipe = intel_crtc->pipe;
  2133.         u32 val;
  2134.  
  2135.                 intel_dp_link_down(intel_dp);
  2136.  
  2137.         mutex_lock(&dev_priv->dpio_lock);
  2138.  
  2139.         /* Propagate soft reset to data lane reset */
  2140.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
  2141.         val |= CHV_PCS_REQ_SOFTRESET_EN;
  2142.         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
  2143.  
  2144.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
  2145.         val |= CHV_PCS_REQ_SOFTRESET_EN;
  2146.         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
  2147.  
  2148.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
  2149.         val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  2150.         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
  2151.  
  2152.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
  2153.         val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  2154.         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
  2155.  
  2156.         mutex_unlock(&dev_priv->dpio_lock);
  2157. }
  2158.  
/*
 * Program the requested DP training pattern (and scrambling disable) into
 * *DP — or, on DDI platforms, directly into DP_TP_CTL.  Three encodings
 * are handled: DDI, CPT PCH (no pattern 3), and the legacy/CHV port
 * register layout.  Note that for the non-DDI paths this only updates the
 * caller's value; the caller is responsible for writing it to the port.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                /* DDI: pattern lives in DP_TP_CTL, written immediately. */
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
                /* CPT PCH encoding; pattern 3 unsupported, falls back to 2. */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                /* Legacy port-register encoding (CHV has a wider mask). */
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Pattern 3 only exists on CHV in this encoding. */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
  2241.  
/*
 * Program the DP port register and then actually enable the port.
 * The register value is written twice: once without DP_PORT_EN and once
 * with it set (see the VLV/CHV note below for why).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        /* Second write: same value plus DP_PORT_EN, then post. */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
  2265.  
  2266. static void intel_enable_dp(struct intel_encoder *encoder)
  2267. {
  2268.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2269.         struct drm_device *dev = encoder->base.dev;
  2270.         struct drm_i915_private *dev_priv = dev->dev_private;
  2271.         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
  2272.         uint32_t dp_reg = I915_READ(intel_dp->output_reg);
  2273.  
  2274.         if (WARN_ON(dp_reg & DP_PORT_EN))
  2275.                 return;
  2276.  
  2277.         pps_lock(intel_dp);
  2278.  
  2279.         if (IS_VALLEYVIEW(dev))
  2280.                 vlv_init_panel_power_sequencer(intel_dp);
  2281.  
  2282.         intel_dp_enable_port(intel_dp);
  2283.  
  2284.         edp_panel_vdd_on(intel_dp);
  2285.         edp_panel_on(intel_dp);
  2286.         edp_panel_vdd_off(intel_dp, true);
  2287.  
  2288.         pps_unlock(intel_dp);
  2289.  
  2290.         if (IS_VALLEYVIEW(dev))
  2291.                 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
  2292.  
  2293.         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
  2294.                         intel_dp_start_link_train(intel_dp);
  2295.                         intel_dp_complete_link_train(intel_dp);
  2296.         intel_dp_stop_link_train(intel_dp);
  2297.  
  2298.         if (crtc->config.has_audio) {
  2299.                 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
  2300.                                  pipe_name(crtc->pipe));
  2301.                 intel_audio_codec_enable(encoder);
  2302.         }
  2303. }
  2304.  
  2305. static void g4x_enable_dp(struct intel_encoder *encoder)
  2306. {
  2307.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2308.  
  2309.         intel_enable_dp(encoder);
  2310.         intel_edp_backlight_on(intel_dp);
  2311. }
  2312.  
  2313. static void vlv_enable_dp(struct intel_encoder *encoder)
  2314. {
  2315.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2316.  
  2317.         intel_edp_backlight_on(intel_dp);
  2318. }
  2319.  
  2320. static void g4x_pre_enable_dp(struct intel_encoder *encoder)
  2321. {
  2322.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2323.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2324.  
  2325.         intel_dp_prepare(encoder);
  2326.  
  2327.         /* Only ilk+ has port A */
  2328.         if (dport->port == PORT_A) {
  2329.                 ironlake_set_pll_cpu_edp(intel_dp);
  2330.                 ironlake_edp_pll_on(intel_dp);
  2331.         }
  2332. }
  2333.  
/*
 * Logically disconnect this port from the power sequencer it currently
 * uses: sync VDD off, clear the PPS port-select field and mark pps_pipe
 * invalid.  Caller must hold pps_mutex (the PPS registers are touched).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        /* Make sure VDD is really off before we let go of the sequencer. */
        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
  2359.  
/*
 * Steal the power sequencer of @pipe: walk all eDP encoders and detach any
 * that currently use that pipe's sequencer, so the caller can claim it.
 * Must be called with pps_mutex held (asserted below).
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Only pipes A and B have a power sequencer on VLV/CHV. */
        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *intel_dp;
                enum port port;

                /* Only eDP ports own power sequencers. */
                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                /* Stealing from an active port indicates a driver bug. */
                WARN(encoder->connectors_active,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
  2396.  
/*
 * Bind the power sequencer of the crtc's pipe to this eDP port: detach any
 * sequencer the port previously used, steal the target pipe's sequencer
 * from other ports if necessary, then (re)initialize the PPS state and
 * registers.  No-op for non-eDP ports or when the binding already matches.
 * Must be called with pps_mutex held (asserted below).
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        /* Already using this pipe's sequencer: nothing to do. */
        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
  2437.  
  2438. static void vlv_pre_enable_dp(struct intel_encoder *encoder)
  2439. {
  2440.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2441.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2442.         struct drm_device *dev = encoder->base.dev;
  2443.         struct drm_i915_private *dev_priv = dev->dev_private;
  2444.         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
  2445.         enum dpio_channel port = vlv_dport_to_channel(dport);
  2446.                 int pipe = intel_crtc->pipe;
  2447.                 u32 val;
  2448.  
  2449.         mutex_lock(&dev_priv->dpio_lock);
  2450.  
  2451.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
  2452.                 val = 0;
  2453.                 if (pipe)
  2454.                         val |= (1<<21);
  2455.                 else
  2456.                         val &= ~(1<<21);
  2457.                 val |= 0x001000c4;
  2458.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
  2459.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
  2460.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
  2461.  
  2462.         mutex_unlock(&dev_priv->dpio_lock);
  2463.  
  2464.         intel_enable_dp(encoder);
  2465. }
  2466.  
/*
 * VLV ->pre_pll_enable() hook: program the port register and reset the Tx
 * lanes / PCS clock logic to their default state before the DPLL comes up.
 * The raw hex values are hardware programming magic.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->dpio_lock);
}
  2496.  
/*
 * CHV ->pre_enable() hook: take the Tx lanes of both PCS groups out of
 * reset, program per-lane latency/upar settings, then run the common DP
 * enable path.  The whole DPIO sequence is order-sensitive.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i;
        u32 val;

        mutex_lock(&dev_priv->dpio_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

        /* Deassert soft data lane reset */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        /* Release both Tx lanes of the first PCS group from reset... */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        /* ...and of the second PCS group. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        /* Program Tx lane latency optimal setting */
        for (i = 0; i < 4; i++) {
                /* Set the latency optimal bit (lane 1 gets 0, others 6) */
                data = (i == 1) ? 0x0 : 0x6;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
                                data << DPIO_FRC_LATENCY_SHFIT);

                /* Set the upar bit (lane 1 gets 0, others 1) */
                data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming */
        /* FIXME: Fix up value only after power analysis */

        mutex_unlock(&dev_priv->dpio_lock);

        intel_enable_dp(encoder);
}
  2558.  
/*
 * CHV ->pre_pll_enable() hook: program the port register, then set up the
 * left/right clock-buffer distribution and the clock channel usage for the
 * port's DPIO channel before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_prepare(encoder);

        mutex_lock(&dev_priv->dpio_lock);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        /* Same setting for the second PCS group. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
  2624.  
  2625. /*
  2626.  * Native read with retry for link status and receiver capability reads for
  2627.  * cases where the sink may still be asleep.
  2628.  *
  2629.  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
  2630.  * supposed to retry 3 times per the spec.
  2631.  */
  2632. static ssize_t
  2633. intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
  2634.                         void *buffer, size_t size)
  2635. {
  2636.         ssize_t ret;
  2637.         int i;
  2638.  
  2639.         /*
  2640.          * Sometime we just get the same incorrect byte repeated
  2641.          * over the entire buffer. Doing just one throw away read
  2642.          * initially seems to "solve" it.
  2643.          */
  2644.         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
  2645.  
  2646.         for (i = 0; i < 3; i++) {
  2647.                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
  2648.                 if (ret == size)
  2649.                         return ret;
  2650.                 msleep(1);
  2651.         }
  2652.  
  2653.         return ret;
  2654. }
  2655.  
  2656. /*
  2657.  * Fetch AUX CH registers 0x202 - 0x207 which contain
  2658.  * link status information
  2659.  */
  2660. static bool
  2661. intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
  2662. {
  2663.         return intel_dp_dpcd_read_wake(&intel_dp->aux,
  2664.                                               DP_LANE0_1_STATUS,
  2665.                                               link_status,
  2666.                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
  2667. }
  2668.  
  2669. /* These are source-specific values. */
  2670. static uint8_t
  2671. intel_dp_voltage_max(struct intel_dp *intel_dp)
  2672. {
  2673.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  2674.         enum port port = dp_to_dig_port(intel_dp)->port;
  2675.  
  2676.         if (INTEL_INFO(dev)->gen >= 9)
  2677.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  2678.         else if (IS_VALLEYVIEW(dev))
  2679.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
  2680.         else if (IS_GEN7(dev) && port == PORT_A)
  2681.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  2682.         else if (HAS_PCH_CPT(dev) && port != PORT_A)
  2683.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
  2684.         else
  2685.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  2686. }
  2687.  
  2688. static uint8_t
  2689. intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
  2690. {
  2691.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  2692.         enum port port = dp_to_dig_port(intel_dp)->port;
  2693.  
  2694.         if (INTEL_INFO(dev)->gen >= 9) {
  2695.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2696.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2697.                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
  2698.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2699.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2700.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2701.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2702.                 default:
  2703.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2704.                 }
  2705.         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  2706.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2707.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2708.                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
  2709.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2710.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2711.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2712.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2713.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2714.                 default:
  2715.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2716.                 }
  2717.         } else if (IS_VALLEYVIEW(dev)) {
  2718.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2719.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2720.                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
  2721.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2722.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2723.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2724.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2725.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2726.                 default:
  2727.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2728.                 }
  2729.         } else if (IS_GEN7(dev) && port == PORT_A) {
  2730.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2731.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2732.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2733.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2734.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2735.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2736.                 default:
  2737.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2738.                 }
  2739.         } else {
  2740.         switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2741.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2742.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2743.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2744.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2745.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2746.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2747.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2748.         default:
  2749.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2750.         }
  2751.         }
  2752. }
  2753.  
/*
 * Translate the negotiated DP training values (voltage swing +
 * pre-emphasis from train_set[0]) into VLV DPIO register writes.
 * The demph/uniqtranscale/preemph constants are hardware programming
 * magic; unsupported swing/pre-emphasis combinations return 0 without
 * touching the hardware.  Always returns 0.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dport->base.base.crtc);
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
                preemph_reg_value = 0x0004000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x552AB83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5548B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B245555;
                        uniqtranscale_reg_value = 0x5560B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
                preemph_reg_value = 0x0002000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5552B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404848;
                        uniqtranscale_reg_value = 0x5580B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
                preemph_reg_value = 0x0000000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B305555;
                        uniqtranscale_reg_value = 0x5570B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B2B4040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
                preemph_reg_value = 0x0006000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x1B405555;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        /* Apply the selected values; TX_DW5 write gates the update
         * (cleared first, set with bit 31 last). */
        mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->dpio_lock);

        return 0;
}
  2853.  
/*
 * Program CHV (Cherryview) PHY voltage swing and pre-emphasis for the
 * training level requested in intel_dp->train_set[0].
 *
 * Unlike the gen4/gen6/gen7 helpers, this does not compute bits for the
 * DP port register: the levels are written directly into the DPIO
 * (PHY sideband) registers under dpio_lock, and 0 is returned (the
 * caller uses mask = 0, so the port register is left untouched).
 * Returns 0 also for invalid swing/pre-emphasis combinations, in which
 * case no registers are touched.
 */
static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/*
	 * Pick the de-emphasis / margin register values for the requested
	 * pre-emphasis + voltage-swing combination. Combinations without a
	 * case are invalid per the DP spec and bail out early.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->dpio_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* Zero the TX margin fields before programming the swing margin below */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* Program swing deemph */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/* Disable unique transition scale */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/*
	 * The max-swing / no-pre-emphasis combination is the only one that
	 * needs the unique transition scale re-enabled (see note below).
	 */
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

		/*
		 * The document said it needs to set bit 27 for ch0 and bit 26
		 * for ch1. Might be a typo in the doc.
		 * For now, for this unique transition scale selection, set bit
		 * 27 for ch0 and ch1.
		 */
		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
		}

		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
		}
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
  3027.  
  3028. static void
  3029. intel_get_adjust_train(struct intel_dp *intel_dp,
  3030.                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
  3031. {
  3032.         uint8_t v = 0;
  3033.         uint8_t p = 0;
  3034.         int lane;
  3035.         uint8_t voltage_max;
  3036.         uint8_t preemph_max;
  3037.  
  3038.         for (lane = 0; lane < intel_dp->lane_count; lane++) {
  3039.                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
  3040.                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
  3041.  
  3042.                 if (this_v > v)
  3043.                         v = this_v;
  3044.                 if (this_p > p)
  3045.                         p = this_p;
  3046.         }
  3047.  
  3048.         voltage_max = intel_dp_voltage_max(intel_dp);
  3049.         if (v >= voltage_max)
  3050.                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
  3051.  
  3052.         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
  3053.         if (p >= preemph_max)
  3054.                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
  3055.  
  3056.         for (lane = 0; lane < 4; lane++)
  3057.                 intel_dp->train_set[lane] = v | p;
  3058. }
  3059.  
  3060. static uint32_t
  3061. intel_gen4_signal_levels(uint8_t train_set)
  3062. {
  3063.         uint32_t        signal_levels = 0;
  3064.  
  3065.         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
  3066.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  3067.         default:
  3068.                 signal_levels |= DP_VOLTAGE_0_4;
  3069.                 break;
  3070.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  3071.                 signal_levels |= DP_VOLTAGE_0_6;
  3072.                 break;
  3073.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  3074.                 signal_levels |= DP_VOLTAGE_0_8;
  3075.                 break;
  3076.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  3077.                 signal_levels |= DP_VOLTAGE_1_2;
  3078.                 break;
  3079.         }
  3080.         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
  3081.         case DP_TRAIN_PRE_EMPH_LEVEL_0:
  3082.         default:
  3083.                 signal_levels |= DP_PRE_EMPHASIS_0;
  3084.                 break;
  3085.         case DP_TRAIN_PRE_EMPH_LEVEL_1:
  3086.                 signal_levels |= DP_PRE_EMPHASIS_3_5;
  3087.                 break;
  3088.         case DP_TRAIN_PRE_EMPH_LEVEL_2:
  3089.                 signal_levels |= DP_PRE_EMPHASIS_6;
  3090.                 break;
  3091.         case DP_TRAIN_PRE_EMPH_LEVEL_3:
  3092.                 signal_levels |= DP_PRE_EMPHASIS_9_5;
  3093.                 break;
  3094.         }
  3095.         return signal_levels;
  3096. }
  3097.  
/* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map the requested DPCD voltage-swing + pre-emphasis combination to the
 * SNB eDP training bits. Unsupported combinations fall back to the
 * lowest setting (400-600mV, 0dB) with a debug message.
 */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
  3125.  
/* Gen7's DP voltage swing and pre-emphasis control */
/*
 * Map the requested DPCD voltage-swing + pre-emphasis combination to the
 * IVB eDP training bits. Unsupported combinations fall back to the
 * 500mV / 0dB setting with a debug message.
 */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
  3156.  
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
/*
 * Map the requested DPCD voltage-swing + pre-emphasis combination to a
 * DDI buffer translation table index. Unsupported combinations fall
 * back to entry 0 with a debug message.
 */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DDI_BUF_TRANS_SELECT(0);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DDI_BUF_TRANS_SELECT(1);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DDI_BUF_TRANS_SELECT(2);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
		return DDI_BUF_TRANS_SELECT(3);

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DDI_BUF_TRANS_SELECT(4);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DDI_BUF_TRANS_SELECT(5);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DDI_BUF_TRANS_SELECT(6);

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DDI_BUF_TRANS_SELECT(7);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DDI_BUF_TRANS_SELECT(8);
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_TRANS_SELECT(0);
	}
}
  3190.  
/* Properly updates "DP" with the correct signal levels. */
/*
 * Dispatch to the platform-specific signal-level helper based on the
 * current train_set[0], then merge the returned bits into *DP under the
 * platform's mask. CHV/VLV program their PHYs directly inside their
 * helpers, so they return 0 with mask 0 and *DP is left unchanged.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = intel_chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		/* gen6/gen7 use dedicated eDP levels only on port A */
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}
  3225.  
/*
 * Program the given training pattern into the source's port register and
 * mirror it to the sink over AUX: DP_TRAINING_PATTERN_SET, followed by
 * the per-lane DP_TRAINING_LANEx_SET values taken from train_set —
 * except on DISABLE, where only the pattern byte is written.
 *
 * Returns true when the whole DPCD write completed.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	/* Translate the pattern into platform-specific bits in *DP first */
	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
					buf, len);

	return ret == len;
}
  3258.  
  3259. static bool
  3260. intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
  3261.                         uint8_t dp_train_pat)
  3262. {
  3263.         memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
  3264.         intel_dp_set_signal_levels(intel_dp, DP);
  3265.         return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
  3266. }
  3267.  
/*
 * One training-adjust step: read the sink's requested adjustments from
 * link_status into train_set, reprogram the source's signal levels, and
 * push the new per-lane values to the sink (DP_TRAINING_LANE0_SET..).
 *
 * Returns true when the lane-set DPCD write completed in full.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
  3288.  
/*
 * On DDI platforms, switch DP_TP_CTL to idle-pattern transmission after
 * training, then wait for the hardware to report the idle pattern done.
 * No-op on non-DDI hardware; on PORT_A the wait is skipped (see below).
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
  3319.  
  3320. /* Enable corresponding port and start training pattern 1 */
  3321. void
  3322. intel_dp_start_link_train(struct intel_dp *intel_dp)
  3323. {
  3324.         struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
  3325.         struct drm_device *dev = encoder->dev;
  3326.         int i;
  3327.         uint8_t voltage;
  3328.         int voltage_tries, loop_tries;
  3329.         uint32_t DP = intel_dp->DP;
  3330.         uint8_t link_config[2];
  3331.  
  3332.         if (HAS_DDI(dev))
  3333.                 intel_ddi_prepare_link_retrain(encoder);
  3334.  
  3335.         /* Write the link configuration data */
  3336.         link_config[0] = intel_dp->link_bw;
  3337.         link_config[1] = intel_dp->lane_count;
  3338.         if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
  3339.                 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
  3340.         drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
  3341.  
  3342.         link_config[0] = 0;
  3343.         link_config[1] = DP_SET_ANSI_8B10B;
  3344.         drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
  3345.  
  3346.         DP |= DP_PORT_EN;
  3347.  
  3348.         /* clock recovery */
  3349.         if (!intel_dp_reset_link_train(intel_dp, &DP,
  3350.                                        DP_TRAINING_PATTERN_1 |
  3351.                                        DP_LINK_SCRAMBLING_DISABLE)) {
  3352.                 DRM_ERROR("failed to enable link training\n");
  3353.                 return;
  3354.         }
  3355.  
  3356.         voltage = 0xff;
  3357.         voltage_tries = 0;
  3358.         loop_tries = 0;
  3359.         for (;;) {
  3360.                 uint8_t     link_status[DP_LINK_STATUS_SIZE];
  3361.  
  3362.                 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
  3363.                 if (!intel_dp_get_link_status(intel_dp, link_status)) {
  3364.                         DRM_ERROR("failed to get link status\n");
  3365.                         break;
  3366.                 }
  3367.  
  3368.                 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
  3369.                         DRM_DEBUG_KMS("clock recovery OK\n");
  3370.                         break;
  3371.                 }
  3372.  
  3373.                 /* Check to see if we've tried the max voltage */
  3374.                 for (i = 0; i < intel_dp->lane_count; i++)
  3375.                         if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
  3376.                                 break;
  3377.                 if (i == intel_dp->lane_count) {
  3378.                         ++loop_tries;
  3379.                         if (loop_tries == 5) {
  3380.                                 DRM_ERROR("too many full retries, give up\n");
  3381.                         break;
  3382.                         }
  3383.                         intel_dp_reset_link_train(intel_dp, &DP,
  3384.                                                   DP_TRAINING_PATTERN_1 |
  3385.                                                   DP_LINK_SCRAMBLING_DISABLE);
  3386.                         voltage_tries = 0;
  3387.                         continue;
  3388.                 }
  3389.  
  3390.                 /* Check to see if we've tried the same voltage 5 times */
  3391.                 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
  3392.                         ++voltage_tries;
  3393.                         if (voltage_tries == 5) {
  3394.                                 DRM_ERROR("too many voltage retries, give up\n");
  3395.                                 break;
  3396.                         }
  3397.                 } else
  3398.                         voltage_tries = 0;
  3399.                 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
  3400.  
  3401.                 /* Update training set as requested by target */
  3402.                 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
  3403.                         DRM_ERROR("failed to update link training\n");
  3404.                         break;
  3405.                 }
  3406.         }
  3407.  
  3408.         intel_dp->DP = DP;
  3409. }
  3410.  
/*
 * Channel-equalization phase of DP link training (runs after clock
 * recovery from intel_dp_start_link_train() succeeded).
 *
 * Switches to training pattern 2 (or 3 for HBR2 / TPS3-capable sinks)
 * and loops: re-check clock recovery (restarting the whole training if
 * it was lost), stop when channel EQ is reported OK, otherwise adjust
 * drive levels as requested. Bails out after more than 5 clock-recovery
 * restarts. Finishes by switching to idle-pattern transmission and
 * caching the final port register value in intel_dp->DP.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t     link_status[DP_LINK_STATUS_SIZE];

		/* Too many full-training restarts: abort. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
  3490.  
/*
 * End link training: write the DISABLE training pattern to the port and
 * the sink, returning the link to normal (scrambled) operation.
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
  3496.  
  3497. static void
  3498. intel_dp_link_down(struct intel_dp *intel_dp)
  3499. {
  3500.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  3501.         enum port port = intel_dig_port->port;
  3502.         struct drm_device *dev = intel_dig_port->base.base.dev;
  3503.         struct drm_i915_private *dev_priv = dev->dev_private;
  3504.         struct intel_crtc *intel_crtc =
  3505.                 to_intel_crtc(intel_dig_port->base.base.crtc);
  3506.         uint32_t DP = intel_dp->DP;
  3507.  
  3508.         if (WARN_ON(HAS_DDI(dev)))
  3509.                 return;
  3510.  
  3511.         if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
  3512.                 return;
  3513.  
  3514.         DRM_DEBUG_KMS("\n");
  3515.  
  3516.         if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
  3517.                 DP &= ~DP_LINK_TRAIN_MASK_CPT;
  3518.                 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
  3519.         } else {
  3520.                 if (IS_CHERRYVIEW(dev))
  3521.                         DP &= ~DP_LINK_TRAIN_MASK_CHV;
  3522.                 else
  3523.                 DP &= ~DP_LINK_TRAIN_MASK;
  3524.                 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
  3525.         }
  3526.         POSTING_READ(intel_dp->output_reg);
  3527.  
  3528.         if (HAS_PCH_IBX(dev) &&
  3529.             I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
  3530.                 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
  3531.  
  3532.                 /* Hardware workaround: leaving our transcoder select
  3533.                  * set to transcoder B while it's off will prevent the
  3534.                  * corresponding HDMI output on transcoder A.
  3535.                  *
  3536.                  * Combine this with another hardware workaround:
  3537.                  * transcoder select bit can only be cleared while the
  3538.                  * port is enabled.
  3539.                  */
  3540.                 DP &= ~DP_PIPEB_SELECT;
  3541.                 I915_WRITE(intel_dp->output_reg, DP);
  3542.  
  3543.                 /* Changes to enable or select take place the vblank
  3544.                  * after being written.
  3545.                  */
  3546.                 if (WARN_ON(crtc == NULL)) {
  3547.                         /* We should never try to disable a port without a crtc
  3548.                          * attached. For paranoia keep the code around for a
  3549.                          * bit. */
  3550.                         POSTING_READ(intel_dp->output_reg);
  3551.                         msleep(50);
  3552.                 } else
  3553.                         intel_wait_for_vblank(dev, intel_crtc->pipe);
  3554.         }
  3555.  
  3556.         DP &= ~DP_AUDIO_OUTPUT_ENABLE;
  3557.         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
  3558.         POSTING_READ(intel_dp->output_reg);
  3559.         msleep(intel_dp->panel_power_down_delay);
  3560. }
  3561.  
  3562. static bool
  3563. intel_dp_get_dpcd(struct intel_dp *intel_dp)
  3564. {
  3565.         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  3566.         struct drm_device *dev = dig_port->base.base.dev;
  3567.         struct drm_i915_private *dev_priv = dev->dev_private;
  3568.  
  3569.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
  3570.                                     sizeof(intel_dp->dpcd)) < 0)
  3571.                 return false; /* aux transfer failed */
  3572.  
  3573.         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
  3574.  
  3575.         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
  3576.                 return false; /* DPCD not present */
  3577.  
  3578.         /* Check if the panel supports PSR */
  3579.         memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
  3580.         if (is_edp(intel_dp)) {
  3581.                 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
  3582.                                        intel_dp->psr_dpcd,
  3583.                                        sizeof(intel_dp->psr_dpcd));
  3584.                 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
  3585.                         dev_priv->psr.sink_support = true;
  3586.                 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
  3587.                 }
  3588.         }
  3589.  
  3590.         /* Training Pattern 3 support, both source and sink */
  3591.         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
  3592.             intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
  3593.             (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
  3594.                 intel_dp->use_tps3 = true;
  3595.                 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
  3596.         } else
  3597.                 intel_dp->use_tps3 = false;
  3598.  
  3599.         if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
  3600.               DP_DWN_STRM_PORT_PRESENT))
  3601.                 return true; /* native DP sink */
  3602.  
  3603.         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
  3604.                 return true; /* no per-port downstream info */
  3605.  
  3606.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
  3607.                                            intel_dp->downstream_ports,
  3608.                                     DP_MAX_DOWNSTREAM_PORTS) < 0)
  3609.                 return false; /* downstream port status fetch failed */
  3610.  
  3611.                 return true;
  3612. }
  3613.  
  3614. static void
  3615. intel_dp_probe_oui(struct intel_dp *intel_dp)
  3616. {
  3617.         u8 buf[3];
  3618.  
  3619.         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
  3620.                 return;
  3621.  
  3622.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
  3623.                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
  3624.                               buf[0], buf[1], buf[2]);
  3625.  
  3626.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
  3627.                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
  3628.                               buf[0], buf[1], buf[2]);
  3629. }
  3630.  
  3631. static bool
  3632. intel_dp_probe_mst(struct intel_dp *intel_dp)
  3633. {
  3634.         u8 buf[1];
  3635.  
  3636.         if (!intel_dp->can_mst)
  3637.                 return false;
  3638.  
  3639.         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
  3640.                 return false;
  3641.  
  3642.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
  3643.                 if (buf[0] & DP_MST_CAP) {
  3644.                         DRM_DEBUG_KMS("Sink is MST capable\n");
  3645.                         intel_dp->is_mst = true;
  3646.                 } else {
  3647.                         DRM_DEBUG_KMS("Sink is not MST capable\n");
  3648.                         intel_dp->is_mst = false;
  3649.                 }
  3650.         }
  3651.  
  3652.         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
  3653.         return intel_dp->is_mst;
  3654. }
  3655.  
/*
 * Ask the sink to compute a frame CRC and read the result back over DPCD.
 *
 * @crc receives 6 bytes read from DP_TEST_CRC_R_CR onward.
 * Returns 0 on success, -ENOTTY if the sink cannot compute CRCs,
 * -ETIMEDOUT if no new CRC appears within 6 vblanks, or -EIO on an aux
 * transfer failure.  The exact DPCD read-modify-write order below matters.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
        u8 buf;
        int test_crc_count;
        int attempts = 6;

        /* Bail out early if the sink does not support CRC generation. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
                return -EIO;

        if (!(buf & DP_TEST_CRC_SUPPORTED))
                return -ENOTTY;

        /* Start CRC generation: read-modify-write TEST_SINK to keep
         * whatever other bits are already set. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
                return -EIO;

        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
                                buf | DP_TEST_SINK_START) < 0)
                return -EIO;

        /* Snapshot the CRC frame count so we can tell when a fresh
         * CRC has been computed. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
                return -EIO;
        test_crc_count = buf & DP_TEST_COUNT_MASK;

        /* Wait up to 6 vblanks for the sink's frame count to advance. */
        do {
                if (drm_dp_dpcd_readb(&intel_dp->aux,
                                      DP_TEST_SINK_MISC, &buf) < 0)
                        return -EIO;
        intel_wait_for_vblank(dev, intel_crtc->pipe);
        } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

        if (attempts == 0) {
                DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
                return -ETIMEDOUT;
        }

        /* Fetch the six CRC bytes starting at DP_TEST_CRC_R_CR. */
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
                return -EIO;

        /* Stop CRC generation, again preserving the other TEST_SINK bits. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
                return -EIO;
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
                               buf & ~DP_TEST_SINK_START) < 0)
                return -EIO;

        return 0;
}
  3706.  
  3707. static bool
  3708. intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
  3709. {
  3710.         return intel_dp_dpcd_read_wake(&intel_dp->aux,
  3711.                                              DP_DEVICE_SERVICE_IRQ_VECTOR,
  3712.                                        sink_irq_vector, 1) == 1;
  3713. }
  3714.  
  3715. static bool
  3716. intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
  3717. {
  3718.         int ret;
  3719.  
  3720.         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
  3721.                                              DP_SINK_COUNT_ESI,
  3722.                                              sink_irq_vector, 14);
  3723.         if (ret != 14)
  3724.                 return false;
  3725.  
  3726.         return true;
  3727. }
  3728.  
  3729. static void
  3730. intel_dp_handle_test_request(struct intel_dp *intel_dp)
  3731. {
  3732.         /* NAK by default */
  3733.         drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
  3734. }
  3735.  
/*
 * Service an MST sink interrupt: read the ESI block, retrain the link if
 * channel EQ dropped, dispatch the HPD IRQ to the topology manager, and
 * ack the events back to the sink — looping while more events arrive.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL when not in MST mode or the ESI read failed (in which case MST
 * is torn down and a hotplug event is sent).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
        bool bret;

        if (intel_dp->is_mst) {
                u8 esi[16] = { 0 };
                int ret = 0;
                int retry;
                bool handled;
                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
                if (bret == true) {

                        /* check link status - esi[10] = 0x200c */
                        if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
                                DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
                                intel_dp_start_link_train(intel_dp);
                                intel_dp_complete_link_train(intel_dp);
                                intel_dp_stop_link_train(intel_dp);
                        }

                        DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                        ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

                        if (handled) {
                                /* Ack the handled events; retry the 3-byte
                                 * write a few times since aux can be flaky. */
                                for (retry = 0; retry < 3; retry++) {
                                        int wret;
                                        wret = drm_dp_dpcd_write(&intel_dp->aux,
                                                                 DP_SINK_COUNT_ESI+1,
                                                                 &esi[1], 3);
                                        if (wret == 3) {
                                                break;
                                        }
                                }

                                /* More events may have arrived meanwhile;
                                 * re-read the ESI and loop if so. */
                                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
                                if (bret == true) {
                                        DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                                        goto go_again;
                                }
                        } else
                                ret = 0;

                        return ret;
                } else {
                        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
                        DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
                        /* send a hotplug event */
                        drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
                }
        }
        return -EINVAL;
}
  3792.  
  3793. /*
  3794.  * According to DP spec
  3795.  * 5.1.2:
  3796.  *  1. Read DPCD
  3797.  *  2. Configure link according to Receiver Capabilities
  3798.  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
  3799.  *  4. Check link status on receipt of hot-plug interrupt
  3800.  */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        u8 sink_irq_vector;
        u8 link_status[DP_LINK_STATUS_SIZE];

        /* Caller must hold the connection mutex (we touch link training). */
        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

        /* Only bother when the output is actually lit up. */
        if (!intel_encoder->connectors_active)
                return;

        if (WARN_ON(!intel_encoder->base.crtc))
                return;

        if (!to_intel_crtc(intel_encoder->base.crtc)->active)
                return;

        /* Try to read receiver status if the link appears to be up */
        if (!intel_dp_get_link_status(intel_dp, link_status)) {
                return;
        }

        /* Now read the DPCD to see if it's actually running */
        if (!intel_dp_get_dpcd(intel_dp)) {
                return;
        }

        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
                /* Clear interrupt source */
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                            DP_DEVICE_SERVICE_IRQ_VECTOR,
                                            sink_irq_vector);

                if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
                        intel_dp_handle_test_request(intel_dp);
                if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }

        /* Retrain if channel equalization has been lost. */
        if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
                              intel_encoder->base.name);
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
                intel_dp_stop_link_train(intel_dp);
        }
}
  3852.  
  3853. /* XXX this is probably wrong for multiple downstream ports */
  3854. static enum drm_connector_status
  3855. intel_dp_detect_dpcd(struct intel_dp *intel_dp)
  3856. {
  3857.         uint8_t *dpcd = intel_dp->dpcd;
  3858.         uint8_t type;
  3859.  
  3860.         if (!intel_dp_get_dpcd(intel_dp))
  3861.                 return connector_status_disconnected;
  3862.  
  3863.         /* if there's no downstream port, we're done */
  3864.         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
  3865.                 return connector_status_connected;
  3866.  
  3867.         /* If we're HPD-aware, SINK_COUNT changes dynamically */
  3868.         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
  3869.             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
  3870.                 uint8_t reg;
  3871.  
  3872.                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
  3873.                                             &reg, 1) < 0)
  3874.                         return connector_status_unknown;
  3875.  
  3876.                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
  3877.                                               : connector_status_disconnected;
  3878.         }
  3879.  
  3880.         /* If no HPD, poke DDC gently */
  3881.         if (drm_probe_ddc(&intel_dp->aux.ddc))
  3882.                 return connector_status_connected;
  3883.  
  3884.         /* Well we tried, say unknown for unreliable port types */
  3885.         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
  3886.         type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
  3887.                 if (type == DP_DS_PORT_TYPE_VGA ||
  3888.                     type == DP_DS_PORT_TYPE_NON_EDID)
  3889.                 return connector_status_unknown;
  3890.         } else {
  3891.                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
  3892.                         DP_DWN_STRM_PORT_TYPE_MASK;
  3893.                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
  3894.                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
  3895.                         return connector_status_unknown;
  3896.         }
  3897.  
  3898.         /* Anything else is out of spec, warn and ignore */
  3899.         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
  3900.         return connector_status_disconnected;
  3901. }
  3902.  
  3903. static enum drm_connector_status
  3904. edp_detect(struct intel_dp *intel_dp)
  3905. {
  3906.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  3907.         enum drm_connector_status status;
  3908.  
  3909.                 status = intel_panel_detect(dev);
  3910.                 if (status == connector_status_unknown)
  3911.                         status = connector_status_connected;
  3912.  
  3913.                 return status;
  3914. }
  3915.  
  3916. static enum drm_connector_status
  3917. ironlake_dp_detect(struct intel_dp *intel_dp)
  3918. {
  3919.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  3920.         struct drm_i915_private *dev_priv = dev->dev_private;
  3921.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  3922.  
  3923.         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
  3924.                 return connector_status_disconnected;
  3925.  
  3926.         return intel_dp_detect_dpcd(intel_dp);
  3927. }
  3928.  
  3929. static int g4x_digital_port_connected(struct drm_device *dev,
  3930.                                        struct intel_digital_port *intel_dig_port)
  3931. {
  3932.         struct drm_i915_private *dev_priv = dev->dev_private;
  3933.         uint32_t bit;
  3934.  
  3935.         if (IS_VALLEYVIEW(dev)) {
  3936.                 switch (intel_dig_port->port) {
  3937.                 case PORT_B:
  3938.                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
  3939.                         break;
  3940.                 case PORT_C:
  3941.                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
  3942.                         break;
  3943.                 case PORT_D:
  3944.                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
  3945.                         break;
  3946.                 default:
  3947.                         return -EINVAL;
  3948.                 }
  3949.         } else {
  3950.         switch (intel_dig_port->port) {
  3951.         case PORT_B:
  3952.                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
  3953.                 break;
  3954.         case PORT_C:
  3955.                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
  3956.                 break;
  3957.         case PORT_D:
  3958.                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
  3959.                 break;
  3960.         default:
  3961.                         return -EINVAL;
  3962.         }
  3963.         }
  3964.  
  3965.         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
  3966.                 return 0;
  3967.         return 1;
  3968. }
  3969.  
  3970. static enum drm_connector_status
  3971. g4x_dp_detect(struct intel_dp *intel_dp)
  3972. {
  3973.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  3974.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  3975.         int ret;
  3976.  
  3977.         /* Can't disconnect eDP, but you can close the lid... */
  3978.         if (is_edp(intel_dp)) {
  3979.                 enum drm_connector_status status;
  3980.  
  3981.                 status = intel_panel_detect(dev);
  3982.                 if (status == connector_status_unknown)
  3983.                         status = connector_status_connected;
  3984.                 return status;
  3985.         }
  3986.  
  3987.         ret = g4x_digital_port_connected(dev, intel_dig_port);
  3988.         if (ret == -EINVAL)
  3989.                 return connector_status_unknown;
  3990.         else if (ret == 0)
  3991.                 return connector_status_disconnected;
  3992.  
  3993.         return intel_dp_detect_dpcd(intel_dp);
  3994. }
  3995.  
  3996. static struct edid *
  3997. intel_dp_get_edid(struct intel_dp *intel_dp)
  3998. {
  3999.         struct intel_connector *intel_connector = intel_dp->attached_connector;
  4000.  
  4001.         /* use cached edid if we have one */
  4002.         if (intel_connector->edid) {
  4003.                 /* invalid edid */
  4004.                 if (IS_ERR(intel_connector->edid))
  4005.                         return NULL;
  4006.  
  4007.                 return drm_edid_duplicate(intel_connector->edid);
  4008.         } else
  4009.                 return drm_get_edid(&intel_connector->base,
  4010.                                     &intel_dp->aux.ddc);
  4011. }
  4012.  
  4013. static void
  4014. intel_dp_set_edid(struct intel_dp *intel_dp)
  4015. {
  4016.         struct intel_connector *intel_connector = intel_dp->attached_connector;
  4017.         struct edid *edid;
  4018.  
  4019.         edid = intel_dp_get_edid(intel_dp);
  4020.         intel_connector->detect_edid = edid;
  4021.  
  4022.         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
  4023.                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
  4024.         else
  4025.                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
  4026. }
  4027.  
  4028. static void
  4029. intel_dp_unset_edid(struct intel_dp *intel_dp)
  4030. {
  4031.         struct intel_connector *intel_connector = intel_dp->attached_connector;
  4032.  
  4033.         kfree(intel_connector->detect_edid);
  4034.         intel_connector->detect_edid = NULL;
  4035.  
  4036.         intel_dp->has_audio = false;
  4037. }
  4038.  
  4039. static enum intel_display_power_domain
  4040. intel_dp_power_get(struct intel_dp *dp)
  4041. {
  4042.         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
  4043.         enum intel_display_power_domain power_domain;
  4044.  
  4045.         power_domain = intel_display_port_power_domain(encoder);
  4046.         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
  4047.  
  4048.         return power_domain;
  4049. }
  4050.  
  4051. static void
  4052. intel_dp_power_put(struct intel_dp *dp,
  4053.                    enum intel_display_power_domain power_domain)
  4054. {
  4055.         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
  4056.         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
  4057. }
  4058.  
/*
 * Connector ->detect() hook: determine whether a sink is present, probe
 * OUI and MST capability, and cache the EDID for later ->get_modes().
 * The platform-specific probe runs with a power domain reference held.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
        enum intel_display_power_domain power_domain;
        bool ret;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
        /* Invalidate whatever the previous detect cycle cached. */
        intel_dp_unset_edid(intel_dp);

        if (intel_dp->is_mst) {
                /* MST devices are disconnected from a monitor POV */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                return connector_status_disconnected;
        }

        /* Everything below runs with the port's power domain held. */
        power_domain = intel_dp_power_get(intel_dp);

        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp))
                status = edp_detect(intel_dp);
        else if (HAS_PCH_SPLIT(dev))
                status = ironlake_dp_detect(intel_dp);
        else
                status = g4x_dp_detect(intel_dp);
        if (status != connector_status_connected)
                goto out;

        intel_dp_probe_oui(intel_dp);

        ret = intel_dp_probe_mst(intel_dp);
        if (ret) {
                /* if we are in MST mode then this connector
                   won't appear connected or have anything with EDID on it */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                status = connector_status_disconnected;
                goto out;
        }

        intel_dp_set_edid(intel_dp);

        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        status = connector_status_connected;

out:
        /* Balance the intel_dp_power_get() above on every exit path. */
        intel_dp_power_put(intel_dp, power_domain);
        return status;
}
  4115.  
  4116. static void
  4117. intel_dp_force(struct drm_connector *connector)
  4118. {
  4119.         struct intel_dp *intel_dp = intel_attached_dp(connector);
  4120.         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
  4121.         enum intel_display_power_domain power_domain;
  4122.  
  4123.         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
  4124.                       connector->base.id, connector->name);
  4125.         intel_dp_unset_edid(intel_dp);
  4126.  
  4127.         if (connector->status != connector_status_connected)
  4128.                 return;
  4129.  
  4130.         power_domain = intel_dp_power_get(intel_dp);
  4131.  
  4132.         intel_dp_set_edid(intel_dp);
  4133.  
  4134.         intel_dp_power_put(intel_dp, power_domain);
  4135.  
  4136.         if (intel_encoder->type != INTEL_OUTPUT_EDP)
  4137.                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
  4138. }
  4139.  
  4140. static int intel_dp_get_modes(struct drm_connector *connector)
  4141. {
  4142.         struct intel_connector *intel_connector = to_intel_connector(connector);
  4143.         struct edid *edid;
  4144.  
  4145.         edid = intel_connector->detect_edid;
  4146.         if (edid) {
  4147.                 int ret = intel_connector_update_modes(connector, edid);
  4148.         if (ret)
  4149.                 return ret;
  4150.         }
  4151.  
  4152.         /* if eDP has no EDID, fall back to fixed mode */
  4153.         if (is_edp(intel_attached_dp(connector)) &&
  4154.             intel_connector->panel.fixed_mode) {
  4155.                         struct drm_display_mode *mode;
  4156.  
  4157.                 mode = drm_mode_duplicate(connector->dev,
  4158.                                           intel_connector->panel.fixed_mode);
  4159.                 if (mode) {
  4160.                         drm_mode_probed_add(connector, mode);
  4161.                         return 1;
  4162.                 }
  4163.         }
  4164.  
  4165.         return 0;
  4166. }
  4167.  
  4168. static bool
  4169. intel_dp_detect_audio(struct drm_connector *connector)
  4170. {
  4171.         bool has_audio = false;
  4172.         struct edid *edid;
  4173.  
  4174.         edid = to_intel_connector(connector)->detect_edid;
  4175.         if (edid)
  4176.                 has_audio = drm_detect_monitor_audio(edid);
  4177.  
  4178.         return has_audio;
  4179. }
  4180.  
/*
 * Connector ->set_property() hook.  Handles the force-audio, broadcast
 * RGB range and (eDP only) panel scaling properties; anything that
 * actually changes state jumps to "done" to restore the crtc mode.
 * Returns 0 on success or a negative errno.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
                      struct drm_property *property,
                      uint64_t val)
{
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
        int ret;

        /* Record the new value on the drm object first. */
        ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;

        if (property == dev_priv->force_audio_property) {
                int i = val;
                bool has_audio;

                if (i == intel_dp->force_audio)
                        return 0;

                intel_dp->force_audio = i;

                /* AUTO defers to the EDID; ON/OFF override it. */
                if (i == HDMI_AUDIO_AUTO)
                        has_audio = intel_dp_detect_audio(connector);
                else
                        has_audio = (i == HDMI_AUDIO_ON);

                if (has_audio == intel_dp->has_audio)
                        return 0;

                intel_dp->has_audio = has_audio;
                goto done;
        }

        if (property == dev_priv->broadcast_rgb_property) {
                bool old_auto = intel_dp->color_range_auto;
                uint32_t old_range = intel_dp->color_range;

                switch (val) {
                case INTEL_BROADCAST_RGB_AUTO:
                        intel_dp->color_range_auto = true;
                        break;
                case INTEL_BROADCAST_RGB_FULL:
                        intel_dp->color_range_auto = false;
                        intel_dp->color_range = 0;
                        break;
                case INTEL_BROADCAST_RGB_LIMITED:
                        intel_dp->color_range_auto = false;
                        intel_dp->color_range = DP_COLOR_RANGE_16_235;
                        break;
                default:
                        return -EINVAL;
                }

                /* No effective change: skip the expensive mode restore. */
                if (old_auto == intel_dp->color_range_auto &&
                    old_range == intel_dp->color_range)
                        return 0;

        goto done;
        }

        if (is_edp(intel_dp) &&
            property == connector->dev->mode_config.scaling_mode_property) {
                if (val == DRM_MODE_SCALE_NONE) {
                        DRM_DEBUG_KMS("no scaling not supported\n");
                        return -EINVAL;
                }

                if (intel_connector->panel.fitting_mode == val) {
                        /* the eDP scaling property is not changed */
                        return 0;
                }
                intel_connector->panel.fitting_mode = val;

                goto done;
        }

        return -EINVAL;

done:
        /* State changed: re-apply the current mode so it takes effect. */
        if (intel_encoder->base.crtc)
                intel_crtc_restore_mode(intel_encoder->base.crtc);

        return 0;
}
  4268.  
  4269. static void
  4270. intel_dp_connector_destroy(struct drm_connector *connector)
  4271. {
  4272.         struct intel_connector *intel_connector = to_intel_connector(connector);
  4273.  
  4274.         kfree(intel_connector->detect_edid);
  4275.  
  4276.         if (!IS_ERR_OR_NULL(intel_connector->edid))
  4277.                 kfree(intel_connector->edid);
  4278.  
  4279.         /* Can't call is_edp() since the encoder may have been destroyed
  4280.          * already. */
  4281.         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
  4282.                 intel_panel_fini(&intel_connector->panel);
  4283.  
  4284.         drm_connector_cleanup(connector);
  4285.         kfree(connector);
  4286. }
  4287.  
/*
 * Encoder ->destroy() hook: unregister the aux channel, tear down MST,
 * and — for eDP — make sure panel VDD is really off before freeing.
 * The cancel/lock ordering below must be preserved.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;

        drm_dp_aux_unregister(&intel_dp->aux);
        intel_dp_mst_encoder_cleanup(intel_dig_port);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
                /* Stop the delayed VDD-off work before taking pps_lock. */
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
                pps_unlock(intel_dp);

        }
        kfree(intel_dig_port);
}
  4309.  
/*
 * Suspend hook: on eDP, flush the pending delayed VDD-off work and force
 * VDD off now so we don't suspend with the panel VDD rail (and its power
 * domain reference) still held.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

        if (!is_edp(intel_dp))
                return;

        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
  4326.  
/*
 * Reconcile our VDD state tracking with what the BIOS left behind at
 * boot/resume: if panel VDD is already on, take the matching power-domain
 * reference and schedule the normal delayed VDD-off path.
 * Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!edp_have_panel_vdd(intel_dp))
                return;

        /*
         * The VDD bit needs a power domain reference, so if the bit is
         * already enabled when we boot or resume, grab this reference and
         * schedule a vdd off, so we don't hold on to the reference
         * indefinitely.
         */
        DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
        power_domain = intel_display_port_power_domain(&intel_dig_port->base);
        intel_display_power_get(dev_priv, power_domain);

        edp_panel_vdd_schedule_off(intel_dp);
}
  4351.  
/*
 * Encoder .reset hook (boot/resume): for eDP only, re-read the power
 * sequencer assignment the BIOS may have changed and sanitize any VDD
 * state it left enabled.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
        struct intel_dp *intel_dp;

        if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
                return;

        intel_dp = enc_to_intel_dp(encoder);

        pps_lock(intel_dp);

        /*
         * Read out the current power sequencer assignment,
         * in case the BIOS did something with it.
         */
        if (IS_VALLEYVIEW(encoder->dev))
                vlv_initial_power_sequencer_setup(intel_dp);

        intel_edp_panel_vdd_sanitize(intel_dp);

        pps_unlock(intel_dp);
}
  4374.  
/* Connector ops shared by DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_dp_detect,
        .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .destroy = intel_dp_connector_destroy,
};
  4383.  
/* Probe-helper ops: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
        .best_encoder = intel_best_encoder,
};
  4389.  
/* Encoder ops: reset sanitizes BIOS PPS/VDD state, destroy frees the port. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .reset = intel_dp_encoder_reset,
        .destroy = intel_dp_encoder_destroy,
};
  4394.  
  4395. void
  4396. intel_dp_hot_plug(struct intel_encoder *intel_encoder)
  4397. {
  4398.         return;
  4399. }
  4400.  
  4401. bool
  4402. intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
  4403. {
  4404.         struct intel_dp *intel_dp = &intel_dig_port->dp;
  4405.         struct intel_encoder *intel_encoder = &intel_dig_port->base;
  4406.         struct drm_device *dev = intel_dig_port->base.base.dev;
  4407.         struct drm_i915_private *dev_priv = dev->dev_private;
  4408.         enum intel_display_power_domain power_domain;
  4409.         bool ret = true;
  4410.  
  4411.         if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
  4412.                 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
  4413.  
  4414.         if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
  4415.                 /*
  4416.                  * vdd off can generate a long pulse on eDP which
  4417.                  * would require vdd on to handle it, and thus we
  4418.                  * would end up in an endless cycle of
  4419.                  * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
  4420.                  */
  4421.                 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
  4422.                               port_name(intel_dig_port->port));
  4423.                 return false;
  4424.         }
  4425.  
  4426.         DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
  4427.                       port_name(intel_dig_port->port),
  4428.                       long_hpd ? "long" : "short");
  4429.  
  4430.         power_domain = intel_display_port_power_domain(intel_encoder);
  4431.         intel_display_power_get(dev_priv, power_domain);
  4432.  
  4433.         if (long_hpd) {
  4434.  
  4435.                 if (HAS_PCH_SPLIT(dev)) {
  4436.                 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
  4437.                         goto mst_fail;
  4438.                 } else {
  4439.                         if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
  4440.                                 goto mst_fail;
  4441.                 }
  4442.  
  4443.                 if (!intel_dp_get_dpcd(intel_dp)) {
  4444.                         goto mst_fail;
  4445.                 }
  4446.  
  4447.                 intel_dp_probe_oui(intel_dp);
  4448.  
  4449.                 if (!intel_dp_probe_mst(intel_dp))
  4450.                         goto mst_fail;
  4451.  
  4452.         } else {
  4453.                 if (intel_dp->is_mst) {
  4454.                         if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
  4455.                                 goto mst_fail;
  4456.                 }
  4457.  
  4458.                 if (!intel_dp->is_mst) {
  4459.                         /*
  4460.                          * we'll check the link status via the normal hot plug path later -
  4461.                          * but for short hpds we should check it now
  4462.                          */
  4463.                         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
  4464.                         intel_dp_check_link_status(intel_dp);
  4465.                         drm_modeset_unlock(&dev->mode_config.connection_mutex);
  4466.                 }
  4467.         }
  4468.         ret = false;
  4469.         goto put_power;
  4470. mst_fail:
  4471.         /* if we were in MST mode, and device is not there get out of MST mode */
  4472.         if (intel_dp->is_mst) {
  4473.                 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
  4474.                 intel_dp->is_mst = false;
  4475.                 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
  4476.         }
  4477. put_power:
  4478.         intel_display_power_put(dev_priv, power_domain);
  4479.  
  4480.         return ret;
  4481. }
  4482.  
  4483. /* Return which DP Port should be selected for Transcoder DP control */
  4484. int
  4485. intel_trans_dp_port_sel(struct drm_crtc *crtc)
  4486. {
  4487.         struct drm_device *dev = crtc->dev;
  4488.         struct intel_encoder *intel_encoder;
  4489.         struct intel_dp *intel_dp;
  4490.  
  4491.         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
  4492.                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
  4493.  
  4494.                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
  4495.                     intel_encoder->type == INTEL_OUTPUT_EDP)
  4496.                         return intel_dp->output_reg;
  4497.         }
  4498.  
  4499.         return -1;
  4500. }
  4501.  
/*
 * Check whether the DP port at @port drives an eDP panel.
 * Port A is always eDP; ports B-D are looked up in the VBT child device
 * list by matching dvo_port and the eDP bits of device_type.
 */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        union child_device_config *p_child;
        int i;
        /* VBT dvo_port code for each DP port we can look up. */
        static const short port_mapping[] = {
                [PORT_B] = PORT_IDPB,
                [PORT_C] = PORT_IDPC,
                [PORT_D] = PORT_IDPD,
        };

        if (port == PORT_A)
                return true;

        if (!dev_priv->vbt.child_dev_num)
                return false;

        /*
         * NOTE(review): port_mapping[] only covers PORT_B..PORT_D; this
         * assumes callers never pass a port beyond PORT_D here — TODO confirm.
         */
        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
                p_child = dev_priv->vbt.child_dev + i;

                if (p_child->common.dvo_port == port_mapping[port] &&
                    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
                    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
                        return true;
        }
        return false;
}
  4530.  
/*
 * Attach the standard DP connector properties (force-audio, broadcast
 * RGB) and, for eDP, the scaling-mode property defaulting to aspect-ratio
 * scaling.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);

        intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
        intel_dp->color_range_auto = true;

        if (is_edp(intel_dp)) {
                drm_mode_create_scaling_mode_property(connector->dev);
                drm_object_attach_property(
                        &connector->base,
                        connector->dev->mode_config.scaling_mode_property,
                        DRM_MODE_SCALE_ASPECT);
                /* Keep the cached fitting mode in sync with the default. */
                intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
        }
}
  4549.  
/*
 * Initialize the panel power-sequencing timestamps to "now" so the first
 * panel power/backlight transitions honour the mandated delays.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
        intel_dp->last_power_cycle = jiffies;
        intel_dp->last_power_on = jiffies;
        intel_dp->last_backlight_off = jiffies;
}
  4556.  
/*
 * Compute the eDP panel power sequencing delays (T1/T3, T8, T9, T10,
 * T11/T12) by taking the max of what the current registers and the VBT
 * report, falling back to eDP spec limits when both are zero. The result
 * is cached in intel_dp->pps_delays and mirrored into the millisecond
 * delay fields used by the wait helpers. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;
        u32 pp_on, pp_off, pp_div, pp;
        int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* already initialized? */
        if (final->t11_t12 != 0)
                return;

        /* Pick the PPS register bank: fixed on PCH, per-pipe on VLV/CHV. */
        if (HAS_PCH_SPLIT(dev)) {
                pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp = ironlake_get_pp_control(intel_dp);
        I915_WRITE(pp_ctrl_reg, pp);

        pp_on = I915_READ(pp_on_reg);
        pp_off = I915_READ(pp_off_reg);
        pp_div = I915_READ(pp_div_reg);

        /* Pull timing values out of registers */
        cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
                PANEL_POWER_UP_DELAY_SHIFT;

        cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                PANEL_LIGHT_ON_DELAY_SHIFT;

        cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                PANEL_LIGHT_OFF_DELAY_SHIFT;

        cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                PANEL_POWER_DOWN_DELAY_SHIFT;

        cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

        DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

        vbt = dev_priv->vbt.edp_pps;

        /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
         * our hw here, which are all in 100usec. */
        spec.t1_t3 = 210 * 10;
        spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
        spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
        spec.t10 = 500 * 10;
        /* This one is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;

        DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
#define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
        assign_final(t8);
        assign_final(t9);
        assign_final(t10);
        assign_final(t11_t12);
#undef assign_final

        /* Convert from 100usec hw units to ms (rounding up). */
#define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
        intel_dp->panel_power_down_delay = get_delay(t10);
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

        DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
                      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
                      intel_dp->panel_power_cycle_delay);

        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
  4659.  
/*
 * Program the panel power sequencer registers from the delays cached in
 * intel_dp->pps_delays, including the reference-clock divisor and the
 * panel port selection bits (where the hardware still has them).
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
        int pp_on_reg, pp_off_reg, pp_div_reg;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Pick the PPS register bank: fixed on PCH, per-pipe on VLV/CHV. */
        if (HAS_PCH_SPLIT(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /*
         * And finally store the new values in the power sequencer. The
         * backlight delays are set to 1 because we do manual waits on them. For
         * T8, even BSpec recommends doing it. For T9, if we don't do this,
         * we'll end up waiting for the backlight off delay twice: once when we
         * do the manual sleep, and once when we disable the panel and wait for
         * the PP_STATUS bit to become zero.
         */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
        pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                        << PANEL_POWER_CYCLE_DELAY_SHIFT);

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
        }

        pp_on |= port_sel;

        I915_WRITE(pp_on_reg, pp_on);
        I915_WRITE(pp_off_reg, pp_off);
        I915_WRITE(pp_div_reg, pp_div);

        /* Read back so the log shows what the hardware actually latched. */
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(pp_on_reg),
                      I915_READ(pp_off_reg),
                      I915_READ(pp_div_reg));
}
  4725.  
  4726. void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
  4727. {
  4728.         struct drm_i915_private *dev_priv = dev->dev_private;
  4729.         struct intel_encoder *encoder;
  4730.         struct intel_dp *intel_dp = NULL;
  4731.         struct intel_crtc_config *config = NULL;
  4732.         struct intel_crtc *intel_crtc = NULL;
  4733.         struct intel_connector *intel_connector = dev_priv->drrs.connector;
  4734.         u32 reg, val;
  4735.         enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
  4736.  
  4737.         if (refresh_rate <= 0) {
  4738.                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
  4739.                 return;
  4740.         }
  4741.  
  4742.         if (intel_connector == NULL) {
  4743.                 DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
  4744.                 return;
  4745.         }
  4746.  
  4747.         /*
  4748.          * FIXME: This needs proper synchronization with psr state. But really
  4749.          * hard to tell without seeing the user of this function of this code.
  4750.          * Check locking and ordering once that lands.
  4751.          */
  4752.         if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
  4753.                 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
  4754.                 return;
  4755.         }
  4756.  
  4757.         encoder = intel_attached_encoder(&intel_connector->base);
  4758.         intel_dp = enc_to_intel_dp(&encoder->base);
  4759.         intel_crtc = encoder->new_crtc;
  4760.  
  4761.         if (!intel_crtc) {
  4762.                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
  4763.                 return;
  4764.         }
  4765.  
  4766.         config = &intel_crtc->config;
  4767.  
  4768.         if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
  4769.                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
  4770.                 return;
  4771.         }
  4772.  
  4773.         if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
  4774.                 index = DRRS_LOW_RR;
  4775.  
  4776.         if (index == intel_dp->drrs_state.refresh_rate_type) {
  4777.                 DRM_DEBUG_KMS(
  4778.                         "DRRS requested for previously set RR...ignoring\n");
  4779.                 return;
  4780.         }
  4781.  
  4782.         if (!intel_crtc->active) {
  4783.                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
  4784.                 return;
  4785.         }
  4786.  
  4787.         if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
  4788.                 reg = PIPECONF(intel_crtc->config.cpu_transcoder);
  4789.                 val = I915_READ(reg);
  4790.                 if (index > DRRS_HIGH_RR) {
  4791.                         val |= PIPECONF_EDP_RR_MODE_SWITCH;
  4792.                         intel_dp_set_m_n(intel_crtc);
  4793.                 } else {
  4794.                         val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
  4795.                 }
  4796.                 I915_WRITE(reg, val);
  4797.         }
  4798.  
  4799.         /*
  4800.          * mutex taken to ensure that there is no race between differnt
  4801.          * drrs calls trying to update refresh rate. This scenario may occur
  4802.          * in future when idleness detection based DRRS in kernel and
  4803.          * possible calls from user space to set differnt RR are made.
  4804.          */
  4805.  
  4806.         mutex_lock(&intel_dp->drrs_state.mutex);
  4807.  
  4808.         intel_dp->drrs_state.refresh_rate_type = index;
  4809.  
  4810.         mutex_unlock(&intel_dp->drrs_state.mutex);
  4811.  
  4812.         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
  4813. }
  4814.  
/*
 * Set up seamless DRRS for an eDP panel if possible.
 *
 * Requires gen 7+, VBT-advertised seamless DRRS support, and a suitable
 * downclocked panel mode. On success, records the connector as the DRRS
 * connector, initializes the drrs state, and returns the downclocked
 * mode; returns NULL when DRRS cannot be used.
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector,
                        struct drm_display_mode *fixed_mode)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *downclock_mode = NULL;

        if (INTEL_INFO(dev)->gen <= 6) {
                DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
                return NULL;
        }

        if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
                return NULL;
        }

        downclock_mode = intel_find_panel_downclock
                                        (dev, fixed_mode, connector);

        if (!downclock_mode) {
                DRM_DEBUG_KMS("DRRS not supported\n");
                return NULL;
        }

        dev_priv->drrs.connector = intel_connector;

        mutex_init(&intel_dp->drrs_state.mutex);

        intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;

        /* Start out at the full refresh rate. */
        intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
        DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
        return downclock_mode;
}
  4854.  
/*
 * eDP-specific connector initialization: sanitize BIOS VDD state, verify
 * the panel answers over DPCD (otherwise treat it as a ghost), program
 * the power sequencer, cache the EDID, pick the fixed panel mode (EDID
 * preferred, VBT fallback), set up DRRS, and initialize panel/backlight
 * state. Returns false only when the DPCD read fails; for non-eDP ports
 * it is a no-op returning true.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        enum pipe pipe = INVALID_PIPE;

        intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;

        if (!is_edp(intel_dp))
                return true;

        pps_lock(intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);
        pps_unlock(intel_dp);

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_dp_get_dpcd(intel_dp);

        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                        dev_priv->no_aux_handshake =
                                intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
        } else {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                return false;
        }

        /* We now know it's not a ghost, init power sequence regs. */
        pps_lock(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
        pps_unlock(intel_dp);

        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
                                                                edid);
                        drm_edid_to_eld(connector, edid);
                } else {
                        /* EDID present but unparseable: cache an error marker. */
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        /* May be a valid pointer or an ERR_PTR; see intel_dp_connector_destroy. */
        intel_connector->edid = edid;

        /* prefer fixed mode from EDID if available */
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        downclock_mode = intel_dp_drrs_init(
                                                intel_dig_port,
                                                intel_connector, fixed_mode);
                        break;
                }
        }

        /* fallback to VBT if available for eDP */
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode)
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev)) {

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                if (IS_CHERRYVIEW(dev))
                        pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
                else
                        pipe = PORT_TO_PIPE(intel_dp->DP);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight_power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        return true;
}
  4962.  
/*
 * Finish bringing up a DP/eDP connector on an already-allocated digital
 * port: select per-platform AUX vfuncs, create and register the DRM
 * connector, wire up the HPD pin, initialize the panel power sequencer
 * (eDP only), the AUX channel and — on HSW/BDW ports B-D — MST support,
 * then attach the connector properties.
 *
 * Returns true on success.  On failure, everything registered here is
 * unwound and false is returned; the caller is expected to free the
 * digital port and connector allocations.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	/* No power sequencer assigned yet; resolved later (vlv) or unused. */
	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: pick the AUX clock divider per platform/gen. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	/* AUX send-control register layout differs on gen9+ (SKL). */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	/* VBT/strap decides whether this port is an internal panel. */
	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
	type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Deferred VDD-off work; must be cancelled on the failure path. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	/* DDI platforms read connector state through the DDI path. */
	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
	intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
			break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
			break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
			break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
			break;
	default:
		BUG();
	}

	/* Panel power sequencer setup, under the pps mutex. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
			intel_dp_mst_encoder_init(intel_dig_port,
						  intel_connector->base.base.id);
		}
	}

	/* Failure: unwind AUX, pending VDD work, and the connector. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
	if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled do to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
  5106.  
  5107. void
  5108. intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
  5109. {
  5110.         struct drm_i915_private *dev_priv = dev->dev_private;
  5111.         struct intel_digital_port *intel_dig_port;
  5112.         struct intel_encoder *intel_encoder;
  5113.         struct drm_encoder *encoder;
  5114.         struct intel_connector *intel_connector;
  5115.  
  5116.         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
  5117.         if (!intel_dig_port)
  5118.                 return;
  5119.  
  5120.         intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
  5121.         if (!intel_connector) {
  5122.                 kfree(intel_dig_port);
  5123.                 return;
  5124.         }
  5125.  
  5126.         intel_encoder = &intel_dig_port->base;
  5127.         encoder = &intel_encoder->base;
  5128.  
  5129.         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
  5130.                          DRM_MODE_ENCODER_TMDS);
  5131.  
  5132.         intel_encoder->compute_config = intel_dp_compute_config;
  5133.         intel_encoder->disable = intel_disable_dp;
  5134.         intel_encoder->get_hw_state = intel_dp_get_hw_state;
  5135.         intel_encoder->get_config = intel_dp_get_config;
  5136.         intel_encoder->suspend = intel_dp_encoder_suspend;
  5137.         if (IS_CHERRYVIEW(dev)) {
  5138.                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
  5139.                 intel_encoder->pre_enable = chv_pre_enable_dp;
  5140.                 intel_encoder->enable = vlv_enable_dp;
  5141.                 intel_encoder->post_disable = chv_post_disable_dp;
  5142.         } else if (IS_VALLEYVIEW(dev)) {
  5143.                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
  5144.                 intel_encoder->pre_enable = vlv_pre_enable_dp;
  5145.                 intel_encoder->enable = vlv_enable_dp;
  5146.                 intel_encoder->post_disable = vlv_post_disable_dp;
  5147.         } else {
  5148.                 intel_encoder->pre_enable = g4x_pre_enable_dp;
  5149.                 intel_encoder->enable = g4x_enable_dp;
  5150.                 if (INTEL_INFO(dev)->gen >= 5)
  5151.                         intel_encoder->post_disable = ilk_post_disable_dp;
  5152.         }
  5153.  
  5154.         intel_dig_port->port = port;
  5155.         intel_dig_port->dp.output_reg = output_reg;
  5156.  
  5157.         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
  5158.         if (IS_CHERRYVIEW(dev)) {
  5159.                 if (port == PORT_D)
  5160.                         intel_encoder->crtc_mask = 1 << 2;
  5161.                 else
  5162.                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
  5163.         } else {
  5164.         intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
  5165.         }
  5166.         intel_encoder->cloneable = 0;
  5167.         intel_encoder->hot_plug = intel_dp_hot_plug;
  5168.  
  5169.         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
  5170.         dev_priv->hpd_irq_port[port] = intel_dig_port;
  5171.  
  5172.         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
  5173.                 drm_encoder_cleanup(encoder);
  5174.                 kfree(intel_dig_port);
  5175.                 kfree(intel_connector);
  5176.         }
  5177. }
  5178.  
  5179. void intel_dp_mst_suspend(struct drm_device *dev)
  5180. {
  5181.         struct drm_i915_private *dev_priv = dev->dev_private;
  5182.         int i;
  5183.  
  5184.         /* disable MST */
  5185.         for (i = 0; i < I915_MAX_PORTS; i++) {
  5186.                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
  5187.                 if (!intel_dig_port)
  5188.                         continue;
  5189.  
  5190.                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
  5191.                         if (!intel_dig_port->dp.can_mst)
  5192.                                 continue;
  5193.                         if (intel_dig_port->dp.is_mst)
  5194.                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
  5195.                 }
  5196.         }
  5197. }
  5198.  
  5199. void intel_dp_mst_resume(struct drm_device *dev)
  5200. {
  5201.         struct drm_i915_private *dev_priv = dev->dev_private;
  5202.         int i;
  5203.  
  5204.         for (i = 0; i < I915_MAX_PORTS; i++) {
  5205.                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
  5206.                 if (!intel_dig_port)
  5207.                         continue;
  5208.                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
  5209.                         int ret;
  5210.  
  5211.                         if (!intel_dig_port->dp.can_mst)
  5212.                                 continue;
  5213.  
  5214.                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
  5215.                         if (ret != 0) {
  5216.                                 intel_dp_check_mst_status(&intel_dig_port->dp);
  5217.                         }
  5218.                 }
  5219.         }
  5220. }
  5221.