Subversion Repositories Kolibri OS

Rev

Rev 5139 | Rev 5367 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2008 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Keith Packard <keithp@keithp.com>
  25.  *
  26.  */
  27.  
  28. #include <linux/i2c.h>
  29. #include <linux/slab.h>
  30. #include <linux/export.h>
  31. #include <drm/drmP.h>
  32. #include <drm/drm_crtc.h>
  33. #include <drm/drm_crtc_helper.h>
  34. #include <drm/drm_edid.h>
  35. #include "intel_drv.h"
  36. #include <drm/i915_drm.h>
  37. #include "i915_drv.h"
  38.  
  39. #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
  40.  
/*
 * Maps a DP link bandwidth code to the DPLL divider settings that
 * produce the matching link clock on a given platform.
 */
struct dp_link_dpll {
        int link_bw;            /* DP_LINK_BW_* code */
        struct dpll dpll;       /* divider values for that link rate */
};
  45.  
/* Gen4 DPLL settings for the two standard DP link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
  52.  
/* PCH-split platform DPLL settings for the two standard DP link rates. */
static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
  59.  
/* Valleyview DPLL settings for the two standard DP link rates. */
static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
  66.  
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; the variable rates are excluded.
 */
/* Cherryview DPLL settings for the fixed eDP 1.4 link rates. */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires to program fractional division for m2.
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
  84.  
  85. /**
  86.  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  87.  * @intel_dp: DP struct
  88.  *
  89.  * If a CPU or PCH DP output is attached to an eDP panel, this function
  90.  * will return true, and false otherwise.
  91.  */
  92. static bool is_edp(struct intel_dp *intel_dp)
  93. {
  94.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  95.  
  96.         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
  97. }
  98.  
  99. static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
  100. {
  101.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  102.  
  103.         return intel_dig_port->base.base.dev;
  104. }
  105.  
  106. static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
  107. {
  108.         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
  109. }
  110.  
  111. static void intel_dp_link_down(struct intel_dp *intel_dp);
  112. static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
  113. static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
  114. static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
  115. static void vlv_steal_power_sequencer(struct drm_device *dev,
  116.                                       enum pipe pipe);
  117.  
  118. int
  119. intel_dp_max_link_bw(struct intel_dp *intel_dp)
  120. {
  121.         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
  122.         struct drm_device *dev = intel_dp->attached_connector->base.dev;
  123.  
  124.         switch (max_link_bw) {
  125.         case DP_LINK_BW_1_62:
  126.         case DP_LINK_BW_2_7:
  127.                 break;
  128.         case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
  129.                 if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
  130.                      INTEL_INFO(dev)->gen >= 8) &&
  131.                     intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
  132.                         max_link_bw = DP_LINK_BW_5_4;
  133.                 else
  134.                         max_link_bw = DP_LINK_BW_2_7;
  135.                 break;
  136.         default:
  137.                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
  138.                      max_link_bw);
  139.                 max_link_bw = DP_LINK_BW_1_62;
  140.                 break;
  141.         }
  142.         return max_link_bw;
  143. }
  144.  
  145. static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
  146. {
  147.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  148.         struct drm_device *dev = intel_dig_port->base.base.dev;
  149.         u8 source_max, sink_max;
  150.  
  151.         source_max = 4;
  152.         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
  153.             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
  154.                 source_max = 2;
  155.  
  156.         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
  157.  
  158.         return min(source_max, sink_max);
  159. }
  160.  
  161. /*
  162.  * The units on the numbers in the next two are... bizarre.  Examples will
  163.  * make it clearer; this one parallels an example in the eDP spec.
  164.  *
  165.  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
  166.  *
  167.  *     270000 * 1 * 8 / 10 == 216000
  168.  *
  169.  * The actual data capacity of that configuration is 2.16Gbit/s, so the
  170.  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
  171.  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
  172.  * 119000.  At 18bpp that's 2142000 kilobits per second.
  173.  *
  174.  * Thus the strange-looking division by 10 in intel_dp_link_required, to
  175.  * get the result in decakilobits instead of kilobits.
  176.  */
  177.  
  178. static int
  179. intel_dp_link_required(int pixel_clock, int bpp)
  180. {
  181.         return (pixel_clock * bpp + 9) / 10;
  182. }
  183.  
  184. static int
  185. intel_dp_max_data_rate(int max_link_clock, int max_lanes)
  186. {
  187.         return (max_link_clock * max_lanes * 8) / 10;
  188. }
  189.  
/*
 * Validate @mode against the panel's fixed mode (for eDP) and the
 * maximum bandwidth the link configuration can carry.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;

        /* eDP panels can't show modes larger than the fixed panel mode. */
        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                /* The panel is always driven at its fixed clock. */
                target_clock = fixed_mode->clock;
        }

        max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        /* 18 bpp here — presumably the lowest (6bpc) case; confirm against
         * the bpp selection in compute_config. */
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (mode_rate > max_rate)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}
  227.  
  228. uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
  229. {
  230.         int     i;
  231.         uint32_t v = 0;
  232.  
  233.         if (src_bytes > 4)
  234.                 src_bytes = 4;
  235.         for (i = 0; i < src_bytes; i++)
  236.                 v |= ((uint32_t) src[i]) << ((3-i) * 8);
  237.         return v;
  238. }
  239.  
  240. void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
  241. {
  242.         int i;
  243.         if (dst_bytes > 4)
  244.                 dst_bytes = 4;
  245.         for (i = 0; i < dst_bytes; i++)
  246.                 dst[i] = src >> ((3-i) * 8);
  247. }
  248.  
  249. /* hrawclock is 1/4 the FSB frequency */
  250. static int
  251. intel_hrawclk(struct drm_device *dev)
  252. {
  253.         struct drm_i915_private *dev_priv = dev->dev_private;
  254.         uint32_t clkcfg;
  255.  
  256.         /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
  257.         if (IS_VALLEYVIEW(dev))
  258.                 return 200;
  259.  
  260.         clkcfg = I915_READ(CLKCFG);
  261.         switch (clkcfg & CLKCFG_FSB_MASK) {
  262.         case CLKCFG_FSB_400:
  263.                 return 100;
  264.         case CLKCFG_FSB_533:
  265.                 return 133;
  266.         case CLKCFG_FSB_667:
  267.                 return 166;
  268.         case CLKCFG_FSB_800:
  269.                 return 200;
  270.         case CLKCFG_FSB_1067:
  271.                 return 266;
  272.         case CLKCFG_FSB_1333:
  273.                 return 333;
  274.         /* these two are just a guess; one of them might be right */
  275.         case CLKCFG_FSB_1600:
  276.         case CLKCFG_FSB_1600_ALT:
  277.                 return 400;
  278.         default:
  279.                 return 133;
  280.         }
  281. }
  282.  
  283. static void
  284. intel_dp_init_panel_power_sequencer(struct drm_device *dev,
  285.                                     struct intel_dp *intel_dp);
  286. static void
  287. intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
  288.                                               struct intel_dp *intel_dp);
  289.  
  290. static void pps_lock(struct intel_dp *intel_dp)
  291. {
  292.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  293.         struct intel_encoder *encoder = &intel_dig_port->base;
  294.         struct drm_device *dev = encoder->base.dev;
  295.         struct drm_i915_private *dev_priv = dev->dev_private;
  296.         enum intel_display_power_domain power_domain;
  297.  
  298.         /*
  299.          * See vlv_power_sequencer_reset() why we need
  300.          * a power domain reference here.
  301.          */
  302.         power_domain = intel_display_port_power_domain(encoder);
  303.         intel_display_power_get(dev_priv, power_domain);
  304.  
  305.         mutex_lock(&dev_priv->pps_mutex);
  306. }
  307.  
  308. static void pps_unlock(struct intel_dp *intel_dp)
  309. {
  310.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  311.         struct intel_encoder *encoder = &intel_dig_port->base;
  312.         struct drm_device *dev = encoder->base.dev;
  313.         struct drm_i915_private *dev_priv = dev->dev_private;
  314.         enum intel_display_power_domain power_domain;
  315.  
  316.         mutex_unlock(&dev_priv->pps_mutex);
  317.  
  318.         power_domain = intel_display_port_power_domain(encoder);
  319.         intel_display_power_put(dev_priv, power_domain);
  320. }
  321.  
  322. static void
  323. vlv_power_sequencer_kick(struct intel_dp *intel_dp)
  324. {
  325.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  326.         struct drm_device *dev = intel_dig_port->base.base.dev;
  327.         struct drm_i915_private *dev_priv = dev->dev_private;
  328.         enum pipe pipe = intel_dp->pps_pipe;
  329.         bool pll_enabled;
  330.         uint32_t DP;
  331.  
  332.         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
  333.                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
  334.                  pipe_name(pipe), port_name(intel_dig_port->port)))
  335.                 return;
  336.  
  337.         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
  338.                       pipe_name(pipe), port_name(intel_dig_port->port));
  339.  
  340.         /* Preserve the BIOS-computed detected bit. This is
  341.          * supposed to be read-only.
  342.          */
  343.         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
  344.         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
  345.         DP |= DP_PORT_WIDTH(1);
  346.         DP |= DP_LINK_TRAIN_PAT_1;
  347.  
  348.         if (IS_CHERRYVIEW(dev))
  349.                 DP |= DP_PIPE_SELECT_CHV(pipe);
  350.         else if (pipe == PIPE_B)
  351.                 DP |= DP_PIPEB_SELECT;
  352.  
  353.         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
  354.  
  355.         /*
  356.          * The DPLL for the pipe must be enabled for this to work.
  357.          * So enable temporarily it if it's not already enabled.
  358.          */
  359.         if (!pll_enabled)
  360.                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
  361.                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
  362.  
  363.         /*
  364.          * Similar magic as in intel_dp_enable_port().
  365.          * We _must_ do this port enable + disable trick
  366.          * to make this power seqeuencer lock onto the port.
  367.          * Otherwise even VDD force bit won't work.
  368.          */
  369.         I915_WRITE(intel_dp->output_reg, DP);
  370.         POSTING_READ(intel_dp->output_reg);
  371.  
  372.         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
  373.         POSTING_READ(intel_dp->output_reg);
  374.  
  375.         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
  376.         POSTING_READ(intel_dp->output_reg);
  377.  
  378.         if (!pll_enabled)
  379.                 vlv_force_pll_off(dev, pipe);
  380. }
  381.  
/*
 * vlv_power_sequencer_pipe - get (or assign) the power sequencer pipe
 * @intel_dp: eDP struct
 *
 * Returns the pipe whose panel power sequencer this eDP port uses,
 * assigning a free one (and stealing it from any other user, then
 * initializing and kicking it) if none is assigned yet.
 * Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        /* Bitmask of candidate pipes; only A and B have power sequencers. */
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                /* Remove pipes already claimed by other eDP ports. */
                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
  445.  
  446. typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
  447.                                enum pipe pipe);
  448.  
  449. static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
  450.                                enum pipe pipe)
  451. {
  452.         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
  453. }
  454.  
  455. static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
  456.                                 enum pipe pipe)
  457. {
  458.         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
  459. }
  460.  
/* Catch-all pipe check: accepts any pipe unconditionally. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
  466.  
  467. static enum pipe
  468. vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
  469.                      enum port port,
  470.                      vlv_pipe_check pipe_check)
  471. {
  472.         enum pipe pipe;
  473.  
  474.         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
  475.                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
  476.                         PANEL_PORT_SELECT_MASK;
  477.  
  478.                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
  479.                         continue;
  480.  
  481.                 if (!pipe_check(dev_priv, pipe))
  482.                         continue;
  483.  
  484.                         return pipe;
  485.         }
  486.  
  487.         return INVALID_PIPE;
  488. }
  489.  
/*
 * Work out which pipe's power sequencer the BIOS left driving this eDP
 * port, preferring (in order) a sequencer with panel power on, one with
 * VDD on, and finally any sequencer selecting the port. Initializes the
 * sequencer state when one is found; otherwise leaves pps_pipe invalid
 * so vlv_power_sequencer_pipe() can assign one later.
 * Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
  526.  
  527. void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
  528. {
  529.         struct drm_device *dev = dev_priv->dev;
  530.         struct intel_encoder *encoder;
  531.  
  532.         if (WARN_ON(!IS_VALLEYVIEW(dev)))
  533.                 return;
  534.  
  535.         /*
  536.          * We can't grab pps_mutex here due to deadlock with power_domain
  537.          * mutex when power_domain functions are called while holding pps_mutex.
  538.          * That also means that in order to use pps_pipe the code needs to
  539.          * hold both a power domain reference and pps_mutex, and the power domain
  540.          * reference get/put must be done while _not_ holding pps_mutex.
  541.          * pps_{lock,unlock}() do these steps in the correct order, so one
  542.          * should use them always.
  543.          */
  544.  
  545.         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
  546.                 struct intel_dp *intel_dp;
  547.  
  548.                 if (encoder->type != INTEL_OUTPUT_EDP)
  549.                         continue;
  550.  
  551.                 intel_dp = enc_to_intel_dp(&encoder->base);
  552.                 intel_dp->pps_pipe = INVALID_PIPE;
  553.         }
  554. }
  555.  
  556. static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
  557. {
  558.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  559.  
  560.         if (HAS_PCH_SPLIT(dev))
  561.                 return PCH_PP_CONTROL;
  562.         else
  563.                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
  564. }
  565.  
  566. static u32 _pp_stat_reg(struct intel_dp *intel_dp)
  567. {
  568.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  569.  
  570.         if (HAS_PCH_SPLIT(dev))
  571.                 return PCH_PP_STATUS;
  572.         else
  573.                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
  574. }
  575.  
#if 0
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
/* NOTE: compiled out in this port (#if 0). */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        /* Only act on eDP ports, and only for an actual restart. */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                /* Wait out the power cycle so T12 timing is honoured. */
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
#endif
  613.  
  614. static bool edp_have_panel_power(struct intel_dp *intel_dp)
  615. {
  616.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  617.         struct drm_i915_private *dev_priv = dev->dev_private;
  618.  
  619.         lockdep_assert_held(&dev_priv->pps_mutex);
  620.  
  621.         if (IS_VALLEYVIEW(dev) &&
  622.             intel_dp->pps_pipe == INVALID_PIPE)
  623.                 return false;
  624.  
  625.         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
  626. }
  627.  
  628. static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
  629. {
  630.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  631.         struct drm_i915_private *dev_priv = dev->dev_private;
  632.  
  633.         lockdep_assert_held(&dev_priv->pps_mutex);
  634.  
  635.         if (IS_VALLEYVIEW(dev) &&
  636.             intel_dp->pps_pipe == INVALID_PIPE)
  637.                 return false;
  638.  
  639.         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
  640. }
  641.  
  642. static void
  643. intel_dp_check_edp(struct intel_dp *intel_dp)
  644. {
  645.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  646.         struct drm_i915_private *dev_priv = dev->dev_private;
  647.  
  648.         if (!is_edp(intel_dp))
  649.                 return;
  650.  
  651.         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
  652.                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
  653.                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
  654.                               I915_READ(_pp_stat_reg(intel_dp)),
  655.                               I915_READ(_pp_ctrl_reg(intel_dp)));
  656.         }
  657. }
  658.  
/*
 * Wait for the current AUX transfer to complete, either via the AUX
 * interrupt or by polling. Returns the last value read from the AUX
 * control register so the caller can inspect the status bits.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C is true once SEND_BUSY clears; as a side effect it latches the
 * register value into 'status', which is what gets returned below. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
  682.  
  683. static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  684. {
  685.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  686.         struct drm_device *dev = intel_dig_port->base.base.dev;
  687.  
  688.         /*
  689.          * The clock divider is based off the hrawclk, and would like to run at
  690.          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
  691.          */
  692.         return index ? 0 : intel_hrawclk(dev) / 2;
  693. }
  694.  
  695. static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  696. {
  697.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  698.         struct drm_device *dev = intel_dig_port->base.base.dev;
  699.  
  700.         if (index)
  701.                 return 0;
  702.  
  703.         if (intel_dig_port->port == PORT_A) {
  704.                 if (IS_GEN6(dev) || IS_GEN7(dev))
  705.                         return 200; /* SNB & IVB eDP input clock at 400Mhz */
  706.                 else
  707.                         return 225; /* eDP input clock at 450Mhz */
  708.         } else {
  709.                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
  710.         }
  711. }
  712.  
  713. static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  714. {
  715.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  716.         struct drm_device *dev = intel_dig_port->base.base.dev;
  717.         struct drm_i915_private *dev_priv = dev->dev_private;
  718.  
  719.         if (intel_dig_port->port == PORT_A) {
  720.                 if (index)
  721.                         return 0;
  722.                 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
  723.         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
  724.                 /* Workaround for non-ULT HSW */
  725.                 switch (index) {
  726.                 case 0: return 63;
  727.                 case 1: return 72;
  728.                 default: return 0;
  729.                 }
  730.         } else  {
  731.                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
  732.         }
  733. }
  734.  
  735. static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  736. {
  737.         return index ? 0 : 100;
  738. }
  739.  
  740. static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  741. {
  742.         /*
  743.          * SKL doesn't need us to program the AUX clock divider (Hardware will
  744.          * derive the clock from CDCLK automatically). We still implement the
  745.          * get_aux_clock_divider vfunc to plug-in into the existing code.
  746.          */
  747.         return index ? 0 : 1;
  748. }
  749.  
  750. static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
  751.                                       bool has_aux_irq,
  752.                                       int send_bytes,
  753.                                       uint32_t aux_clock_divider)
  754. {
  755.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  756.         struct drm_device *dev = intel_dig_port->base.base.dev;
  757.         uint32_t precharge, timeout;
  758.  
  759.         if (IS_GEN6(dev))
  760.                 precharge = 3;
  761.         else
  762.                 precharge = 5;
  763.  
  764.         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
  765.                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
  766.         else
  767.                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
  768.  
  769.         return DP_AUX_CH_CTL_SEND_BUSY |
  770.                DP_AUX_CH_CTL_DONE |
  771.                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
  772.                DP_AUX_CH_CTL_TIME_OUT_ERROR |
  773.                timeout |
  774.                DP_AUX_CH_CTL_RECEIVE_ERROR |
  775.                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
  776.                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
  777.                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
  778. }
  779.  
  780. static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
  781.                                       bool has_aux_irq,
  782.                                       int send_bytes,
  783.                                       uint32_t unused)
  784. {
  785.         return DP_AUX_CH_CTL_SEND_BUSY |
  786.                DP_AUX_CH_CTL_DONE |
  787.                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
  788.                DP_AUX_CH_CTL_TIME_OUT_ERROR |
  789.                DP_AUX_CH_CTL_TIME_OUT_1600us |
  790.                DP_AUX_CH_CTL_RECEIVE_ERROR |
  791.                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
  792.                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
  793. }
  794.  
  795. static int
  796. intel_dp_aux_ch(struct intel_dp *intel_dp,
  797.                 const uint8_t *send, int send_bytes,
  798.                 uint8_t *recv, int recv_size)
  799. {
  800.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  801.         struct drm_device *dev = intel_dig_port->base.base.dev;
  802.         struct drm_i915_private *dev_priv = dev->dev_private;
  803.         uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
  804.         uint32_t ch_data = ch_ctl + 4;
  805.         uint32_t aux_clock_divider;
  806.         int i, ret, recv_bytes;
  807.         uint32_t status;
  808.         int try, clock = 0;
  809.         bool has_aux_irq = HAS_AUX_IRQ(dev);
  810.         bool vdd;
  811.  
  812.         pps_lock(intel_dp);
  813.  
  814.         /*
  815.          * We will be called with VDD already enabled for dpcd/edid/oui reads.
  816.          * In such cases we want to leave VDD enabled and it's up to upper layers
  817.          * to turn it off. But for eg. i2c-dev access we need to turn it on/off
  818.          * ourselves.
  819.          */
  820.         vdd = edp_panel_vdd_on(intel_dp);
  821.  
  822.         /* dp aux is extremely sensitive to irq latency, hence request the
  823.          * lowest possible wakeup latency and so prevent the cpu from going into
  824.          * deep sleep states.
  825.          */
  826.  
  827.         intel_dp_check_edp(intel_dp);
  828.  
  829.         intel_aux_display_runtime_get(dev_priv);
  830.  
  831.         /* Try to wait for any previous AUX channel activity */
  832.         for (try = 0; try < 3; try++) {
  833.                 status = I915_READ_NOTRACE(ch_ctl);
  834.                 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
  835.                         break;
  836.                 msleep(1);
  837.         }
  838.  
  839.         if (try == 3) {
  840.                 WARN(1, "dp_aux_ch not started status 0x%08x\n",
  841.                      I915_READ(ch_ctl));
  842.                 ret = -EBUSY;
  843.                 goto out;
  844.         }
  845.  
  846.         /* Only 5 data registers! */
  847.         if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
  848.                 ret = -E2BIG;
  849.                 goto out;
  850.         }
  851.  
  852.         while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
  853.                 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
  854.                                                           has_aux_irq,
  855.                                                           send_bytes,
  856.                                                           aux_clock_divider);
  857.  
  858.         /* Must try at least 3 times according to DP spec */
  859.         for (try = 0; try < 5; try++) {
  860.                 /* Load the send data into the aux channel data registers */
  861.                 for (i = 0; i < send_bytes; i += 4)
  862.                         I915_WRITE(ch_data + i,
  863.                                            intel_dp_pack_aux(send + i,
  864.                                                              send_bytes - i));
  865.  
  866.                 /* Send the command and wait for it to complete */
  867.                         I915_WRITE(ch_ctl, send_ctl);
  868.  
  869.                 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
  870.  
  871.                 /* Clear done status and any errors */
  872.                 I915_WRITE(ch_ctl,
  873.                            status |
  874.                            DP_AUX_CH_CTL_DONE |
  875.                            DP_AUX_CH_CTL_TIME_OUT_ERROR |
  876.                            DP_AUX_CH_CTL_RECEIVE_ERROR);
  877.  
  878.                 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
  879.                               DP_AUX_CH_CTL_RECEIVE_ERROR))
  880.                         continue;
  881.                 if (status & DP_AUX_CH_CTL_DONE)
  882.                         break;
  883.         }
  884.                 if (status & DP_AUX_CH_CTL_DONE)
  885.                         break;
  886.         }
  887.  
  888.         if ((status & DP_AUX_CH_CTL_DONE) == 0) {
  889.                 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
  890.                 ret = -EBUSY;
  891.                 goto out;
  892.         }
  893.  
  894.         /* Check for timeout or receive error.
  895.          * Timeouts occur when the sink is not connected
  896.          */
  897.         if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
  898.                 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
  899.                 ret = -EIO;
  900.                 goto out;
  901.         }
  902.  
  903.         /* Timeouts occur when the device isn't connected, so they're
  904.          * "normal" -- don't fill the kernel log with these */
  905.         if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
  906.                 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
  907.                 ret = -ETIMEDOUT;
  908.                 goto out;
  909.         }
  910.  
  911.         /* Unload any bytes sent back from the other side */
  912.         recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
  913.                       DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
  914.         if (recv_bytes > recv_size)
  915.                 recv_bytes = recv_size;
  916.  
  917.         for (i = 0; i < recv_bytes; i += 4)
  918.                 intel_dp_unpack_aux(I915_READ(ch_data + i),
  919.                            recv + i, recv_bytes - i);
  920.  
  921.         ret = recv_bytes;
  922. out:
  923. //      pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
  924.         intel_aux_display_runtime_put(dev_priv);
  925.  
  926.         if (vdd)
  927.                 edp_panel_vdd_off(intel_dp, false);
  928.  
  929.         pps_unlock(intel_dp);
  930.  
  931.         return ret;
  932. }
  933.  
  934. #define BARE_ADDRESS_SIZE       3
  935. #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
  936. static ssize_t
  937. intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
  938. {
  939.         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
  940.         uint8_t txbuf[20], rxbuf[20];
  941.         size_t txsize, rxsize;
  942.         int ret;
  943.  
  944.         txbuf[0] = msg->request << 4;
  945.         txbuf[1] = msg->address >> 8;
  946.         txbuf[2] = msg->address & 0xff;
  947.         txbuf[3] = msg->size - 1;
  948.  
  949.         switch (msg->request & ~DP_AUX_I2C_MOT) {
  950.         case DP_AUX_NATIVE_WRITE:
  951.         case DP_AUX_I2C_WRITE:
  952.                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
  953.                 rxsize = 1;
  954.  
  955.                 if (WARN_ON(txsize > 20))
  956.                 return -E2BIG;
  957.  
  958.                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
  959.  
  960.                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
  961.                 if (ret > 0) {
  962.                         msg->reply = rxbuf[0] >> 4;
  963.  
  964.                         /* Return payload size. */
  965.                         ret = msg->size;
  966.                 }
  967.                         break;
  968.  
  969.         case DP_AUX_NATIVE_READ:
  970.         case DP_AUX_I2C_READ:
  971.                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
  972.                 rxsize = msg->size + 1;
  973.  
  974.                 if (WARN_ON(rxsize > 20))
  975.                 return -E2BIG;
  976.  
  977.                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
  978.                 if (ret > 0) {
  979.                         msg->reply = rxbuf[0] >> 4;
  980.                         /*
  981.                          * Assume happy day, and copy the data. The caller is
  982.                          * expected to check msg->reply before touching it.
  983.                          *
  984.                          * Return payload size.
  985.                          */
  986.                         ret--;
  987.                         memcpy(msg->buffer, rxbuf + 1, ret);
  988.                 }
  989.                 break;
  990.  
  991.         default:
  992.                 ret = -EINVAL;
  993.                 break;
  994.         }
  995.  
  996.                         return ret;
  997. }
  998.  
  999. static void
  1000. intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
  1001. {
  1002.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1003.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  1004.         enum port port = intel_dig_port->port;
  1005.         const char *name = NULL;
  1006.         int ret;
  1007.  
  1008.         switch (port) {
  1009.         case PORT_A:
  1010.                 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
  1011.                 name = "DPDDC-A";
  1012.                 break;
  1013.         case PORT_B:
  1014.                 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
  1015.                 name = "DPDDC-B";
  1016.                 break;
  1017.         case PORT_C:
  1018.                 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
  1019.                 name = "DPDDC-C";
  1020.                 break;
  1021.         case PORT_D:
  1022.                 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
  1023.                 name = "DPDDC-D";
  1024.                 break;
  1025.         default:
  1026.                 BUG();
  1027.         }
  1028.  
  1029.         /*
  1030.          * The AUX_CTL register is usually DP_CTL + 0x10.
  1031.          *
  1032.          * On Haswell and Broadwell though:
  1033.          *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
  1034.          *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
  1035.          *
  1036.          * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
  1037.          */
  1038.         if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
  1039.                 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
  1040.  
  1041.         intel_dp->aux.name = name;
  1042.         intel_dp->aux.dev = dev->dev;
  1043.         intel_dp->aux.transfer = intel_dp_aux_transfer;
  1044.  
  1045.         DRM_DEBUG_KMS("registering %s bus for %s\n", name,
  1046.                       connector->base.kdev->kobj.name);
  1047.  
  1048.         ret = drm_dp_aux_register(&intel_dp->aux);
  1049.                 if (ret < 0) {
  1050.                 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
  1051.                           name, ret);
  1052.                 return;
  1053.         }
  1054. }
  1055.  
  1056. static void
  1057. intel_dp_connector_unregister(struct intel_connector *intel_connector)
  1058. {
  1059.         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
  1060.  
  1061.         intel_connector_unregister(intel_connector);
  1062. }
  1063.  
  1064. static void
  1065. skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
  1066. {
  1067.         u32 ctrl1;
  1068.  
  1069.         pipe_config->ddi_pll_sel = SKL_DPLL0;
  1070.         pipe_config->dpll_hw_state.cfgcr1 = 0;
  1071.         pipe_config->dpll_hw_state.cfgcr2 = 0;
  1072.  
  1073.         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
  1074.         switch (link_bw) {
  1075.         case DP_LINK_BW_1_62:
  1076.                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
  1077.                                               SKL_DPLL0);
  1078.                 break;
  1079.         case DP_LINK_BW_2_7:
  1080.                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
  1081.                                               SKL_DPLL0);
  1082.                 break;
  1083.         case DP_LINK_BW_5_4:
  1084.                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
  1085.                                               SKL_DPLL0);
  1086.                 break;
  1087.         }
  1088.         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
  1089. }
  1090.  
  1091. static void
  1092. hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
  1093. {
  1094.         switch (link_bw) {
  1095.         case DP_LINK_BW_1_62:
  1096.                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
  1097.                 break;
  1098.         case DP_LINK_BW_2_7:
  1099.                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
  1100.                 break;
  1101.         case DP_LINK_BW_5_4:
  1102.                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
  1103.                 break;
  1104.         }
  1105. }
  1106.  
  1107. static void
  1108. intel_dp_set_clock(struct intel_encoder *encoder,
  1109.                    struct intel_crtc_config *pipe_config, int link_bw)
  1110. {
  1111.         struct drm_device *dev = encoder->base.dev;
  1112.         const struct dp_link_dpll *divisor = NULL;
  1113.         int i, count = 0;
  1114.  
  1115.         if (IS_G4X(dev)) {
  1116.                 divisor = gen4_dpll;
  1117.                 count = ARRAY_SIZE(gen4_dpll);
  1118.         } else if (HAS_PCH_SPLIT(dev)) {
  1119.                 divisor = pch_dpll;
  1120.                 count = ARRAY_SIZE(pch_dpll);
  1121.         } else if (IS_CHERRYVIEW(dev)) {
  1122.                 divisor = chv_dpll;
  1123.                 count = ARRAY_SIZE(chv_dpll);
  1124.         } else if (IS_VALLEYVIEW(dev)) {
  1125.                 divisor = vlv_dpll;
  1126.                 count = ARRAY_SIZE(vlv_dpll);
  1127.         }
  1128.  
  1129.         if (divisor && count) {
  1130.                 for (i = 0; i < count; i++) {
  1131.                         if (link_bw == divisor[i].link_bw) {
  1132.                                 pipe_config->dpll = divisor[i].dpll;
  1133.                                 pipe_config->clock_set = true;
  1134.                                 break;
  1135.                         }
  1136.                 }
  1137.         }
  1138. }
  1139.  
  1140. bool
  1141. intel_dp_compute_config(struct intel_encoder *encoder,
  1142.                         struct intel_crtc_config *pipe_config)
  1143. {
  1144.         struct drm_device *dev = encoder->base.dev;
  1145.         struct drm_i915_private *dev_priv = dev->dev_private;
  1146.         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
  1147.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  1148.         enum port port = dp_to_dig_port(intel_dp)->port;
  1149.         struct intel_crtc *intel_crtc = encoder->new_crtc;
  1150.         struct intel_connector *intel_connector = intel_dp->attached_connector;
  1151.         int lane_count, clock;
  1152.         int min_lane_count = 1;
  1153.         int max_lane_count = intel_dp_max_lane_count(intel_dp);
  1154.         /* Conveniently, the link BW constants become indices with a shift...*/
  1155.         int min_clock = 0;
  1156.         int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
  1157.         int bpp, mode_rate;
  1158.         static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
  1159.         int link_avail, link_clock;
  1160.  
  1161.         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
  1162.                 pipe_config->has_pch_encoder = true;
  1163.  
  1164.         pipe_config->has_dp_encoder = true;
  1165.         pipe_config->has_drrs = false;
  1166.         pipe_config->has_audio = intel_dp->has_audio;
  1167.  
  1168.         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
  1169.                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
  1170.                                        adjusted_mode);
  1171.                 if (!HAS_PCH_SPLIT(dev))
  1172.                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
  1173.                                                  intel_connector->panel.fitting_mode);
  1174.                 else
  1175.                         intel_pch_panel_fitting(intel_crtc, pipe_config,
  1176.                                                 intel_connector->panel.fitting_mode);
  1177.         }
  1178.  
  1179.         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
  1180.                 return false;
  1181.  
  1182.         DRM_DEBUG_KMS("DP link computation with max lane count %i "
  1183.                       "max bw %02x pixel clock %iKHz\n",
  1184.                       max_lane_count, bws[max_clock],
  1185.                       adjusted_mode->crtc_clock);
  1186.  
  1187.         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
  1188.          * bpc in between. */
  1189.         bpp = pipe_config->pipe_bpp;
  1190.         if (is_edp(intel_dp)) {
  1191.                 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
  1192.                 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
  1193.                               dev_priv->vbt.edp_bpp);
  1194.                 bpp = dev_priv->vbt.edp_bpp;
  1195.         }
  1196.  
  1197.                 /*
  1198.                  * Use the maximum clock and number of lanes the eDP panel
  1199.                  * advertizes being capable of. The panels are generally
  1200.                  * designed to support only a single clock and lane
  1201.                  * configuration, and typically these values correspond to the
  1202.                  * native resolution of the panel.
  1203.                  */
  1204.                         min_lane_count = max_lane_count;
  1205.                 min_clock = max_clock;
  1206.         }
  1207.  
  1208.         for (; bpp >= 6*3; bpp -= 2*3) {
  1209.                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
  1210.                                                    bpp);
  1211.  
  1212.                 for (clock = min_clock; clock <= max_clock; clock++) {
  1213.                 for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
  1214.                                 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
  1215.                                 link_avail = intel_dp_max_data_rate(link_clock,
  1216.                                                                     lane_count);
  1217.  
  1218.                                 if (mode_rate <= link_avail) {
  1219.                                         goto found;
  1220.                                 }
  1221.                         }
  1222.                 }
  1223.         }
  1224.  
  1225.                 return false;
  1226.  
  1227. found:
  1228.         if (intel_dp->color_range_auto) {
  1229.                 /*
  1230.                  * See:
  1231.                  * CEA-861-E - 5.1 Default Encoding Parameters
  1232.                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
  1233.                  */
  1234.                 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
  1235.                         intel_dp->color_range = DP_COLOR_RANGE_16_235;
  1236.                 else
  1237.                         intel_dp->color_range = 0;
  1238.         }
  1239.  
  1240.         if (intel_dp->color_range)
  1241.                 pipe_config->limited_color_range = true;
  1242.  
  1243.                                 intel_dp->link_bw = bws[clock];
  1244.                                 intel_dp->lane_count = lane_count;
  1245.         pipe_config->pipe_bpp = bpp;
  1246.         pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
  1247.  
  1248.         DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
  1249.                                        intel_dp->link_bw, intel_dp->lane_count,
  1250.                       pipe_config->port_clock, bpp);
  1251.                                 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
  1252.                                               mode_rate, link_avail);
  1253.  
  1254.         intel_link_compute_m_n(bpp, lane_count,
  1255.                                adjusted_mode->crtc_clock,
  1256.                                pipe_config->port_clock,
  1257.                                &pipe_config->dp_m_n);
  1258.  
  1259.         if (intel_connector->panel.downclock_mode != NULL &&
  1260.                 intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
  1261.                         pipe_config->has_drrs = true;
  1262.                         intel_link_compute_m_n(bpp, lane_count,
  1263.                                 intel_connector->panel.downclock_mode->clock,
  1264.                                 pipe_config->port_clock,
  1265.                                 &pipe_config->dp_m2_n2);
  1266.         }
  1267.  
  1268.         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
  1269.                 skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
  1270.         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  1271.                 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
  1272.         else
  1273.         intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
  1274.  
  1275.         return true;
  1276. }
  1277.  
/* Program the CPU eDP PLL frequency select in DP_A to match the
 * configured port clock (162000 -> 160MHz bit, otherwise 270MHz),
 * mirroring the choice into the cached intel_dp->DP value. */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
        /* Read-modify-write: only the PLL frequency field changes. */
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl &= ~DP_PLL_FREQ_MASK;

        if (crtc->config.port_clock == 162000) {
                /* For a long time we've carried around a ILK-DevA w/a for the
                 * 160MHz clock. If we're really unlucky, it's still required.
                 */
                DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
                dpa_ctl |= DP_PLL_FREQ_160MHZ;
                intel_dp->DP |= DP_PLL_FREQ_160MHZ;
        } else {
                dpa_ctl |= DP_PLL_FREQ_270MHZ;
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
        }

        I915_WRITE(DP_A, dpa_ctl);

        /* Flush the write, then give the PLL time to settle. */
        POSTING_READ(DP_A);
        udelay(500);
}
  1307.  
  1308. static void intel_dp_prepare(struct intel_encoder *encoder)
  1309. {
  1310.         struct drm_device *dev = encoder->base.dev;
  1311.         struct drm_i915_private *dev_priv = dev->dev_private;
  1312.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  1313.         enum port port = dp_to_dig_port(intel_dp)->port;
  1314.         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
  1315.         struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
  1316.  
  1317.         /*
  1318.          * There are four kinds of DP registers:
  1319.          *
  1320.          *      IBX PCH
  1321.          *      SNB CPU
  1322.          *      IVB CPU
  1323.          *      CPT PCH
  1324.          *
  1325.          * IBX PCH and CPU are the same for almost everything,
  1326.          * except that the CPU DP PLL is configured in this
  1327.          * register
  1328.          *
  1329.          * CPT PCH is quite different, having many bits moved
  1330.          * to the TRANS_DP_CTL register instead. That
  1331.          * configuration happens (oddly) in ironlake_pch_enable
  1332.          */
  1333.  
  1334.         /* Preserve the BIOS-computed detected bit. This is
  1335.          * supposed to be read-only.
  1336.          */
  1337.         intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
  1338.  
  1339.         /* Handle DP bits in common between all three register formats */
  1340.         intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
  1341.         intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
  1342.  
  1343.         if (crtc->config.has_audio)
  1344.                 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
  1345.  
  1346.         /* Split out the IBX/CPU vs CPT settings */
  1347.  
  1348.         if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
  1349.                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
  1350.                         intel_dp->DP |= DP_SYNC_HS_HIGH;
  1351.                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
  1352.                         intel_dp->DP |= DP_SYNC_VS_HIGH;
  1353.                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
  1354.  
  1355.                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
  1356.                         intel_dp->DP |= DP_ENHANCED_FRAMING;
  1357.  
  1358.                 intel_dp->DP |= crtc->pipe << 29;
  1359.         } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
  1360.                 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
  1361.                 intel_dp->DP |= intel_dp->color_range;
  1362.  
  1363.                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
  1364.                         intel_dp->DP |= DP_SYNC_HS_HIGH;
  1365.                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
  1366.                         intel_dp->DP |= DP_SYNC_VS_HIGH;
  1367.                 intel_dp->DP |= DP_LINK_TRAIN_OFF;
  1368.  
  1369.                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
  1370.                 intel_dp->DP |= DP_ENHANCED_FRAMING;
  1371.  
  1372.                 if (!IS_CHERRYVIEW(dev)) {
  1373.                 if (crtc->pipe == 1)
  1374.                 intel_dp->DP |= DP_PIPEB_SELECT;
  1375.         } else {
  1376.                         intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
  1377.                 }
  1378.         } else {
  1379.                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
  1380.         }
  1381. }
  1382.  
  1383. #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
  1384. #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
  1385.  
  1386. #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
  1387. #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
  1388.  
  1389. #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
  1390. #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
  1391.  
  1392. static void wait_panel_status(struct intel_dp *intel_dp,
  1393.                                        u32 mask,
  1394.                                        u32 value)
  1395. {
  1396.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1397.         struct drm_i915_private *dev_priv = dev->dev_private;
  1398.         u32 pp_stat_reg, pp_ctrl_reg;
  1399.  
  1400.         lockdep_assert_held(&dev_priv->pps_mutex);
  1401.  
  1402.         pp_stat_reg = _pp_stat_reg(intel_dp);
  1403.         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1404.  
  1405.         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
  1406.                       mask, value,
  1407.                         I915_READ(pp_stat_reg),
  1408.                         I915_READ(pp_ctrl_reg));
  1409.  
  1410.         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
  1411.                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
  1412.                                 I915_READ(pp_stat_reg),
  1413.                                 I915_READ(pp_ctrl_reg));
  1414.         }
  1415.  
  1416.         DRM_DEBUG_KMS("Wait complete\n");
  1417. }
  1418.  
/* Block until the PPS reports the panel powered on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
  1424.  
/* Block until the PPS reports the panel powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
  1430.  
/* Block until the panel power-cycle delay has elapsed and the PPS
 * reports the off/idle state, so the panel may be powered on again. */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
  1442.  
/* Honour the panel's power-on -> backlight-on delay before enabling
 * the backlight. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
  1448.  
/* Honour the backlight-off delay before continuing the panel power-off
 * sequence. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
  1454.  
  1455. /* Read the current pp_control value, unlocking the register if it
  1456.  * is locked
  1457.  */
  1458.  
  1459. static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
  1460. {
  1461.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1462.         struct drm_i915_private *dev_priv = dev->dev_private;
  1463.         u32 control;
  1464.  
  1465.         lockdep_assert_held(&dev_priv->pps_mutex);
  1466.  
  1467.         control = I915_READ(_pp_ctrl_reg(intel_dp));
  1468.         control &= ~PANEL_UNLOCK_MASK;
  1469.         control |= PANEL_UNLOCK_REGS;
  1470.         return control;
  1471. }
  1472.  
  1473. /*
  1474.  * Must be paired with edp_panel_vdd_off().
  1475.  * Must hold pps_mutex around the whole on/off sequence.
  1476.  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
  1477.  */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        /* Caller must only undo VDD if it wasn't already requested. */
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return false;

        intel_dp->want_panel_vdd = true;

        /* VDD already up in hardware: just record the request. */
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        /* Reference is dropped again in edp_panel_vdd_off_sync(). */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->port));

        /* Respect the mandatory off-time before re-applying power. */
        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}
  1529.  
  1530. /*
  1531.  * Must be paired with intel_edp_panel_vdd_off() or
  1532.  * intel_edp_panel_off().
  1533.  * Nested calls to these functions are not allowed since
  1534.  * we drop the lock. Caller must use some higher level
  1535.  * locking to prevent nested calls from other threads.
  1536.  */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        bool vdd;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        vdd = edp_panel_vdd_on(intel_dp);
        pps_unlock(intel_dp);

        /* At this API level VDD must not already have been requested. */
        WARN(!vdd, "eDP port %c VDD already requested on\n",
             port_name(dp_to_dig_port(intel_dp)->port));
}
  1551.  
  1552. static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
  1553. {
  1554.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1555.         struct drm_i915_private *dev_priv = dev->dev_private;
  1556.         struct intel_digital_port *intel_dig_port =
  1557.                 dp_to_dig_port(intel_dp);
  1558.         struct intel_encoder *intel_encoder = &intel_dig_port->base;
  1559.         enum intel_display_power_domain power_domain;
  1560.         u32 pp;
  1561.         u32 pp_stat_reg, pp_ctrl_reg;
  1562.  
  1563.         lockdep_assert_held(&dev_priv->pps_mutex);
  1564.  
  1565.         WARN_ON(intel_dp->want_panel_vdd);
  1566.  
  1567.         if (!edp_have_panel_vdd(intel_dp))
  1568.                 return;
  1569.  
  1570.         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
  1571.                       port_name(intel_dig_port->port));
  1572.  
  1573.                 pp = ironlake_get_pp_control(intel_dp);
  1574.         pp &= ~EDP_FORCE_VDD;
  1575.  
  1576.                 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1577.                 pp_stat_reg = _pp_stat_reg(intel_dp);
  1578.  
  1579.                 I915_WRITE(pp_ctrl_reg, pp);
  1580.                 POSTING_READ(pp_ctrl_reg);
  1581.  
  1582.         /* Make sure sequencer is idle before allowing subsequent activity */
  1583.                 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
  1584.                 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
  1585.  
  1586.                 if ((pp & POWER_TARGET_ON) == 0)
  1587.                         intel_dp->last_power_cycle = jiffies;
  1588.  
  1589.                 power_domain = intel_display_port_power_domain(intel_encoder);
  1590.                 intel_display_power_put(dev_priv, power_domain);
  1591. }
  1592.  
  1593. static void edp_panel_vdd_work(struct work_struct *__work)
  1594. {
  1595.         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
  1596.                                                  struct intel_dp, panel_vdd_work);
  1597.  
  1598.         pps_lock(intel_dp);
  1599.         if (!intel_dp->want_panel_vdd)
  1600.         edp_panel_vdd_off_sync(intel_dp);
  1601.         pps_unlock(intel_dp);
  1602. }
  1603.  
  1604. static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
  1605. {
  1606.         unsigned long delay;
  1607.  
  1608.         /*
  1609.          * Queue the timer to fire a long time from now (relative to the power
  1610.          * down delay) to keep the panel power up across a sequence of
  1611.          * operations.
  1612.          */
  1613.         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
  1614.         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
  1615. }
  1616.  
  1617. /*
  1618.  * Must be paired with edp_panel_vdd_on().
  1619.  * Must hold pps_mutex around the whole on/off sequence.
  1620.  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
  1621.  */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        struct drm_i915_private *dev_priv =
                intel_dp_to_dev(intel_dp)->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
             port_name(dp_to_dig_port(intel_dp)->port));

        intel_dp->want_panel_vdd = false;

        /* sync: drop VDD right now; otherwise defer it to delayed work. */
        if (sync)
                edp_panel_vdd_off_sync(intel_dp);
        else
                edp_panel_vdd_schedule_off(intel_dp);
}
  1642.  
  1643. static void edp_panel_on(struct intel_dp *intel_dp)
  1644. {
  1645.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  1646.         struct drm_i915_private *dev_priv = dev->dev_private;
  1647.         u32 pp;
  1648.         u32 pp_ctrl_reg;
  1649.  
  1650.         lockdep_assert_held(&dev_priv->pps_mutex);
  1651.  
  1652.         if (!is_edp(intel_dp))
  1653.                 return;
  1654.  
  1655.         DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
  1656.                       port_name(dp_to_dig_port(intel_dp)->port));
  1657.  
  1658.         if (WARN(edp_have_panel_power(intel_dp),
  1659.                  "eDP port %c panel power already on\n",
  1660.                  port_name(dp_to_dig_port(intel_dp)->port)))
  1661.                 return;
  1662.  
  1663.         wait_panel_power_cycle(intel_dp);
  1664.  
  1665.         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  1666.         pp = ironlake_get_pp_control(intel_dp);
  1667.         if (IS_GEN5(dev)) {
  1668.         /* ILK workaround: disable reset around power sequence */
  1669.         pp &= ~PANEL_POWER_RESET;
  1670.                 I915_WRITE(pp_ctrl_reg, pp);
  1671.                 POSTING_READ(pp_ctrl_reg);
  1672.         }
  1673.  
  1674.         pp |= POWER_TARGET_ON;
  1675.         if (!IS_GEN5(dev))
  1676.                 pp |= PANEL_POWER_RESET;
  1677.  
  1678.         I915_WRITE(pp_ctrl_reg, pp);
  1679.         POSTING_READ(pp_ctrl_reg);
  1680.  
  1681.         wait_panel_on(intel_dp);
  1682.         intel_dp->last_power_on = jiffies;
  1683.  
  1684.         if (IS_GEN5(dev)) {
  1685.         pp |= PANEL_POWER_RESET; /* restore panel reset bit */
  1686.                 I915_WRITE(pp_ctrl_reg, pp);
  1687.                 POSTING_READ(pp_ctrl_reg);
  1688.         }
  1689. }
  1690.  
  1691. void intel_edp_panel_on(struct intel_dp *intel_dp)
  1692. {
  1693.         if (!is_edp(intel_dp))
  1694.                 return;
  1695.  
  1696.         pps_lock(intel_dp);
  1697.         edp_panel_on(intel_dp);
  1698.         pps_unlock(intel_dp);
  1699. }
  1700.  
  1701.  
/*
 * Turn eDP panel power off via the power sequencer, wait for the
 * sequencer to report off, then drop the power domain reference that
 * was taken when VDD was enabled.
 *
 * Must be called with pps_mutex held and with VDD forced on (warned
 * otherwise); the single register write below clears both.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        /* The VDD request is consumed by the same write that cuts power. */
        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Timestamp for the mandatory power-cycle delay before power-up. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
  1743.  
  1744. void intel_edp_panel_off(struct intel_dp *intel_dp)
  1745. {
  1746.         if (!is_edp(intel_dp))
  1747.                 return;
  1748.  
  1749.         pps_lock(intel_dp);
  1750.         edp_panel_off(intel_dp);
  1751.         pps_unlock(intel_dp);
  1752. }
  1753.  
  1754. /* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        /* Register read-modify-write must happen under the PPS lock. */
        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
  1783.  
  1784. /* Enable backlight PWM and backlight PP control. */
  1785. void intel_edp_backlight_on(struct intel_dp *intel_dp)
  1786. {
  1787.         if (!is_edp(intel_dp))
  1788.                 return;
  1789.  
  1790.         DRM_DEBUG_KMS("\n");
  1791.  
  1792.         intel_panel_enable_backlight(intel_dp->attached_connector);
  1793.         _intel_edp_backlight_on(intel_dp);
  1794. }
  1795.  
  1796. /* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        /* Register read-modify-write must happen under the PPS lock. */
        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Timestamp used by edp_wait_backlight_off() before power-down. */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
  1822.  
  1823. /* Disable backlight PP control and backlight PWM. */
  1824. void intel_edp_backlight_off(struct intel_dp *intel_dp)
  1825. {
  1826.         if (!is_edp(intel_dp))
  1827.                 return;
  1828.  
  1829.         DRM_DEBUG_KMS("\n");
  1830.  
  1831.         _intel_edp_backlight_off(intel_dp);
  1832.         intel_panel_disable_backlight(intel_dp->attached_connector);
  1833. }
  1834.  
  1835. /*
  1836.  * Hook for controlling the panel power control backlight through the bl_power
  1837.  * sysfs attribute. Take care to handle multiple calls.
  1838.  */
  1839. static void intel_edp_backlight_power(struct intel_connector *connector,
  1840.                                       bool enable)
  1841. {
  1842.         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
  1843.         bool is_enabled;
  1844.  
  1845.         pps_lock(intel_dp);
  1846.         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
  1847.         pps_unlock(intel_dp);
  1848.  
  1849.         if (is_enabled == enable)
  1850.                 return;
  1851.  
  1852.         DRM_DEBUG_KMS("panel power control backlight %s\n",
  1853.                       enable ? "enable" : "disable");
  1854.  
  1855.         if (enable)
  1856.                 _intel_edp_backlight_on(intel_dp);
  1857.         else
  1858.                 _intel_edp_backlight_off(intel_dp);
  1859. }
  1860.  
/*
 * Enable the dedicated eDP PLL on DP_A. The attached pipe must already
 * be disabled; warns if the PLL or the port is still enabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200); /* settle time after enabling the PLL (value from original code) */
}
  1886.  
/*
 * Disable the dedicated eDP PLL on DP_A. The attached pipe must
 * already be disabled; warns if the PLL is off or the port still on.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        udelay(200); /* settle time after disabling the PLL (value from original code) */
}
  1911.  
  1912. /* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                /* Any non-ON DPMS state maps to sink power state D3. */
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                  DP_SET_POWER_D3);
        } else {
                /*
                 * When turning on, we need to retry for 1ms to give the sink
                 * time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                          DP_SET_POWER_D0);
                        if (ret == 1)
                                break;
                        msleep(1);
                }
        }

        /* ret == 1 means exactly one DPCD byte was written successfully. */
        if (ret != 1)
                DRM_DEBUG_KMS("failed to %s sink power state\n",
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
  1942.  
/*
 * Read the DP port's current hardware state: returns true if the port
 * is enabled and stores the pipe it is driven by in *pipe.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        /* Registers can't be read if the power domain is powered down. */
        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        /* The pipe-select encoding differs per platform/port generation. */
        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                /* On CPT, look up the transcoder whose port select matches. */
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        /* NOTE(review): *pipe is left unset on this path — confirm callers tolerate it. */
                        return true;
                }

                for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                /* NOTE(review): *pipe also stays unset when no transcoder matched. */
                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}
  2001.  
/*
 * Reconstruct the pipe configuration (sync flags, audio, color range,
 * link m/n, port and dot clocks) from the DP port's hardware state.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_config *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);
        if (tmp & DP_AUDIO_OUTPUT_ENABLE)
                pipe_config->has_audio = true;

        /* Sync polarity lives in the port register, or in TRANS_DP_CTL on CPT. */
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A link rate comes from the eDP PLL frequency select. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
  2085.  
/*
 * Encoder disable hook: tear down audio, backlight, sink power state
 * and panel power in the required order.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config.has_audio)
                intel_audio_codec_disable(encoder);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
  2106.  
  2107. static void ilk_post_disable_dp(struct intel_encoder *encoder)
  2108. {
  2109.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2110.         enum port port = dp_to_dig_port(intel_dp)->port;
  2111.  
  2112.         intel_dp_link_down(intel_dp);
  2113.         if (port == PORT_A)
  2114.         ironlake_edp_pll_off(intel_dp);
  2115. }
  2116.  
  2117. static void vlv_post_disable_dp(struct intel_encoder *encoder)
  2118. {
  2119.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2120.  
  2121.         intel_dp_link_down(intel_dp);
  2122. }
  2123.  
  2124. static void chv_post_disable_dp(struct intel_encoder *encoder)
  2125. {
  2126.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2127.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2128.         struct drm_device *dev = encoder->base.dev;
  2129.         struct drm_i915_private *dev_priv = dev->dev_private;
  2130.         struct intel_crtc *intel_crtc =
  2131.                 to_intel_crtc(encoder->base.crtc);
  2132.         enum dpio_channel ch = vlv_dport_to_channel(dport);
  2133.         enum pipe pipe = intel_crtc->pipe;
  2134.         u32 val;
  2135.  
  2136.                 intel_dp_link_down(intel_dp);
  2137.  
  2138.         mutex_lock(&dev_priv->dpio_lock);
  2139.  
  2140.         /* Propagate soft reset to data lane reset */
  2141.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
  2142.         val |= CHV_PCS_REQ_SOFTRESET_EN;
  2143.         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
  2144.  
  2145.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
  2146.         val |= CHV_PCS_REQ_SOFTRESET_EN;
  2147.         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
  2148.  
  2149.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
  2150.         val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  2151.         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
  2152.  
  2153.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
  2154.         val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
  2155.         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
  2156.  
  2157.         mutex_unlock(&dev_priv->dpio_lock);
  2158. }
  2159.  
/*
 * Program the requested DP training pattern into the hardware, using
 * one of three register layouts: DP_TP_CTL on DDI platforms, the CPT
 * variant of the port register on PCH-CPT, or the classic port
 * register bits elsewhere (with a CHV-specific mask/pattern-3).
 * Only *DP is updated on the non-DDI paths; the caller writes it out.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                /* DDI: training pattern lives in the DP_TP_CTL register. */
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
                /* PCH-CPT: CPT-specific pattern bits in the port register. */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Pattern 3 unsupported here; fall back to pattern 2. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                /* g4x/VLV/CHV: classic port register pattern bits. */
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Only CHV supports pattern 3 on this path. */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
  2242.  
/*
 * Write the DP port register with training pattern 1 selected, then set
 * DP_PORT_EN in a second write. The two-step write order is required on
 * VLV/CHV (see comment below) and is harmless elsewhere.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        /* First write: full port configuration, but port still disabled. */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        /* Second write: same configuration with the enable bit set. */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
  2266.  
  2267. static void intel_enable_dp(struct intel_encoder *encoder)
  2268. {
  2269.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2270.         struct drm_device *dev = encoder->base.dev;
  2271.         struct drm_i915_private *dev_priv = dev->dev_private;
  2272.         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
  2273.         uint32_t dp_reg = I915_READ(intel_dp->output_reg);
  2274.  
  2275.         if (WARN_ON(dp_reg & DP_PORT_EN))
  2276.                 return;
  2277.  
  2278.         pps_lock(intel_dp);
  2279.  
  2280.         if (IS_VALLEYVIEW(dev))
  2281.                 vlv_init_panel_power_sequencer(intel_dp);
  2282.  
  2283.         intel_dp_enable_port(intel_dp);
  2284.  
  2285.         edp_panel_vdd_on(intel_dp);
  2286.         edp_panel_on(intel_dp);
  2287.         edp_panel_vdd_off(intel_dp, true);
  2288.  
  2289.         pps_unlock(intel_dp);
  2290.  
  2291.         if (IS_VALLEYVIEW(dev))
  2292.                 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
  2293.  
  2294.         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
  2295.                         intel_dp_start_link_train(intel_dp);
  2296.                         intel_dp_complete_link_train(intel_dp);
  2297.         intel_dp_stop_link_train(intel_dp);
  2298.  
  2299.         if (crtc->config.has_audio) {
  2300.                 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
  2301.                                  pipe_name(crtc->pipe));
  2302.                 intel_audio_codec_enable(encoder);
  2303.         }
  2304. }
  2305.  
  2306. static void g4x_enable_dp(struct intel_encoder *encoder)
  2307. {
  2308.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2309.  
  2310.         intel_enable_dp(encoder);
  2311.         intel_edp_backlight_on(intel_dp);
  2312. }
  2313.  
  2314. static void vlv_enable_dp(struct intel_encoder *encoder)
  2315. {
  2316.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2317.  
  2318.         intel_edp_backlight_on(intel_dp);
  2319. }
  2320.  
  2321. static void g4x_pre_enable_dp(struct intel_encoder *encoder)
  2322. {
  2323.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2324.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2325.  
  2326.         intel_dp_prepare(encoder);
  2327.  
  2328.         /* Only ilk+ has port A */
  2329.         if (dport->port == PORT_A) {
  2330.                 ironlake_set_pll_cpu_edp(intel_dp);
  2331.                 ironlake_edp_pll_on(intel_dp);
  2332.         }
  2333. }
  2334.  
/*
 * Logically disconnect this port from its current VLV/CHV panel power
 * sequencer: sync VDD off, clear the sequencer's port select, and mark
 * pps_pipe invalid so a new sequencer can be picked later.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        /* Make sure VDD is really off before the sequencer is released. */
        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
  2360.  
  2361. static void vlv_steal_power_sequencer(struct drm_device *dev,
  2362.                                       enum pipe pipe)
  2363. {
  2364.         struct drm_i915_private *dev_priv = dev->dev_private;
  2365.         struct intel_encoder *encoder;
  2366.  
  2367.         lockdep_assert_held(&dev_priv->pps_mutex);
  2368.  
  2369.         if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
  2370.                 return;
  2371.  
  2372.         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
  2373.                             base.head) {
  2374.                 struct intel_dp *intel_dp;
  2375.                 enum port port;
  2376.  
  2377.                 if (encoder->type != INTEL_OUTPUT_EDP)
  2378.                         continue;
  2379.  
  2380.                 intel_dp = enc_to_intel_dp(&encoder->base);
  2381.                 port = dp_to_dig_port(intel_dp)->port;
  2382.  
  2383.                 if (intel_dp->pps_pipe != pipe)
  2384.                         continue;
  2385.  
  2386.                 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
  2387.                               pipe_name(pipe), port_name(port));
  2388.  
  2389.                 WARN(encoder->connectors_active,
  2390.                      "stealing pipe %c power sequencer from active eDP port %c\n",
  2391.                      pipe_name(pipe), port_name(port));
  2392.  
  2393.                 /* make sure vdd is off before we steal it */
  2394.                 vlv_detach_power_sequencer(intel_dp);
  2395.         }
  2396. }
  2397.  
  2398. static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
  2399. {
  2400.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  2401.         struct intel_encoder *encoder = &intel_dig_port->base;
  2402.         struct drm_device *dev = encoder->base.dev;
  2403.         struct drm_i915_private *dev_priv = dev->dev_private;
  2404.         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
  2405.  
  2406.         lockdep_assert_held(&dev_priv->pps_mutex);
  2407.  
  2408.         if (!is_edp(intel_dp))
  2409.                 return;
  2410.  
  2411.         if (intel_dp->pps_pipe == crtc->pipe)
  2412.                 return;
  2413.  
  2414.         /*
  2415.          * If another power sequencer was being used on this
  2416.          * port previously make sure to turn off vdd there while
  2417.          * we still have control of it.
  2418.          */
  2419.         if (intel_dp->pps_pipe != INVALID_PIPE)
  2420.                 vlv_detach_power_sequencer(intel_dp);
  2421.  
  2422.         /*
  2423.          * We may be stealing the power
  2424.          * sequencer from another port.
  2425.          */
  2426.         vlv_steal_power_sequencer(dev, crtc->pipe);
  2427.  
  2428.         /* now it's all ours */
  2429.         intel_dp->pps_pipe = crtc->pipe;
  2430.  
  2431.         DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
  2432.                       pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
  2433.  
  2434.         /* init power sequencer on this pipe and port */
  2435.         intel_dp_init_panel_power_sequencer(dev, intel_dp);
  2436.         intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
  2437. }
  2438.  
  2439. static void vlv_pre_enable_dp(struct intel_encoder *encoder)
  2440. {
  2441.         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  2442.         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
  2443.         struct drm_device *dev = encoder->base.dev;
  2444.         struct drm_i915_private *dev_priv = dev->dev_private;
  2445.         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
  2446.         enum dpio_channel port = vlv_dport_to_channel(dport);
  2447.                 int pipe = intel_crtc->pipe;
  2448.                 u32 val;
  2449.  
  2450.         mutex_lock(&dev_priv->dpio_lock);
  2451.  
  2452.         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
  2453.                 val = 0;
  2454.                 if (pipe)
  2455.                         val |= (1<<21);
  2456.                 else
  2457.                         val &= ~(1<<21);
  2458.                 val |= 0x001000c4;
  2459.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
  2460.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
  2461.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
  2462.  
  2463.         mutex_unlock(&dev_priv->dpio_lock);
  2464.  
  2465.         intel_enable_dp(encoder);
  2466. }
  2467.  
  2468. static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
  2469. {
  2470.         struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
  2471.         struct drm_device *dev = encoder->base.dev;
  2472.         struct drm_i915_private *dev_priv = dev->dev_private;
  2473.         struct intel_crtc *intel_crtc =
  2474.                 to_intel_crtc(encoder->base.crtc);
  2475.         enum dpio_channel port = vlv_dport_to_channel(dport);
  2476.         int pipe = intel_crtc->pipe;
  2477.  
  2478.         intel_dp_prepare(encoder);
  2479.  
  2480.         /* Program Tx lane resets to default */
  2481.         mutex_lock(&dev_priv->dpio_lock);
  2482.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
  2483.                          DPIO_PCS_TX_LANE2_RESET |
  2484.                          DPIO_PCS_TX_LANE1_RESET);
  2485.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
  2486.                          DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
  2487.                          DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
  2488.                          (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
  2489.                                  DPIO_PCS_CLK_SOFT_RESET);
  2490.  
  2491.         /* Fix up inter-pair skew failure */
  2492.         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
  2493.         vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
  2494.         vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
  2495.         mutex_unlock(&dev_priv->dpio_lock);
  2496. }
  2497.  
/*
 * CHV pre-enable hook: bring the PHY data lanes out of soft reset,
 * program per-lane latency/upar settings, then enable the DP port.
 * The DPIO register sequence below is order-sensitive.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i;
        u32 val;

        mutex_lock(&dev_priv->dpio_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

        /* Deassert soft data lane reset (both lane pairs 01 and 23). */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        /* Program Tx lane latency optimal setting */
        for (i = 0; i < 4; i++) {
                /* Set the latency optimal bit (lane 1 gets 0, others 0x6) */
                data = (i == 1) ? 0x0 : 0x6;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
                                data << DPIO_FRC_LATENCY_SHFIT);

                /* Set the upar bit (lane 1 gets 0, others 1) */
                data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming */
        /* FIXME: Fix up value only after power analysis */

        mutex_unlock(&dev_priv->dpio_lock);

        intel_enable_dp(encoder);
}
  2559.  
/*
 * CHV pre-PLL-enable hook: program the port register, then set up the
 * PHY left/right clock buffer distribution and the clock channel usage
 * for this pipe/channel combination.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_prepare(encoder);

        mutex_lock(&dev_priv->dpio_lock);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage (both lane pairs 01 and 23) */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
  2625.  
  2626. /*
  2627.  * Native read with retry for link status and receiver capability reads for
  2628.  * cases where the sink may still be asleep.
  2629.  *
  2630.  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
  2631.  * supposed to retry 3 times per the spec.
  2632.  */
  2633. static ssize_t
  2634. intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
  2635.                         void *buffer, size_t size)
  2636. {
  2637.         ssize_t ret;
  2638.         int i;
  2639.  
  2640.         /*
  2641.          * Sometime we just get the same incorrect byte repeated
  2642.          * over the entire buffer. Doing just one throw away read
  2643.          * initially seems to "solve" it.
  2644.          */
  2645.         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
  2646.  
  2647.         for (i = 0; i < 3; i++) {
  2648.                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
  2649.                 if (ret == size)
  2650.                         return ret;
  2651.                 msleep(1);
  2652.         }
  2653.  
  2654.         return ret;
  2655. }
  2656.  
  2657. /*
  2658.  * Fetch AUX CH registers 0x202 - 0x207 which contain
  2659.  * link status information
  2660.  */
  2661. static bool
  2662. intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
  2663. {
  2664.         return intel_dp_dpcd_read_wake(&intel_dp->aux,
  2665.                                               DP_LANE0_1_STATUS,
  2666.                                               link_status,
  2667.                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
  2668. }
  2669.  
  2670. /* These are source-specific values. */
  2671. static uint8_t
  2672. intel_dp_voltage_max(struct intel_dp *intel_dp)
  2673. {
  2674.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  2675.         enum port port = dp_to_dig_port(intel_dp)->port;
  2676.  
  2677.         if (INTEL_INFO(dev)->gen >= 9)
  2678.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  2679.         else if (IS_VALLEYVIEW(dev))
  2680.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
  2681.         else if (IS_GEN7(dev) && port == PORT_A)
  2682.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  2683.         else if (HAS_PCH_CPT(dev) && port != PORT_A)
  2684.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
  2685.         else
  2686.                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
  2687. }
  2688.  
  2689. static uint8_t
  2690. intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
  2691. {
  2692.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  2693.         enum port port = dp_to_dig_port(intel_dp)->port;
  2694.  
  2695.         if (INTEL_INFO(dev)->gen >= 9) {
  2696.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2697.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2698.                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
  2699.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2700.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2701.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2702.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2703.                 default:
  2704.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2705.                 }
  2706.         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  2707.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2708.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2709.                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
  2710.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2711.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2712.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2713.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2714.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2715.                 default:
  2716.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2717.                 }
  2718.         } else if (IS_VALLEYVIEW(dev)) {
  2719.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2720.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2721.                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
  2722.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2723.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2724.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2725.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2726.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2727.                 default:
  2728.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2729.                 }
  2730.         } else if (IS_GEN7(dev) && port == PORT_A) {
  2731.                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2732.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2733.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2734.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2735.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2736.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2737.                 default:
  2738.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2739.                 }
  2740.         } else {
  2741.         switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
  2742.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  2743.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2744.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  2745.                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
  2746.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  2747.                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
  2748.                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  2749.         default:
  2750.                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
  2751.         }
  2752.         }
  2753. }
  2754.  
/*
 * Translate the DP training-set (pre-emphasis + voltage swing) into
 * VLV PHY register values and program them through DPIO. Returns 0 in
 * all cases, including unsupported pre-emphasis/swing combinations
 * (which are rejected without touching the hardware).
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dport->base.base.crtc);
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        /* Only lane 0's training set is consulted; all lanes get it. */
        uint8_t train_set = intel_dp->train_set[0];
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        /* Look up the magic PHY values for this preemph/vswing combo. */
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
                preemph_reg_value = 0x0004000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x552AB83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5548B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B245555;
                        uniqtranscale_reg_value = 0x5560B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
                preemph_reg_value = 0x0002000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5552B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404848;
                        uniqtranscale_reg_value = 0x5580B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
                preemph_reg_value = 0x0000000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B305555;
                        uniqtranscale_reg_value = 0x5570B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B2B4040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
                preemph_reg_value = 0x0006000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x1B405555;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        /* Program the PHY; TX_DW5 is cleared first and re-armed last. */
        mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->dpio_lock);

        return 0;
}
  2854.  
/*
 * Program the Cherryview DPIO PHY for the voltage swing / pre-emphasis
 * combination requested in intel_dp->train_set[0], then start the PHY's
 * swing calculation.
 *
 * Always returns 0: unlike the gen4/6/7 helpers this does not produce
 * bits for the DP port register (the caller pairs it with a zero mask);
 * all programming happens over the DPIO sideband here.
 */
static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
        u32 deemph_reg_value, margin_reg_value, val;
        uint8_t train_set = intel_dp->train_set[0];
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        int i;

        /*
         * Translate the requested pre-emphasis + voltage swing into the
         * PHY's de-emphasis and margin register codes. Combinations not
         * listed are rejected by returning early without touching the PHY.
         */
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 128;
                        margin_reg_value = 52;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        deemph_reg_value = 128;
                        margin_reg_value = 77;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        deemph_reg_value = 128;
                        margin_reg_value = 102;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        deemph_reg_value = 128;
                        margin_reg_value = 154;
                        /* FIXME extra to set for 1200 */
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 85;
                        margin_reg_value = 78;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        deemph_reg_value = 85;
                        margin_reg_value = 116;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        deemph_reg_value = 85;
                        margin_reg_value = 154;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 64;
                        margin_reg_value = 104;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        deemph_reg_value = 64;
                        margin_reg_value = 154;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        deemph_reg_value = 43;
                        margin_reg_value = 154;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        mutex_lock(&dev_priv->dpio_lock);

        /* Clear calc init */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
        val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
        val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
        val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
        val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

        /* Reset the per-PCS margin selects to the 000 code pair. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
        val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
        val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
        val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
        val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

        /* Program swing deemph */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
                val &= ~DPIO_SWING_DEEMPH9P5_MASK;
                val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
        }

        /* Program swing margin */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
                val &= ~DPIO_SWING_MARGIN000_MASK;
                val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
        }

        /* Disable unique transition scale */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
                val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
        }

        /*
         * Only the maximum-swing / no-pre-emphasis combination re-enables
         * the unique transition scale (with a fixed 0x9a coefficient).
         */
        if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
                        == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
                ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
                        == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

                /*
                 * The document said it needs to set bit 27 for ch0 and bit 26
                 * for ch1. Might be a typo in the doc.
                 * For now, for this unique transition scale selection, set bit
                 * 27 for ch0 and ch1.
                 */
                for (i = 0; i < 4; i++) {
                        val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
                        val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
                        vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
                }

                for (i = 0; i < 4; i++) {
                        val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
                        val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
                        val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
                        vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
                }
        }

        /* Start swing calculation */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
        val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

        /* LRC Bypass */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
        val |= DPIO_LRC_BYPASS;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

        mutex_unlock(&dev_priv->dpio_lock);

        return 0;
}
  3028.  
  3029. static void
  3030. intel_get_adjust_train(struct intel_dp *intel_dp,
  3031.                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
  3032. {
  3033.         uint8_t v = 0;
  3034.         uint8_t p = 0;
  3035.         int lane;
  3036.         uint8_t voltage_max;
  3037.         uint8_t preemph_max;
  3038.  
  3039.         for (lane = 0; lane < intel_dp->lane_count; lane++) {
  3040.                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
  3041.                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
  3042.  
  3043.                 if (this_v > v)
  3044.                         v = this_v;
  3045.                 if (this_p > p)
  3046.                         p = this_p;
  3047.         }
  3048.  
  3049.         voltage_max = intel_dp_voltage_max(intel_dp);
  3050.         if (v >= voltage_max)
  3051.                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
  3052.  
  3053.         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
  3054.         if (p >= preemph_max)
  3055.                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
  3056.  
  3057.         for (lane = 0; lane < 4; lane++)
  3058.                 intel_dp->train_set[lane] = v | p;
  3059. }
  3060.  
  3061. static uint32_t
  3062. intel_gen4_signal_levels(uint8_t train_set)
  3063. {
  3064.         uint32_t        signal_levels = 0;
  3065.  
  3066.         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
  3067.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
  3068.         default:
  3069.                 signal_levels |= DP_VOLTAGE_0_4;
  3070.                 break;
  3071.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
  3072.                 signal_levels |= DP_VOLTAGE_0_6;
  3073.                 break;
  3074.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
  3075.                 signal_levels |= DP_VOLTAGE_0_8;
  3076.                 break;
  3077.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
  3078.                 signal_levels |= DP_VOLTAGE_1_2;
  3079.                 break;
  3080.         }
  3081.         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
  3082.         case DP_TRAIN_PRE_EMPH_LEVEL_0:
  3083.         default:
  3084.                 signal_levels |= DP_PRE_EMPHASIS_0;
  3085.                 break;
  3086.         case DP_TRAIN_PRE_EMPH_LEVEL_1:
  3087.                 signal_levels |= DP_PRE_EMPHASIS_3_5;
  3088.                 break;
  3089.         case DP_TRAIN_PRE_EMPH_LEVEL_2:
  3090.                 signal_levels |= DP_PRE_EMPHASIS_6;
  3091.                 break;
  3092.         case DP_TRAIN_PRE_EMPH_LEVEL_3:
  3093.                 signal_levels |= DP_PRE_EMPHASIS_9_5;
  3094.                 break;
  3095.         }
  3096.         return signal_levels;
  3097. }
  3098.  
  3099. /* Gen6's DP voltage swing and pre-emphasis control */
  3100. static uint32_t
  3101. intel_gen6_edp_signal_levels(uint8_t train_set)
  3102. {
  3103.         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
  3104.                                          DP_TRAIN_PRE_EMPHASIS_MASK);
  3105.         switch (signal_levels) {
  3106.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3107.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3108.                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
  3109.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
  3110.                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
  3111.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
  3112.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
  3113.                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
  3114.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
  3115.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
  3116.                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
  3117.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3118.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3119.                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
  3120.         default:
  3121.                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
  3122.                               "0x%x\n", signal_levels);
  3123.                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
  3124.         }
  3125. }
  3126.  
  3127. /* Gen7's DP voltage swing and pre-emphasis control */
  3128. static uint32_t
  3129. intel_gen7_edp_signal_levels(uint8_t train_set)
  3130. {
  3131.         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
  3132.                                          DP_TRAIN_PRE_EMPHASIS_MASK);
  3133.         switch (signal_levels) {
  3134.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3135.                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
  3136.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
  3137.                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
  3138.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
  3139.                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
  3140.  
  3141.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3142.                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
  3143.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
  3144.                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
  3145.  
  3146.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3147.                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
  3148.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
  3149.                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
  3150.  
  3151.         default:
  3152.                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
  3153.                               "0x%x\n", signal_levels);
  3154.                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
  3155.         }
  3156. }
  3157.  
  3158. /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
  3159. static uint32_t
  3160. intel_hsw_signal_levels(uint8_t train_set)
  3161. {
  3162.         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
  3163.                                          DP_TRAIN_PRE_EMPHASIS_MASK);
  3164.         switch (signal_levels) {
  3165.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3166.                 return DDI_BUF_TRANS_SELECT(0);
  3167.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
  3168.                 return DDI_BUF_TRANS_SELECT(1);
  3169.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
  3170.                 return DDI_BUF_TRANS_SELECT(2);
  3171.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
  3172.                 return DDI_BUF_TRANS_SELECT(3);
  3173.  
  3174.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3175.                 return DDI_BUF_TRANS_SELECT(4);
  3176.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
  3177.                 return DDI_BUF_TRANS_SELECT(5);
  3178.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
  3179.                 return DDI_BUF_TRANS_SELECT(6);
  3180.  
  3181.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
  3182.                 return DDI_BUF_TRANS_SELECT(7);
  3183.         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
  3184.                 return DDI_BUF_TRANS_SELECT(8);
  3185.         default:
  3186.                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
  3187.                               "0x%x\n", signal_levels);
  3188.                 return DDI_BUF_TRANS_SELECT(0);
  3189.         }
  3190. }
  3191.  
  3192. /* Properly updates "DP" with the correct signal levels. */
  3193. static void
  3194. intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
  3195. {
  3196.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  3197.         enum port port = intel_dig_port->port;
  3198.         struct drm_device *dev = intel_dig_port->base.base.dev;
  3199.         uint32_t signal_levels, mask;
  3200.         uint8_t train_set = intel_dp->train_set[0];
  3201.  
  3202.         if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
  3203.                 signal_levels = intel_hsw_signal_levels(train_set);
  3204.                 mask = DDI_BUF_EMP_MASK;
  3205.         } else if (IS_CHERRYVIEW(dev)) {
  3206.                 signal_levels = intel_chv_signal_levels(intel_dp);
  3207.                 mask = 0;
  3208.         } else if (IS_VALLEYVIEW(dev)) {
  3209.                 signal_levels = intel_vlv_signal_levels(intel_dp);
  3210.                 mask = 0;
  3211.         } else if (IS_GEN7(dev) && port == PORT_A) {
  3212.                 signal_levels = intel_gen7_edp_signal_levels(train_set);
  3213.                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
  3214.         } else if (IS_GEN6(dev) && port == PORT_A) {
  3215.                 signal_levels = intel_gen6_edp_signal_levels(train_set);
  3216.                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
  3217.         } else {
  3218.                 signal_levels = intel_gen4_signal_levels(train_set);
  3219.                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
  3220.         }
  3221.  
  3222.         DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
  3223.  
  3224.         *DP = (*DP & ~mask) | signal_levels;
  3225. }
  3226.  
/*
 * Apply a training pattern on both sides of the link: first the source
 * (platform-specific port bits via _intel_dp_set_link_train() plus the
 * output register write), then the sink over AUX (DP_TRAINING_PATTERN_SET
 * followed, when training, by the per-lane DP_TRAINING_LANEx_SET bytes).
 *
 * Returns true when the whole DPCD write went through.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t *DP,
                        uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint8_t buf[sizeof(intel_dp->train_set) + 1];
        int ret, len;

        /* Source side first. */
        _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

        I915_WRITE(intel_dp->output_reg, *DP);
        POSTING_READ(intel_dp->output_reg);

        /* Sink side: pattern byte, then optionally the lane settings. */
        buf[0] = dp_train_pat;
        if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
            DP_TRAINING_PATTERN_DISABLE) {
                /* don't write DP_TRAINING_LANEx_SET on disable */
                len = 1;
        } else {
                /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
                memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
                len = intel_dp->lane_count + 1;
        }

        ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
                                        buf, len);

        return ret == len;
}
  3259.  
/*
 * Restart training from a clean slate: zero all per-lane drive settings,
 * reprogram the signal levels to match, then apply dp_train_pat via
 * intel_dp_set_link_train() (whose result is returned).
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
                        uint8_t dp_train_pat)
{
        memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
        intel_dp_set_signal_levels(intel_dp, DP);
        return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
  3268.  
/*
 * Adopt the sink's adjustment requests from link_status: recompute
 * train_set, program the new signal levels on the source, then write
 * the per-lane bytes to the sink's DP_TRAINING_LANE0_SET block.
 *
 * Returns true when all lane_count bytes reached the sink.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
                           const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        intel_get_adjust_train(intel_dp, link_status);
        intel_dp_set_signal_levels(intel_dp, DP);

        I915_WRITE(intel_dp->output_reg, *DP);
        POSTING_READ(intel_dp->output_reg);

        ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
                                intel_dp->train_set, intel_dp->lane_count);

        return ret == intel_dp->lane_count;
}
  3289.  
/*
 * On DDI platforms, switch the port's DP_TP_CTL to idle-pattern
 * transmission and (except on PORT_A) wait for the hardware to report
 * DP_TP_STATUS_IDLE_DONE. No-op on non-DDI hardware.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        uint32_t val;

        if (!HAS_DDI(dev))
                return;

        val = I915_READ(DP_TP_CTL(port));
        val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
        val |= DP_TP_CTL_LINK_TRAIN_IDLE;
        I915_WRITE(DP_TP_CTL(port), val);

        /*
         * On PORT_A we can have only eDP in SST mode. There the only reason
         * we need to set idle transmission mode is to work around a HW issue
         * where we enable the pipe while not in idle link-training mode.
         * In this case there is requirement to wait for a minimum number of
         * idle patterns to be sent.
         */
        if (port == PORT_A)
                return;

        if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
                     1))
                DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
  3320.  
  3321. /* Enable corresponding port and start training pattern 1 */
  3322. void
  3323. intel_dp_start_link_train(struct intel_dp *intel_dp)
  3324. {
  3325.         struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
  3326.         struct drm_device *dev = encoder->dev;
  3327.         int i;
  3328.         uint8_t voltage;
  3329.         int voltage_tries, loop_tries;
  3330.         uint32_t DP = intel_dp->DP;
  3331.         uint8_t link_config[2];
  3332.  
  3333.         if (HAS_DDI(dev))
  3334.                 intel_ddi_prepare_link_retrain(encoder);
  3335.  
  3336.         /* Write the link configuration data */
  3337.         link_config[0] = intel_dp->link_bw;
  3338.         link_config[1] = intel_dp->lane_count;
  3339.         if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
  3340.                 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
  3341.         drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
  3342.  
  3343.         link_config[0] = 0;
  3344.         link_config[1] = DP_SET_ANSI_8B10B;
  3345.         drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
  3346.  
  3347.         DP |= DP_PORT_EN;
  3348.  
  3349.         /* clock recovery */
  3350.         if (!intel_dp_reset_link_train(intel_dp, &DP,
  3351.                                        DP_TRAINING_PATTERN_1 |
  3352.                                        DP_LINK_SCRAMBLING_DISABLE)) {
  3353.                 DRM_ERROR("failed to enable link training\n");
  3354.                 return;
  3355.         }
  3356.  
  3357.         voltage = 0xff;
  3358.         voltage_tries = 0;
  3359.         loop_tries = 0;
  3360.         for (;;) {
  3361.                 uint8_t     link_status[DP_LINK_STATUS_SIZE];
  3362.  
  3363.                 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
  3364.                 if (!intel_dp_get_link_status(intel_dp, link_status)) {
  3365.                         DRM_ERROR("failed to get link status\n");
  3366.                         break;
  3367.                 }
  3368.  
  3369.                 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
  3370.                         DRM_DEBUG_KMS("clock recovery OK\n");
  3371.                         break;
  3372.                 }
  3373.  
  3374.                 /* Check to see if we've tried the max voltage */
  3375.                 for (i = 0; i < intel_dp->lane_count; i++)
  3376.                         if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
  3377.                                 break;
  3378.                 if (i == intel_dp->lane_count) {
  3379.                         ++loop_tries;
  3380.                         if (loop_tries == 5) {
  3381.                                 DRM_ERROR("too many full retries, give up\n");
  3382.                         break;
  3383.                         }
  3384.                         intel_dp_reset_link_train(intel_dp, &DP,
  3385.                                                   DP_TRAINING_PATTERN_1 |
  3386.                                                   DP_LINK_SCRAMBLING_DISABLE);
  3387.                         voltage_tries = 0;
  3388.                         continue;
  3389.                 }
  3390.  
  3391.                 /* Check to see if we've tried the same voltage 5 times */
  3392.                 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
  3393.                         ++voltage_tries;
  3394.                         if (voltage_tries == 5) {
  3395.                                 DRM_ERROR("too many voltage retries, give up\n");
  3396.                                 break;
  3397.                         }
  3398.                 } else
  3399.                         voltage_tries = 0;
  3400.                 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
  3401.  
  3402.                 /* Update training set as requested by target */
  3403.                 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
  3404.                         DRM_ERROR("failed to update link training\n");
  3405.                         break;
  3406.                 }
  3407.         }
  3408.  
  3409.         intel_dp->DP = DP;
  3410. }
  3411.  
/*
 * Channel-equalization phase of DP link training: apply training
 * pattern 2 (or 3 when the link/sink supports it), then loop updating
 * the drive settings until channel EQ succeeds. If clock recovery is
 * lost or 5 EQ attempts fail, clock recovery is redone via
 * intel_dp_start_link_train(); after 5 such restarts we give up.
 * Finishes by switching to idle transmission and saving the port value.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;
        uint32_t training_pattern = DP_TRAINING_PATTERN_2;

        /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
        if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
                training_pattern = DP_TRAINING_PATTERN_3;

        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp, &DP,
                                     training_pattern |
                                     DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to start channel equalization\n");
                return;
        }

        tries = 0;
        cr_tries = 0;
        channel_eq = false;
        for (;;) {
                uint8_t     link_status[DP_LINK_STATUS_SIZE];

                /* Overall restart budget exhausted? */
                if (cr_tries > 5) {
                        DRM_ERROR("failed to train DP, aborting\n");
                        break;
                }

                drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
                        DRM_ERROR("failed to get link status\n");
                        break;
                }

                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
                                                training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        cr_tries++;
                        continue;
                }

                if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                        channel_eq = true;
                        break;
                }

                /* Try 5 times, then try clock recovery if that fails */
                if (tries > 5) {
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
                                                training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        tries = 0;
                        cr_tries++;
                        continue;
                }

                /* Update training set as requested by target */
                if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
                        DRM_ERROR("failed to update link training\n");
                        break;
                }
                ++tries;
        }

        intel_dp_set_idle_link_train(intel_dp);

        intel_dp->DP = DP;

        if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
  3491.  
/* Disable the training pattern on source and sink (normal data resumes). */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
        intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                DP_TRAINING_PATTERN_DISABLE);
}
  3497.  
  3498. static void
  3499. intel_dp_link_down(struct intel_dp *intel_dp)
  3500. {
  3501.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  3502.         enum port port = intel_dig_port->port;
  3503.         struct drm_device *dev = intel_dig_port->base.base.dev;
  3504.         struct drm_i915_private *dev_priv = dev->dev_private;
  3505.         struct intel_crtc *intel_crtc =
  3506.                 to_intel_crtc(intel_dig_port->base.base.crtc);
  3507.         uint32_t DP = intel_dp->DP;
  3508.  
  3509.         if (WARN_ON(HAS_DDI(dev)))
  3510.                 return;
  3511.  
  3512.         if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
  3513.                 return;
  3514.  
  3515.         DRM_DEBUG_KMS("\n");
  3516.  
  3517.         if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
  3518.                 DP &= ~DP_LINK_TRAIN_MASK_CPT;
  3519.                 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
  3520.         } else {
  3521.                 if (IS_CHERRYVIEW(dev))
  3522.                         DP &= ~DP_LINK_TRAIN_MASK_CHV;
  3523.                 else
  3524.                 DP &= ~DP_LINK_TRAIN_MASK;
  3525.                 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
  3526.         }
  3527.         POSTING_READ(intel_dp->output_reg);
  3528.  
  3529.         if (HAS_PCH_IBX(dev) &&
  3530.             I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
  3531.                 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
  3532.  
  3533.                 /* Hardware workaround: leaving our transcoder select
  3534.                  * set to transcoder B while it's off will prevent the
  3535.                  * corresponding HDMI output on transcoder A.
  3536.                  *
  3537.                  * Combine this with another hardware workaround:
  3538.                  * transcoder select bit can only be cleared while the
  3539.                  * port is enabled.
  3540.                  */
  3541.                 DP &= ~DP_PIPEB_SELECT;
  3542.                 I915_WRITE(intel_dp->output_reg, DP);
  3543.  
  3544.                 /* Changes to enable or select take place the vblank
  3545.                  * after being written.
  3546.                  */
  3547.                 if (WARN_ON(crtc == NULL)) {
  3548.                         /* We should never try to disable a port without a crtc
  3549.                          * attached. For paranoia keep the code around for a
  3550.                          * bit. */
  3551.                         POSTING_READ(intel_dp->output_reg);
  3552.                         msleep(50);
  3553.                 } else
  3554.                         intel_wait_for_vblank(dev, intel_crtc->pipe);
  3555.         }
  3556.  
  3557.         DP &= ~DP_AUDIO_OUTPUT_ENABLE;
  3558.         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
  3559.         POSTING_READ(intel_dp->output_reg);
  3560.         msleep(intel_dp->panel_power_down_delay);
  3561. }
  3562.  
  3563. static bool
  3564. intel_dp_get_dpcd(struct intel_dp *intel_dp)
  3565. {
  3566.         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  3567.         struct drm_device *dev = dig_port->base.base.dev;
  3568.         struct drm_i915_private *dev_priv = dev->dev_private;
  3569.  
  3570.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
  3571.                                     sizeof(intel_dp->dpcd)) < 0)
  3572.                 return false; /* aux transfer failed */
  3573.  
  3574.         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
  3575.  
  3576.         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
  3577.                 return false; /* DPCD not present */
  3578.  
  3579.         /* Check if the panel supports PSR */
  3580.         memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
  3581.         if (is_edp(intel_dp)) {
  3582.                 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
  3583.                                        intel_dp->psr_dpcd,
  3584.                                        sizeof(intel_dp->psr_dpcd));
  3585.                 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
  3586.                         dev_priv->psr.sink_support = true;
  3587.                 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
  3588.                 }
  3589.         }
  3590.  
  3591.         /* Training Pattern 3 support, both source and sink */
  3592.         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
  3593.             intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
  3594.             (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
  3595.                 intel_dp->use_tps3 = true;
  3596.                 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
  3597.         } else
  3598.                 intel_dp->use_tps3 = false;
  3599.  
  3600.         if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
  3601.               DP_DWN_STRM_PORT_PRESENT))
  3602.                 return true; /* native DP sink */
  3603.  
  3604.         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
  3605.                 return true; /* no per-port downstream info */
  3606.  
  3607.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
  3608.                                            intel_dp->downstream_ports,
  3609.                                     DP_MAX_DOWNSTREAM_PORTS) < 0)
  3610.                 return false; /* downstream port status fetch failed */
  3611.  
  3612.                 return true;
  3613. }
  3614.  
  3615. static void
  3616. intel_dp_probe_oui(struct intel_dp *intel_dp)
  3617. {
  3618.         u8 buf[3];
  3619.  
  3620.         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
  3621.                 return;
  3622.  
  3623.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
  3624.                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
  3625.                               buf[0], buf[1], buf[2]);
  3626.  
  3627.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
  3628.                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
  3629.                               buf[0], buf[1], buf[2]);
  3630. }
  3631.  
  3632. static bool
  3633. intel_dp_probe_mst(struct intel_dp *intel_dp)
  3634. {
  3635.         u8 buf[1];
  3636.  
  3637.         if (!intel_dp->can_mst)
  3638.                 return false;
  3639.  
  3640.         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
  3641.                 return false;
  3642.  
  3643.         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
  3644.                 if (buf[0] & DP_MST_CAP) {
  3645.                         DRM_DEBUG_KMS("Sink is MST capable\n");
  3646.                         intel_dp->is_mst = true;
  3647.                 } else {
  3648.                         DRM_DEBUG_KMS("Sink is not MST capable\n");
  3649.                         intel_dp->is_mst = false;
  3650.                 }
  3651.         }
  3652.  
  3653.         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
  3654.         return intel_dp->is_mst;
  3655. }
  3656.  
  3657. int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
  3658. {
  3659.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  3660.         struct drm_device *dev = intel_dig_port->base.base.dev;
  3661.         struct intel_crtc *intel_crtc =
  3662.                 to_intel_crtc(intel_dig_port->base.base.crtc);
  3663.         u8 buf;
  3664.         int test_crc_count;
  3665.         int attempts = 6;
  3666.  
  3667.         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
  3668.                 return -EIO;
  3669.  
  3670.         if (!(buf & DP_TEST_CRC_SUPPORTED))
  3671.                 return -ENOTTY;
  3672.  
  3673.         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
  3674.                 return -EIO;
  3675.  
  3676.         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
  3677.                                 buf | DP_TEST_SINK_START) < 0)
  3678.                 return -EIO;
  3679.  
  3680.         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
  3681.                 return -EIO;
  3682.         test_crc_count = buf & DP_TEST_COUNT_MASK;
  3683.  
  3684.         do {
  3685.                 if (drm_dp_dpcd_readb(&intel_dp->aux,
  3686.                                       DP_TEST_SINK_MISC, &buf) < 0)
  3687.                         return -EIO;
  3688.         intel_wait_for_vblank(dev, intel_crtc->pipe);
  3689.         } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
  3690.  
  3691.         if (attempts == 0) {
  3692.                 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
  3693.                 return -ETIMEDOUT;
  3694.         }
  3695.  
  3696.         if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
  3697.                 return -EIO;
  3698.  
  3699.         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
  3700.                 return -EIO;
  3701.         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
  3702.                                buf & ~DP_TEST_SINK_START) < 0)
  3703.                 return -EIO;
  3704.  
  3705.         return 0;
  3706. }
  3707.  
  3708. static bool
  3709. intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
  3710. {
  3711.         return intel_dp_dpcd_read_wake(&intel_dp->aux,
  3712.                                              DP_DEVICE_SERVICE_IRQ_VECTOR,
  3713.                                        sink_irq_vector, 1) == 1;
  3714. }
  3715.  
  3716. static bool
  3717. intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
  3718. {
  3719.         int ret;
  3720.  
  3721.         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
  3722.                                              DP_SINK_COUNT_ESI,
  3723.                                              sink_irq_vector, 14);
  3724.         if (ret != 14)
  3725.                 return false;
  3726.  
  3727.         return true;
  3728. }
  3729.  
/*
 * Respond to a sink automated-test request (DP_AUTOMATED_TEST_REQUEST).
 * No test types are implemented, so every request is NAKed.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
        /* NAK by default */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
  3736.  
/*
 * Service an MST sink interrupt: read the ESI block, retrain the link if
 * channel EQ was lost, hand the ESI to the MST topology manager, ack the
 * handled events back to the sink, and loop while new events keep
 * arriving.  If the ESI read fails, tear down MST and signal a hotplug.
 *
 * Returns the value from drm_dp_mst_hpd_irq() (0 if nothing was handled),
 * or -EINVAL when not in MST mode or the sink stopped responding.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
        bool bret;

        if (intel_dp->is_mst) {
                u8 esi[16] = { 0 };
                int ret = 0;
                int retry;
                bool handled;
                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
                if (bret == true) {

                        /* check link status - esi[10] = 0x200c */
                        if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
                                DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
                                intel_dp_start_link_train(intel_dp);
                                intel_dp_complete_link_train(intel_dp);
                                intel_dp_stop_link_train(intel_dp);
                        }

                        DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                        ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

                        if (handled) {
                                /* Ack the serviced events; retry the AUX
                                 * write up to 3 times on short writes. */
                                for (retry = 0; retry < 3; retry++) {
                                        int wret;
                                        wret = drm_dp_dpcd_write(&intel_dp->aux,
                                                                 DP_SINK_COUNT_ESI+1,
                                                                 &esi[1], 3);
                                        if (wret == 3) {
                                                break;
                                        }
                                }

                                /* More events may have queued up while we
                                 * were servicing these; poll again. */
                                bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
                                if (bret == true) {
                                        DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                                        goto go_again;
                                }
                        } else
                                ret = 0;

                        return ret;
                } else {
                        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
                        DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
                        /* send a hotplug event */
                        drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
                }
        }
        return -EINVAL;
}
  3793.  
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * Caller must hold dev->mode_config.connection_mutex.
 */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        u8 sink_irq_vector;
        u8 link_status[DP_LINK_STATUS_SIZE];

        WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

        /* Nothing to check unless the output is active on a live pipe. */
        if (!intel_encoder->connectors_active)
                return;

        if (WARN_ON(!intel_encoder->base.crtc))
                return;

        if (!to_intel_crtc(intel_encoder->base.crtc)->active)
                return;

        /* Try to read receiver status if the link appears to be up */
        if (!intel_dp_get_link_status(intel_dp, link_status)) {
                return;
        }

        /* Now read the DPCD to see if it's actually running */
        if (!intel_dp_get_dpcd(intel_dp)) {
                return;
        }

        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
                /* Clear interrupt source */
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                            DP_DEVICE_SERVICE_IRQ_VECTOR,
                                            sink_irq_vector);

                if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
                        intel_dp_handle_test_request(intel_dp);
                if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }

        /* Retrain the link if channel equalization was lost. */
        if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
                              intel_encoder->base.name);
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
                intel_dp_stop_link_train(intel_dp);
        }
}
  3853.  
  3854. /* XXX this is probably wrong for multiple downstream ports */
  3855. static enum drm_connector_status
  3856. intel_dp_detect_dpcd(struct intel_dp *intel_dp)
  3857. {
  3858.         uint8_t *dpcd = intel_dp->dpcd;
  3859.         uint8_t type;
  3860.  
  3861.         if (!intel_dp_get_dpcd(intel_dp))
  3862.                 return connector_status_disconnected;
  3863.  
  3864.         /* if there's no downstream port, we're done */
  3865.         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
  3866.                 return connector_status_connected;
  3867.  
  3868.         /* If we're HPD-aware, SINK_COUNT changes dynamically */
  3869.         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
  3870.             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
  3871.                 uint8_t reg;
  3872.  
  3873.                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
  3874.                                             &reg, 1) < 0)
  3875.                         return connector_status_unknown;
  3876.  
  3877.                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
  3878.                                               : connector_status_disconnected;
  3879.         }
  3880.  
  3881.         /* If no HPD, poke DDC gently */
  3882.         if (drm_probe_ddc(&intel_dp->aux.ddc))
  3883.                 return connector_status_connected;
  3884.  
  3885.         /* Well we tried, say unknown for unreliable port types */
  3886.         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
  3887.         type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
  3888.                 if (type == DP_DS_PORT_TYPE_VGA ||
  3889.                     type == DP_DS_PORT_TYPE_NON_EDID)
  3890.                 return connector_status_unknown;
  3891.         } else {
  3892.                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
  3893.                         DP_DWN_STRM_PORT_TYPE_MASK;
  3894.                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
  3895.                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
  3896.                         return connector_status_unknown;
  3897.         }
  3898.  
  3899.         /* Anything else is out of spec, warn and ignore */
  3900.         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
  3901.         return connector_status_disconnected;
  3902. }
  3903.  
  3904. static enum drm_connector_status
  3905. edp_detect(struct intel_dp *intel_dp)
  3906. {
  3907.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  3908.         enum drm_connector_status status;
  3909.  
  3910.                 status = intel_panel_detect(dev);
  3911.                 if (status == connector_status_unknown)
  3912.                         status = connector_status_connected;
  3913.  
  3914.                 return status;
  3915. }
  3916.  
  3917. static enum drm_connector_status
  3918. ironlake_dp_detect(struct intel_dp *intel_dp)
  3919. {
  3920.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  3921.         struct drm_i915_private *dev_priv = dev->dev_private;
  3922.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  3923.  
  3924.         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
  3925.                 return connector_status_disconnected;
  3926.  
  3927.         return intel_dp_detect_dpcd(intel_dp);
  3928. }
  3929.  
  3930. static int g4x_digital_port_connected(struct drm_device *dev,
  3931.                                        struct intel_digital_port *intel_dig_port)
  3932. {
  3933.         struct drm_i915_private *dev_priv = dev->dev_private;
  3934.         uint32_t bit;
  3935.  
  3936.         if (IS_VALLEYVIEW(dev)) {
  3937.                 switch (intel_dig_port->port) {
  3938.                 case PORT_B:
  3939.                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
  3940.                         break;
  3941.                 case PORT_C:
  3942.                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
  3943.                         break;
  3944.                 case PORT_D:
  3945.                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
  3946.                         break;
  3947.                 default:
  3948.                         return -EINVAL;
  3949.                 }
  3950.         } else {
  3951.         switch (intel_dig_port->port) {
  3952.         case PORT_B:
  3953.                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
  3954.                 break;
  3955.         case PORT_C:
  3956.                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
  3957.                 break;
  3958.         case PORT_D:
  3959.                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
  3960.                 break;
  3961.         default:
  3962.                         return -EINVAL;
  3963.         }
  3964.         }
  3965.  
  3966.         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
  3967.                 return 0;
  3968.         return 1;
  3969. }
  3970.  
  3971. static enum drm_connector_status
  3972. g4x_dp_detect(struct intel_dp *intel_dp)
  3973. {
  3974.         struct drm_device *dev = intel_dp_to_dev(intel_dp);
  3975.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  3976.         int ret;
  3977.  
  3978.         /* Can't disconnect eDP, but you can close the lid... */
  3979.         if (is_edp(intel_dp)) {
  3980.                 enum drm_connector_status status;
  3981.  
  3982.                 status = intel_panel_detect(dev);
  3983.                 if (status == connector_status_unknown)
  3984.                         status = connector_status_connected;
  3985.                 return status;
  3986.         }
  3987.  
  3988.         ret = g4x_digital_port_connected(dev, intel_dig_port);
  3989.         if (ret == -EINVAL)
  3990.                 return connector_status_unknown;
  3991.         else if (ret == 0)
  3992.                 return connector_status_disconnected;
  3993.  
  3994.         return intel_dp_detect_dpcd(intel_dp);
  3995. }
  3996.  
  3997. static struct edid *
  3998. intel_dp_get_edid(struct intel_dp *intel_dp)
  3999. {
  4000.         struct intel_connector *intel_connector = intel_dp->attached_connector;
  4001.  
  4002.         /* use cached edid if we have one */
  4003.         if (intel_connector->edid) {
  4004.                 /* invalid edid */
  4005.                 if (IS_ERR(intel_connector->edid))
  4006.                         return NULL;
  4007.  
  4008.                 return drm_edid_duplicate(intel_connector->edid);
  4009.         } else
  4010.                 return drm_get_edid(&intel_connector->base,
  4011.                                     &intel_dp->aux.ddc);
  4012. }
  4013.  
  4014. static void
  4015. intel_dp_set_edid(struct intel_dp *intel_dp)
  4016. {
  4017.         struct intel_connector *intel_connector = intel_dp->attached_connector;
  4018.         struct edid *edid;
  4019.  
  4020.         edid = intel_dp_get_edid(intel_dp);
  4021.         intel_connector->detect_edid = edid;
  4022.  
  4023.         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
  4024.                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
  4025.         else
  4026.                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
  4027. }
  4028.  
  4029. static void
  4030. intel_dp_unset_edid(struct intel_dp *intel_dp)
  4031. {
  4032.         struct intel_connector *intel_connector = intel_dp->attached_connector;
  4033.  
  4034.         kfree(intel_connector->detect_edid);
  4035.         intel_connector->detect_edid = NULL;
  4036.  
  4037.         intel_dp->has_audio = false;
  4038. }
  4039.  
  4040. static enum intel_display_power_domain
  4041. intel_dp_power_get(struct intel_dp *dp)
  4042. {
  4043.         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
  4044.         enum intel_display_power_domain power_domain;
  4045.  
  4046.         power_domain = intel_display_port_power_domain(encoder);
  4047.         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
  4048.  
  4049.         return power_domain;
  4050. }
  4051.  
  4052. static void
  4053. intel_dp_power_put(struct intel_dp *dp,
  4054.                    enum intel_display_power_domain power_domain)
  4055. {
  4056.         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
  4057.         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
  4058. }
  4059.  
/*
 * drm_connector_funcs.detect() for DP: determine whether a sink is
 * attached, probe OUI/MST, and (re)read the EDID, all inside a
 * power-domain reference so the hardware can be touched safely.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
        enum intel_display_power_domain power_domain;
        bool ret;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
        /* Invalidate any EDID cached by a previous detect. */
        intel_dp_unset_edid(intel_dp);

        if (intel_dp->is_mst) {
                /* MST devices are disconnected from a monitor POV */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                return connector_status_disconnected;
        }

        power_domain = intel_dp_power_get(intel_dp);

        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp))
                status = edp_detect(intel_dp);
        else if (HAS_PCH_SPLIT(dev))
                status = ironlake_dp_detect(intel_dp);
        else
                status = g4x_dp_detect(intel_dp);
        if (status != connector_status_connected)
                goto out;

        intel_dp_probe_oui(intel_dp);

        ret = intel_dp_probe_mst(intel_dp);
        if (ret) {
                /* if we are in MST mode then this connector
                   won't appear connected or have anything with EDID on it */
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
                        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
                status = connector_status_disconnected;
                goto out;
        }

        intel_dp_set_edid(intel_dp);

        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        status = connector_status_connected;

out:
        /* Always balance the power reference taken above. */
        intel_dp_power_put(intel_dp, power_domain);
        return status;
}
  4116.  
/*
 * drm_connector_funcs.force() for DP: re-read the EDID for a connector
 * whose status is being forced, without performing a full detect cycle.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        enum intel_display_power_domain power_domain;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
        intel_dp_unset_edid(intel_dp);

        /* Only bother re-reading EDID when forced to connected. */
        if (connector->status != connector_status_connected)
                return;

        /* EDID read touches the AUX channel; hold the power domain. */
        power_domain = intel_dp_power_get(intel_dp);

        intel_dp_set_edid(intel_dp);

        intel_dp_power_put(intel_dp, power_domain);

        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
  4140.  
  4141. static int intel_dp_get_modes(struct drm_connector *connector)
  4142. {
  4143.         struct intel_connector *intel_connector = to_intel_connector(connector);
  4144.         struct edid *edid;
  4145.  
  4146.         edid = intel_connector->detect_edid;
  4147.         if (edid) {
  4148.                 int ret = intel_connector_update_modes(connector, edid);
  4149.         if (ret)
  4150.                 return ret;
  4151.         }
  4152.  
  4153.         /* if eDP has no EDID, fall back to fixed mode */
  4154.         if (is_edp(intel_attached_dp(connector)) &&
  4155.             intel_connector->panel.fixed_mode) {
  4156.                         struct drm_display_mode *mode;
  4157.  
  4158.                 mode = drm_mode_duplicate(connector->dev,
  4159.                                           intel_connector->panel.fixed_mode);
  4160.                 if (mode) {
  4161.                         drm_mode_probed_add(connector, mode);
  4162.                         return 1;
  4163.                 }
  4164.         }
  4165.  
  4166.         return 0;
  4167. }
  4168.  
  4169. static bool
  4170. intel_dp_detect_audio(struct drm_connector *connector)
  4171. {
  4172.         bool has_audio = false;
  4173.         struct edid *edid;
  4174.  
  4175.         edid = to_intel_connector(connector)->detect_edid;
  4176.         if (edid)
  4177.                 has_audio = drm_detect_monitor_audio(edid);
  4178.  
  4179.         return has_audio;
  4180. }
  4181.  
  4182. static int
  4183. intel_dp_set_property(struct drm_connector *connector,
  4184.                       struct drm_property *property,
  4185.                       uint64_t val)
  4186. {
  4187.         struct drm_i915_private *dev_priv = connector->dev->dev_private;
  4188.         struct intel_connector *intel_connector = to_intel_connector(connector);
  4189.         struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
  4190.         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
  4191.         int ret;
  4192.  
  4193.         ret = drm_object_property_set_value(&connector->base, property, val);
  4194.         if (ret)
  4195.                 return ret;
  4196.  
  4197.         if (property == dev_priv->force_audio_property) {
  4198.                 int i = val;
  4199.                 bool has_audio;
  4200.  
  4201.                 if (i == intel_dp->force_audio)
  4202.                         return 0;
  4203.  
  4204.                 intel_dp->force_audio = i;
  4205.  
  4206.                 if (i == HDMI_AUDIO_AUTO)
  4207.                         has_audio = intel_dp_detect_audio(connector);
  4208.                 else
  4209.                         has_audio = (i == HDMI_AUDIO_ON);
  4210.  
  4211.                 if (has_audio == intel_dp->has_audio)
  4212.                         return 0;
  4213.  
  4214.                 intel_dp->has_audio = has_audio;
  4215.                 goto done;
  4216.         }
  4217.  
  4218.         if (property == dev_priv->broadcast_rgb_property) {
  4219.                 bool old_auto = intel_dp->color_range_auto;
  4220.                 uint32_t old_range = intel_dp->color_range;
  4221.  
  4222.                 switch (val) {
  4223.                 case INTEL_BROADCAST_RGB_AUTO:
  4224.                         intel_dp->color_range_auto = true;
  4225.                         break;
  4226.                 case INTEL_BROADCAST_RGB_FULL:
  4227.                         intel_dp->color_range_auto = false;
  4228.                         intel_dp->color_range = 0;
  4229.                         break;
  4230.                 case INTEL_BROADCAST_RGB_LIMITED:
  4231.                         intel_dp->color_range_auto = false;
  4232.                         intel_dp->color_range = DP_COLOR_RANGE_16_235;
  4233.                         break;
  4234.                 default:
  4235.                         return -EINVAL;
  4236.                 }
  4237.  
  4238.                 if (old_auto == intel_dp->color_range_auto &&
  4239.                     old_range == intel_dp->color_range)
  4240.                         return 0;
  4241.  
  4242.         goto done;
  4243.         }
  4244.  
  4245.         if (is_edp(intel_dp) &&
  4246.             property == connector->dev->mode_config.scaling_mode_property) {
  4247.                 if (val == DRM_MODE_SCALE_NONE) {
  4248.                         DRM_DEBUG_KMS("no scaling not supported\n");
  4249.                         return -EINVAL;
  4250.                 }
  4251.  
  4252.                 if (intel_connector->panel.fitting_mode == val) {
  4253.                         /* the eDP scaling property is not changed */
  4254.                         return 0;
  4255.                 }
  4256.                 intel_connector->panel.fitting_mode = val;
  4257.  
  4258.                 goto done;
  4259.         }
  4260.  
  4261.         return -EINVAL;
  4262.  
  4263. done:
  4264.         if (intel_encoder->base.crtc)
  4265.                 intel_crtc_restore_mode(intel_encoder->base.crtc);
  4266.  
  4267.         return 0;
  4268. }
  4269.  
/*
 * drm_connector_funcs.destroy() for DP: free cached EDIDs, tear down the
 * eDP panel state, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);

        kfree(intel_connector->detect_edid);

        /* The cached EDID slot may hold an IS_ERR() sentinel rather than
         * an allocation; only free a real pointer. */
        if (!IS_ERR_OR_NULL(intel_connector->edid))
                kfree(intel_connector->edid);

        /* Can't call is_edp() since the encoder may have been destroyed
         * already. */
        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_fini(&intel_connector->panel);

        drm_connector_cleanup(connector);
        kfree(connector);
}
  4288.  
/*
 * Tear down a DP encoder: unregister the AUX channel, clean up MST state
 * and the drm encoder, make sure eDP panel VDD is really off, and free
 * the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;

        drm_dp_aux_unregister(&intel_dp->aux);
        intel_dp_mst_encoder_cleanup(intel_dig_port);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
                pps_unlock(intel_dp);

        }
        kfree(intel_dig_port);
}
  4310.  
  4311. static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
  4312. {
  4313.         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
  4314.  
  4315.         if (!is_edp(intel_dp))
  4316.                 return;
  4317.  
  4318.         /*
  4319.          * vdd might still be enabled do to the delayed vdd off.
  4320.          * Make sure vdd is actually turned off here.
  4321.          */
  4322.         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
  4323.         pps_lock(intel_dp);
  4324.         edp_panel_vdd_off_sync(intel_dp);
  4325.         pps_unlock(intel_dp);
  4326. }
  4327.  
  4328. static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
  4329. {
  4330.         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  4331.         struct drm_device *dev = intel_dig_port->base.base.dev;
  4332.         struct drm_i915_private *dev_priv = dev->dev_private;
  4333.         enum intel_display_power_domain power_domain;
  4334.  
  4335.         lockdep_assert_held(&dev_priv->pps_mutex);
  4336.  
  4337.         if (!edp_have_panel_vdd(intel_dp))
  4338.                 return;
  4339.  
  4340.         /*
  4341.          * The VDD bit needs a power domain reference, so if the bit is
  4342.          * already enabled when we boot or resume, grab this reference and
  4343.          * schedule a vdd off, so we don't hold on to the reference
  4344.          * indefinitely.
  4345.          */
  4346.         DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
  4347.         power_domain = intel_display_port_power_domain(&intel_dig_port->base);
  4348.         intel_display_power_get(dev_priv, power_domain);
  4349.  
  4350.         edp_panel_vdd_schedule_off(intel_dp);
  4351. }
  4352.  
  4353. static void intel_dp_encoder_reset(struct drm_encoder *encoder)
  4354. {
  4355.         struct intel_dp *intel_dp;
  4356.  
  4357.         if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
  4358.                 return;
  4359.  
  4360.         intel_dp = enc_to_intel_dp(encoder);
  4361.  
  4362.         pps_lock(intel_dp);
  4363.  
  4364.         /*
  4365.          * Read out the current power sequencer assignment,
  4366.          * in case the BIOS did something with it.
  4367.          */
  4368.         if (IS_VALLEYVIEW(encoder->dev))
  4369.                 vlv_initial_power_sequencer_setup(intel_dp);
  4370.  
  4371.         intel_edp_panel_vdd_sanitize(intel_dp);
  4372.  
  4373.         pps_unlock(intel_dp);
  4374. }
  4375.  
/* Connector ops shared by DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_dp_detect,
        .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .destroy = intel_dp_connector_destroy,
};
  4384.  
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
        .best_encoder = intel_best_encoder,
};
  4390.  
/* Encoder lifetime ops (reset on resume, destroy on teardown). */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .reset = intel_dp_encoder_reset,
        .destroy = intel_dp_encoder_destroy,
};
  4395.  
  4396. void
  4397. intel_dp_hot_plug(struct intel_encoder *intel_encoder)
  4398. {
  4399.         return;
  4400. }
  4401.  
/*
 * Handle a hotplug pulse on a DP digital port.
 *
 * Long pulses trigger a full re-detect (port presence, DPCD, OUI, MST
 * probe); short pulses service MST interrupts or check link status.
 * Returns true ("ret") only on the mst_fail path, i.e. when the caller
 * should fall back to non-MST handling; returns false when the pulse was
 * fully handled here.
 *
 * NOTE(review): the power domain taken via intel_display_power_get() is
 * always released through the put_power label — keep the goto structure
 * intact when modifying this function.
 */
bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        bool ret = true;

        /* Anything that is not eDP is treated as a plain DP output. */
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
                intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
                 * vdd off can generate a long pulse on eDP which
                 * would require vdd on to handle it, and thus we
                 * would end up in an endless cycle of
                 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
                 */
                DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
                              port_name(intel_dig_port->port));
                return false;
        }

        DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
                      port_name(intel_dig_port->port),
                      long_hpd ? "long" : "short");

        /* AUX transactions below need the port powered up. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        if (long_hpd) {

                /* Verify something is actually connected to the port. */
                if (HAS_PCH_SPLIT(dev)) {
                if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
                        goto mst_fail;
                } else {
                        if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
                                goto mst_fail;
                }

                if (!intel_dp_get_dpcd(intel_dp)) {
                        goto mst_fail;
                }

                intel_dp_probe_oui(intel_dp);

                if (!intel_dp_probe_mst(intel_dp))
                        goto mst_fail;

        } else {
                if (intel_dp->is_mst) {
                        if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
                                goto mst_fail;
                }

                if (!intel_dp->is_mst) {
                        /*
                         * we'll check the link status via the normal hot plug path later -
                         * but for short hpds we should check it now
                         */
                        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
                        intel_dp_check_link_status(intel_dp);
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
                }
        }
        ret = false;
        goto put_power;
mst_fail:
        /* if we were in MST mode, and device is not there get out of MST mode */
        if (intel_dp->is_mst) {
                DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
                intel_dp->is_mst = false;
                drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
        }
put_power:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
  4483.  
  4484. /* Return which DP Port should be selected for Transcoder DP control */
  4485. int
  4486. intel_trans_dp_port_sel(struct drm_crtc *crtc)
  4487. {
  4488.         struct drm_device *dev = crtc->dev;
  4489.         struct intel_encoder *intel_encoder;
  4490.         struct intel_dp *intel_dp;
  4491.  
  4492.         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
  4493.                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
  4494.  
  4495.                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
  4496.                     intel_encoder->type == INTEL_OUTPUT_EDP)
  4497.                         return intel_dp->output_reg;
  4498.         }
  4499.  
  4500.         return -1;
  4501. }
  4502.  
  4503. /* check the VBT to see whether the eDP is on DP-D port */
  4504. bool intel_dp_is_edp(struct drm_device *dev, enum port port)
  4505. {
  4506.         struct drm_i915_private *dev_priv = dev->dev_private;
  4507.         union child_device_config *p_child;
  4508.         int i;
  4509.         static const short port_mapping[] = {
  4510.                 [PORT_B] = PORT_IDPB,
  4511.                 [PORT_C] = PORT_IDPC,
  4512.                 [PORT_D] = PORT_IDPD,
  4513.         };
  4514.  
  4515.         if (port == PORT_A)
  4516.                 return true;
  4517.  
  4518.         if (!dev_priv->vbt.child_dev_num)
  4519.                 return false;
  4520.  
  4521.         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
  4522.                 p_child = dev_priv->vbt.child_dev + i;
  4523.  
  4524.                 if (p_child->common.dvo_port == port_mapping[port] &&
  4525.                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
  4526.                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
  4527.                         return true;
  4528.         }
  4529.         return false;
  4530. }
  4531.  
  4532. void
  4533. intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
  4534. {
  4535.         struct intel_connector *intel_connector = to_intel_connector(connector);
  4536.  
  4537.         intel_attach_force_audio_property(connector);
  4538.         intel_attach_broadcast_rgb_property(connector);
  4539.         intel_dp->color_range_auto = true;
  4540.  
  4541.         if (is_edp(intel_dp)) {
  4542.                 drm_mode_create_scaling_mode_property(connector->dev);
  4543.                 drm_object_attach_property(
  4544.                         &connector->base,
  4545.                         connector->dev->mode_config.scaling_mode_property,
  4546.                         DRM_MODE_SCALE_ASPECT);
  4547.                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
  4548.         }
  4549. }
  4550.  
  4551. static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
  4552. {
  4553.         intel_dp->last_power_cycle = jiffies;
  4554.         intel_dp->last_power_on = jiffies;
  4555.         intel_dp->last_backlight_off = jiffies;
  4556. }
  4557.  
/*
 * Compute the panel power sequencer delays for this eDP panel and cache
 * them in intel_dp->pps_delays. For each delay we take the max of the
 * value currently in the PPS registers (possibly programmed by the BIOS)
 * and the VBT value, falling back to the eDP spec limits when both are
 * zero. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;
        u32 pp_on, pp_off, pp_div, pp;
        int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* already initialized? */
        if (final->t11_t12 != 0)
                return;

        /* Pick the PPS register block: PCH platforms have one global set,
         * VLV/CHV have one per pipe. */
        if (HAS_PCH_SPLIT(dev)) {
                pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp = ironlake_get_pp_control(intel_dp);
        I915_WRITE(pp_ctrl_reg, pp);

        pp_on = I915_READ(pp_on_reg);
        pp_off = I915_READ(pp_off_reg);
        pp_div = I915_READ(pp_div_reg);

        /* Pull timing values out of registers */
        cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
                PANEL_POWER_UP_DELAY_SHIFT;

        cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                PANEL_LIGHT_ON_DELAY_SHIFT;

        cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                PANEL_LIGHT_OFF_DELAY_SHIFT;

        cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                PANEL_POWER_DOWN_DELAY_SHIFT;

        /* The power-cycle delay register is in 100ms units; scale to the
         * common 100us units used everywhere else here. */
        cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

        DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

        vbt = dev_priv->vbt.edp_pps;

        /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
         * our hw here, which are all in 100usec. */
        spec.t1_t3 = 210 * 10;
        spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
        spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
        spec.t10 = 500 * 10;
        /* This one is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;

        DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
#define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
        assign_final(t8);
        assign_final(t9);
        assign_final(t10);
        assign_final(t11_t12);
#undef assign_final

        /* Convert the 100us hw units into the ms used by the driver. */
#define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
        intel_dp->panel_power_down_delay = get_delay(t10);
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

        DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
                      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
                      intel_dp->panel_power_cycle_delay);

        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
  4660.  
/*
 * Program the cached PPS delays (intel_dp->pps_delays) into the panel
 * power sequencer hardware and select which port the sequencer controls.
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        /* Reference clock for the PP divider differs between PCH and VLV. */
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
        int pp_on_reg, pp_off_reg, pp_div_reg;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (HAS_PCH_SPLIT(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /*
         * And finally store the new values in the power sequencer. The
         * backlight delays are set to 1 because we do manual waits on them. For
         * T8, even BSpec recommends doing it. For T9, if we don't do this,
         * we'll end up waiting for the backlight off delay twice: once when we
         * do the manual sleep, and once when we disable the panel and wait for
         * the PP_STATUS bit to become zero.
         */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
        pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                        << PANEL_POWER_CYCLE_DELAY_SHIFT);

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
        }

        pp_on |= port_sel;

        I915_WRITE(pp_on_reg, pp_on);
        I915_WRITE(pp_off_reg, pp_off);
        I915_WRITE(pp_div_reg, pp_div);

        /* Read back what the hardware actually latched, for debugging. */
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(pp_on_reg),
                      I915_READ(pp_off_reg),
                      I915_READ(pp_div_reg));
}
  4726.  
  4727. void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
  4728. {
  4729.         struct drm_i915_private *dev_priv = dev->dev_private;
  4730.         struct intel_encoder *encoder;
  4731.         struct intel_dp *intel_dp = NULL;
  4732.         struct intel_crtc_config *config = NULL;
  4733.         struct intel_crtc *intel_crtc = NULL;
  4734.         struct intel_connector *intel_connector = dev_priv->drrs.connector;
  4735.         u32 reg, val;
  4736.         enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
  4737.  
  4738.         if (refresh_rate <= 0) {
  4739.                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
  4740.                 return;
  4741.         }
  4742.  
  4743.         if (intel_connector == NULL) {
  4744.                 DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
  4745.                 return;
  4746.         }
  4747.  
  4748.         /*
  4749.          * FIXME: This needs proper synchronization with psr state. But really
  4750.          * hard to tell without seeing the user of this function of this code.
  4751.          * Check locking and ordering once that lands.
  4752.          */
  4753.         if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
  4754.                 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
  4755.                 return;
  4756.         }
  4757.  
  4758.         encoder = intel_attached_encoder(&intel_connector->base);
  4759.         intel_dp = enc_to_intel_dp(&encoder->base);
  4760.         intel_crtc = encoder->new_crtc;
  4761.  
  4762.         if (!intel_crtc) {
  4763.                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
  4764.                 return;
  4765.         }
  4766.  
  4767.         config = &intel_crtc->config;
  4768.  
  4769.         if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
  4770.                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
  4771.                 return;
  4772.         }
  4773.  
  4774.         if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
  4775.                 index = DRRS_LOW_RR;
  4776.  
  4777.         if (index == intel_dp->drrs_state.refresh_rate_type) {
  4778.                 DRM_DEBUG_KMS(
  4779.                         "DRRS requested for previously set RR...ignoring\n");
  4780.                 return;
  4781.         }
  4782.  
  4783.         if (!intel_crtc->active) {
  4784.                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
  4785.                 return;
  4786.         }
  4787.  
  4788.         if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
  4789.                 reg = PIPECONF(intel_crtc->config.cpu_transcoder);
  4790.                 val = I915_READ(reg);
  4791.                 if (index > DRRS_HIGH_RR) {
  4792.                         val |= PIPECONF_EDP_RR_MODE_SWITCH;
  4793.                         intel_dp_set_m_n(intel_crtc);
  4794.                 } else {
  4795.                         val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
  4796.                 }
  4797.                 I915_WRITE(reg, val);
  4798.         }
  4799.  
  4800.         /*
  4801.          * mutex taken to ensure that there is no race between differnt
  4802.          * drrs calls trying to update refresh rate. This scenario may occur
  4803.          * in future when idleness detection based DRRS in kernel and
  4804.          * possible calls from user space to set differnt RR are made.
  4805.          */
  4806.  
  4807.         mutex_lock(&intel_dp->drrs_state.mutex);
  4808.  
  4809.         intel_dp->drrs_state.refresh_rate_type = index;
  4810.  
  4811.         mutex_unlock(&intel_dp->drrs_state.mutex);
  4812.  
  4813.         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
  4814. }
  4815.  
  4816. static struct drm_display_mode *
  4817. intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
  4818.                         struct intel_connector *intel_connector,
  4819.                         struct drm_display_mode *fixed_mode)
  4820. {
  4821.         struct drm_connector *connector = &intel_connector->base;
  4822.         struct intel_dp *intel_dp = &intel_dig_port->dp;
  4823.         struct drm_device *dev = intel_dig_port->base.base.dev;
  4824.         struct drm_i915_private *dev_priv = dev->dev_private;
  4825.         struct drm_display_mode *downclock_mode = NULL;
  4826.  
  4827.         if (INTEL_INFO(dev)->gen <= 6) {
  4828.                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
  4829.                 return NULL;
  4830.         }
  4831.  
  4832.         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
  4833.                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
  4834.                 return NULL;
  4835.         }
  4836.  
  4837.         downclock_mode = intel_find_panel_downclock
  4838.                                         (dev, fixed_mode, connector);
  4839.  
  4840.         if (!downclock_mode) {
  4841.                 DRM_DEBUG_KMS("DRRS not supported\n");
  4842.                 return NULL;
  4843.         }
  4844.  
  4845.         dev_priv->drrs.connector = intel_connector;
  4846.  
  4847.         mutex_init(&intel_dp->drrs_state.mutex);
  4848.  
  4849.         intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
  4850.  
  4851.         intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
  4852.         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
  4853.         return downclock_mode;
  4854. }
  4855.  
/*
 * eDP-specific part of connector initialization: take over any
 * BIOS-enabled VDD, cache DPCD and EDID, pick the fixed panel mode (EDID
 * preferred, VBT fallback), set up DRRS and the backlight.
 *
 * Returns true on success (and trivially for non-eDP), false when the
 * panel appears to be a ghost (DPCD read fails).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        enum pipe pipe = INVALID_PIPE;

        intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;

        if (!is_edp(intel_dp))
                return true;

        /* Adopt any VDD the BIOS left enabled into our state tracking. */
        pps_lock(intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);
        pps_unlock(intel_dp);

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_dp_get_dpcd(intel_dp);

        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                        dev_priv->no_aux_handshake =
                                intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
        } else {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                return false;
        }

        /* We now know it's not a ghost, init power sequence regs. */
        pps_lock(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
        pps_unlock(intel_dp);

        /* Cache the EDID (or an ERR_PTR sentinel on failure) so later
         * detection doesn't have to re-read it over AUX. */
        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
                                                                edid);
                        drm_edid_to_eld(connector, edid);
                } else {
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        intel_connector->edid = edid;

        /* prefer fixed mode from EDID if available */
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        downclock_mode = intel_dp_drrs_init(
                                                intel_dig_port,
                                                intel_connector, fixed_mode);
                        break;
                }
        }

        /* fallback to VBT if available for eDP */
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode)
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev)) {

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                if (IS_CHERRYVIEW(dev))
                        pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
                else
                        pipe = PORT_TO_PIPE(intel_dp->DP);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight_power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        return true;
}
  4963.  
/*
 * intel_dp_init_connector - second-stage initialization of a DP/eDP connector
 * @intel_dig_port: digital port the DP encoder lives on
 * @intel_connector: pre-allocated connector to initialize and register
 *
 * Installs the platform-specific AUX channel vfuncs, registers the DRM
 * connector, configures the hotplug pin, sets up the panel power sequencer
 * (eDP only) and MST support, and finally probes for an attached eDP panel.
 *
 * Returns true on success.  On failure the connector is unregistered and
 * cleaned up again and false is returned; the caller still owns (and must
 * free) @intel_dig_port and @intel_connector.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	/* No power sequencer assigned yet (relevant on vlv/chv). */
	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: per-platform AUX clock divider computation. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	/* Gen9+ programs DP_AUX_CH_CTL differently from earlier gens. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Deferred vdd-off work; canceled on the failure path below. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	/* Panel power sequencer setup must happen under the pps lock. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
			intel_dp_mst_encoder_init(intel_dig_port,
						  intel_connector->base.base.id);
		}
	}

	/* Probe for the panel; on failure unwind everything set up above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled do to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
  5107.  
  5108. void
  5109. intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
  5110. {
  5111.         struct drm_i915_private *dev_priv = dev->dev_private;
  5112.         struct intel_digital_port *intel_dig_port;
  5113.         struct intel_encoder *intel_encoder;
  5114.         struct drm_encoder *encoder;
  5115.         struct intel_connector *intel_connector;
  5116.  
  5117.         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
  5118.         if (!intel_dig_port)
  5119.                 return;
  5120.  
  5121.         intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
  5122.         if (!intel_connector) {
  5123.                 kfree(intel_dig_port);
  5124.                 return;
  5125.         }
  5126.  
  5127.         intel_encoder = &intel_dig_port->base;
  5128.         encoder = &intel_encoder->base;
  5129.  
  5130.         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
  5131.                          DRM_MODE_ENCODER_TMDS);
  5132.  
  5133.         intel_encoder->compute_config = intel_dp_compute_config;
  5134.         intel_encoder->disable = intel_disable_dp;
  5135.         intel_encoder->get_hw_state = intel_dp_get_hw_state;
  5136.         intel_encoder->get_config = intel_dp_get_config;
  5137.         intel_encoder->suspend = intel_dp_encoder_suspend;
  5138.         if (IS_CHERRYVIEW(dev)) {
  5139.                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
  5140.                 intel_encoder->pre_enable = chv_pre_enable_dp;
  5141.                 intel_encoder->enable = vlv_enable_dp;
  5142.                 intel_encoder->post_disable = chv_post_disable_dp;
  5143.         } else if (IS_VALLEYVIEW(dev)) {
  5144.                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
  5145.                 intel_encoder->pre_enable = vlv_pre_enable_dp;
  5146.                 intel_encoder->enable = vlv_enable_dp;
  5147.                 intel_encoder->post_disable = vlv_post_disable_dp;
  5148.         } else {
  5149.                 intel_encoder->pre_enable = g4x_pre_enable_dp;
  5150.                 intel_encoder->enable = g4x_enable_dp;
  5151.                 if (INTEL_INFO(dev)->gen >= 5)
  5152.                         intel_encoder->post_disable = ilk_post_disable_dp;
  5153.         }
  5154.  
  5155.         intel_dig_port->port = port;
  5156.         intel_dig_port->dp.output_reg = output_reg;
  5157.  
  5158.         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
  5159.         if (IS_CHERRYVIEW(dev)) {
  5160.                 if (port == PORT_D)
  5161.                         intel_encoder->crtc_mask = 1 << 2;
  5162.                 else
  5163.                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
  5164.         } else {
  5165.         intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
  5166.         }
  5167.         intel_encoder->cloneable = 0;
  5168.         intel_encoder->hot_plug = intel_dp_hot_plug;
  5169.  
  5170.         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
  5171.         dev_priv->hpd_irq_port[port] = intel_dig_port;
  5172.  
  5173.         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
  5174.                 drm_encoder_cleanup(encoder);
  5175.                 kfree(intel_dig_port);
  5176.                 kfree(intel_connector);
  5177.         }
  5178. }
  5179.  
  5180. void intel_dp_mst_suspend(struct drm_device *dev)
  5181. {
  5182.         struct drm_i915_private *dev_priv = dev->dev_private;
  5183.         int i;
  5184.  
  5185.         /* disable MST */
  5186.         for (i = 0; i < I915_MAX_PORTS; i++) {
  5187.                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
  5188.                 if (!intel_dig_port)
  5189.                         continue;
  5190.  
  5191.                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
  5192.                         if (!intel_dig_port->dp.can_mst)
  5193.                                 continue;
  5194.                         if (intel_dig_port->dp.is_mst)
  5195.                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
  5196.                 }
  5197.         }
  5198. }
  5199.  
  5200. void intel_dp_mst_resume(struct drm_device *dev)
  5201. {
  5202.         struct drm_i915_private *dev_priv = dev->dev_private;
  5203.         int i;
  5204.  
  5205.         for (i = 0; i < I915_MAX_PORTS; i++) {
  5206.                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
  5207.                 if (!intel_dig_port)
  5208.                         continue;
  5209.                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
  5210.                         int ret;
  5211.  
  5212.                         if (!intel_dig_port->dp.can_mst)
  5213.                                 continue;
  5214.  
  5215.                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
  5216.                         if (ret != 0) {
  5217.                                 intel_dp_check_mst_status(&intel_dig_port->dp);
  5218.                         }
  5219.                 }
  5220.         }
  5221. }
  5222.