Subversion Repositories Kolibri OS


  1. /*
  2.  * Copyright © 2006-2007 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21.  * DEALINGS IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *  Eric Anholt <eric@anholt.net>
  25.  */
  26.  
  27. #include <linux/dmi.h>
  28. #include <linux/module.h>
  29. //#include <linux/input.h>
  30. #include <linux/i2c.h>
  31. #include <linux/kernel.h>
  32. #include <linux/slab.h>
  33. #include <linux/vgaarb.h>
  34. #include <drm/drm_edid.h>
  35. #include <drm/drmP.h>
  36. #include "intel_drv.h"
  37. #include <drm/i915_drm.h>
  38. #include "i915_drv.h"
  39. #include "i915_trace.h"
  40. #include <drm/drm_dp_helper.h>
  41. #include <drm/drm_crtc_helper.h>
  42. #include <drm/drm_plane_helper.h>
  43. #include <drm/drm_rect.h>
  44. #include <linux/dma_remapping.h>
  45.  
  46. /* Primary plane formats supported by all gen */
  47. #define COMMON_PRIMARY_FORMATS \
  48.         DRM_FORMAT_C8, \
  49.         DRM_FORMAT_RGB565, \
  50.         DRM_FORMAT_XRGB8888, \
  51.         DRM_FORMAT_ARGB8888
  52.  
  53. /* Primary plane formats for gen <= 3 */
  54. static const uint32_t intel_primary_formats_gen2[] = {
  55.         COMMON_PRIMARY_FORMATS,
  56.         DRM_FORMAT_XRGB1555,
  57.         DRM_FORMAT_ARGB1555,
  58. };
  59.  
  60. /* Primary plane formats for gen >= 4 */
  61. static const uint32_t intel_primary_formats_gen4[] = {
  62.         COMMON_PRIMARY_FORMATS,
  63.         DRM_FORMAT_XBGR8888,
  64.         DRM_FORMAT_ABGR8888,
  65.         DRM_FORMAT_XRGB2101010,
  66.         DRM_FORMAT_ARGB2101010,
  67.         DRM_FORMAT_XBGR2101010,
  68.         DRM_FORMAT_ABGR2101010,
  69. };
  70.  
  71. /* Cursor formats */
  72. static const uint32_t intel_cursor_formats[] = {
  73.         DRM_FORMAT_ARGB8888,
  74. };
  75.  
  76. void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
  77.  
  78. static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  79.                                 struct intel_crtc_config *pipe_config);
  80. static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  81.                                     struct intel_crtc_config *pipe_config);
  82.  
  83. static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
  84.                           int x, int y, struct drm_framebuffer *old_fb);
  85. static int intel_framebuffer_init(struct drm_device *dev,
  86.                                   struct intel_framebuffer *ifb,
  87.                                   struct drm_mode_fb_cmd2 *mode_cmd,
  88.                                   struct drm_i915_gem_object *obj);
  89. static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
  90. static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
  91. static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  92.                                          struct intel_link_m_n *m_n,
  93.                                          struct intel_link_m_n *m2_n2);
  94. static void ironlake_set_pipeconf(struct drm_crtc *crtc);
  95. static void haswell_set_pipeconf(struct drm_crtc *crtc);
  96. static void intel_set_pipe_csc(struct drm_crtc *crtc);
  97. static void vlv_prepare_pll(struct intel_crtc *crtc,
  98.                             const struct intel_crtc_config *pipe_config);
  99. static void chv_prepare_pll(struct intel_crtc *crtc,
  100.                             const struct intel_crtc_config *pipe_config);
  101.  
  102. static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
  103. {
  104.         if (!connector->mst_port)
  105.                 return connector->encoder;
  106.         else
  107.                 return &connector->mst_port->mst_encoders[pipe]->base;
  108. }
  109.  
  110. typedef struct {
  111.     int min, max;
  112. } intel_range_t;
  113.  
  114. typedef struct {
  115.     int dot_limit;
  116.     int p2_slow, p2_fast;
  117. } intel_p2_t;
  118.  
  119. typedef struct intel_limit intel_limit_t;
  120. struct intel_limit {
  121.     intel_range_t   dot, vco, n, m, m1, m2, p, p1;
  122.     intel_p2_t      p2;
  123. };
  124.  
  125. int
  126. intel_pch_rawclk(struct drm_device *dev)
  127. {
  128.         struct drm_i915_private *dev_priv = dev->dev_private;
  129.  
  130.         WARN_ON(!HAS_PCH_SPLIT(dev));
  131.  
  132.         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
  133. }
  134.  
  135. static inline u32 /* units of 100MHz */
  136. intel_fdi_link_freq(struct drm_device *dev)
  137. {
  138.         if (IS_GEN5(dev)) {
  139.                 struct drm_i915_private *dev_priv = dev->dev_private;
  140.                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
  141.         } else
  142.                 return 27;
  143. }
  144.  
  145. static const intel_limit_t intel_limits_i8xx_dac = {
  146.         .dot = { .min = 25000, .max = 350000 },
  147.         .vco = { .min = 908000, .max = 1512000 },
  148.         .n = { .min = 2, .max = 16 },
  149.         .m = { .min = 96, .max = 140 },
  150.         .m1 = { .min = 18, .max = 26 },
  151.         .m2 = { .min = 6, .max = 16 },
  152.         .p = { .min = 4, .max = 128 },
  153.         .p1 = { .min = 2, .max = 33 },
  154.         .p2 = { .dot_limit = 165000,
  155.                 .p2_slow = 4, .p2_fast = 2 },
  156. };
  157.  
  158. static const intel_limit_t intel_limits_i8xx_dvo = {
  159.         .dot = { .min = 25000, .max = 350000 },
  160.         .vco = { .min = 908000, .max = 1512000 },
  161.         .n = { .min = 2, .max = 16 },
  162.         .m = { .min = 96, .max = 140 },
  163.         .m1 = { .min = 18, .max = 26 },
  164.         .m2 = { .min = 6, .max = 16 },
  165.         .p = { .min = 4, .max = 128 },
  166.         .p1 = { .min = 2, .max = 33 },
  167.         .p2 = { .dot_limit = 165000,
  168.                 .p2_slow = 4, .p2_fast = 4 },
  169. };
  170.  
  171. static const intel_limit_t intel_limits_i8xx_lvds = {
  172.         .dot = { .min = 25000, .max = 350000 },
  173.         .vco = { .min = 908000, .max = 1512000 },
  174.         .n = { .min = 2, .max = 16 },
  175.         .m = { .min = 96, .max = 140 },
  176.         .m1 = { .min = 18, .max = 26 },
  177.         .m2 = { .min = 6, .max = 16 },
  178.         .p = { .min = 4, .max = 128 },
  179.         .p1 = { .min = 1, .max = 6 },
  180.         .p2 = { .dot_limit = 165000,
  181.                 .p2_slow = 14, .p2_fast = 7 },
  182. };
  183.  
  184. static const intel_limit_t intel_limits_i9xx_sdvo = {
  185.         .dot = { .min = 20000, .max = 400000 },
  186.         .vco = { .min = 1400000, .max = 2800000 },
  187.         .n = { .min = 1, .max = 6 },
  188.         .m = { .min = 70, .max = 120 },
  189.         .m1 = { .min = 8, .max = 18 },
  190.         .m2 = { .min = 3, .max = 7 },
  191.         .p = { .min = 5, .max = 80 },
  192.         .p1 = { .min = 1, .max = 8 },
  193.         .p2 = { .dot_limit = 200000,
  194.                 .p2_slow = 10, .p2_fast = 5 },
  195. };
  196.  
  197. static const intel_limit_t intel_limits_i9xx_lvds = {
  198.         .dot = { .min = 20000, .max = 400000 },
  199.         .vco = { .min = 1400000, .max = 2800000 },
  200.         .n = { .min = 1, .max = 6 },
  201.         .m = { .min = 70, .max = 120 },
  202.         .m1 = { .min = 8, .max = 18 },
  203.         .m2 = { .min = 3, .max = 7 },
  204.         .p = { .min = 7, .max = 98 },
  205.         .p1 = { .min = 1, .max = 8 },
  206.         .p2 = { .dot_limit = 112000,
  207.                 .p2_slow = 14, .p2_fast = 7 },
  208. };
  209.  
  210.  
  211. static const intel_limit_t intel_limits_g4x_sdvo = {
  212.         .dot = { .min = 25000, .max = 270000 },
  213.         .vco = { .min = 1750000, .max = 3500000},
  214.         .n = { .min = 1, .max = 4 },
  215.         .m = { .min = 104, .max = 138 },
  216.         .m1 = { .min = 17, .max = 23 },
  217.         .m2 = { .min = 5, .max = 11 },
  218.         .p = { .min = 10, .max = 30 },
  219.         .p1 = { .min = 1, .max = 3},
  220.         .p2 = { .dot_limit = 270000,
  221.                 .p2_slow = 10,
  222.                 .p2_fast = 10
  223.         },
  224. };
  225.  
  226. static const intel_limit_t intel_limits_g4x_hdmi = {
  227.         .dot = { .min = 22000, .max = 400000 },
  228.         .vco = { .min = 1750000, .max = 3500000},
  229.         .n = { .min = 1, .max = 4 },
  230.         .m = { .min = 104, .max = 138 },
  231.         .m1 = { .min = 16, .max = 23 },
  232.         .m2 = { .min = 5, .max = 11 },
  233.         .p = { .min = 5, .max = 80 },
  234.         .p1 = { .min = 1, .max = 8},
  235.         .p2 = { .dot_limit = 165000,
  236.                 .p2_slow = 10, .p2_fast = 5 },
  237. };
  238.  
  239. static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
  240.         .dot = { .min = 20000, .max = 115000 },
  241.         .vco = { .min = 1750000, .max = 3500000 },
  242.         .n = { .min = 1, .max = 3 },
  243.         .m = { .min = 104, .max = 138 },
  244.         .m1 = { .min = 17, .max = 23 },
  245.         .m2 = { .min = 5, .max = 11 },
  246.         .p = { .min = 28, .max = 112 },
  247.         .p1 = { .min = 2, .max = 8 },
  248.         .p2 = { .dot_limit = 0,
  249.                 .p2_slow = 14, .p2_fast = 14
  250.         },
  251. };
  252.  
  253. static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
  254.         .dot = { .min = 80000, .max = 224000 },
  255.         .vco = { .min = 1750000, .max = 3500000 },
  256.         .n = { .min = 1, .max = 3 },
  257.         .m = { .min = 104, .max = 138 },
  258.         .m1 = { .min = 17, .max = 23 },
  259.         .m2 = { .min = 5, .max = 11 },
  260.         .p = { .min = 14, .max = 42 },
  261.         .p1 = { .min = 2, .max = 6 },
  262.         .p2 = { .dot_limit = 0,
  263.                 .p2_slow = 7, .p2_fast = 7
  264.         },
  265. };
  266.  
  267. static const intel_limit_t intel_limits_pineview_sdvo = {
  268.         .dot = { .min = 20000, .max = 400000},
  269.         .vco = { .min = 1700000, .max = 3500000 },
  270.         /* Pineview's N counter is a ring counter */
  271.         .n = { .min = 3, .max = 6 },
  272.         .m = { .min = 2, .max = 256 },
  273.         /* Pineview only has one combined m divider, which we treat as m2. */
  274.         .m1 = { .min = 0, .max = 0 },
  275.         .m2 = { .min = 0, .max = 254 },
  276.         .p = { .min = 5, .max = 80 },
  277.         .p1 = { .min = 1, .max = 8 },
  278.         .p2 = { .dot_limit = 200000,
  279.                 .p2_slow = 10, .p2_fast = 5 },
  280. };
  281.  
  282. static const intel_limit_t intel_limits_pineview_lvds = {
  283.         .dot = { .min = 20000, .max = 400000 },
  284.         .vco = { .min = 1700000, .max = 3500000 },
  285.         .n = { .min = 3, .max = 6 },
  286.         .m = { .min = 2, .max = 256 },
  287.         .m1 = { .min = 0, .max = 0 },
  288.         .m2 = { .min = 0, .max = 254 },
  289.         .p = { .min = 7, .max = 112 },
  290.         .p1 = { .min = 1, .max = 8 },
  291.         .p2 = { .dot_limit = 112000,
  292.                 .p2_slow = 14, .p2_fast = 14 },
  293. };
  294.  
  295. /* Ironlake / Sandybridge
  296.  *
  297.  * We calculate clock using (register_value + 2) for N/M1/M2, so here
  298.  * the range value for them is (actual_value - 2).
  299.  */
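/*
 * For example, the .m1 = { .min = 12, .max = 22 } entries below are
 * register values; by the (register_value + 2) rule above they describe
 * actual M1 dividers of 14..24.
 */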
  300. static const intel_limit_t intel_limits_ironlake_dac = {
  301.         .dot = { .min = 25000, .max = 350000 },
  302.         .vco = { .min = 1760000, .max = 3510000 },
  303.         .n = { .min = 1, .max = 5 },
  304.         .m = { .min = 79, .max = 127 },
  305.         .m1 = { .min = 12, .max = 22 },
  306.         .m2 = { .min = 5, .max = 9 },
  307.         .p = { .min = 5, .max = 80 },
  308.         .p1 = { .min = 1, .max = 8 },
  309.         .p2 = { .dot_limit = 225000,
  310.                 .p2_slow = 10, .p2_fast = 5 },
  311. };
  312.  
  313. static const intel_limit_t intel_limits_ironlake_single_lvds = {
  314.         .dot = { .min = 25000, .max = 350000 },
  315.         .vco = { .min = 1760000, .max = 3510000 },
  316.         .n = { .min = 1, .max = 3 },
  317.         .m = { .min = 79, .max = 118 },
  318.         .m1 = { .min = 12, .max = 22 },
  319.         .m2 = { .min = 5, .max = 9 },
  320.         .p = { .min = 28, .max = 112 },
  321.         .p1 = { .min = 2, .max = 8 },
  322.         .p2 = { .dot_limit = 225000,
  323.                 .p2_slow = 14, .p2_fast = 14 },
  324. };
  325.  
  326. static const intel_limit_t intel_limits_ironlake_dual_lvds = {
  327.         .dot = { .min = 25000, .max = 350000 },
  328.         .vco = { .min = 1760000, .max = 3510000 },
  329.         .n = { .min = 1, .max = 3 },
  330.         .m = { .min = 79, .max = 127 },
  331.         .m1 = { .min = 12, .max = 22 },
  332.         .m2 = { .min = 5, .max = 9 },
  333.         .p = { .min = 14, .max = 56 },
  334.         .p1 = { .min = 2, .max = 8 },
  335.         .p2 = { .dot_limit = 225000,
  336.                 .p2_slow = 7, .p2_fast = 7 },
  337. };
  338.  
  339. /* LVDS 100 MHz refclk limits. */
  340. static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
  341.         .dot = { .min = 25000, .max = 350000 },
  342.         .vco = { .min = 1760000, .max = 3510000 },
  343.         .n = { .min = 1, .max = 2 },
  344.         .m = { .min = 79, .max = 126 },
  345.         .m1 = { .min = 12, .max = 22 },
  346.         .m2 = { .min = 5, .max = 9 },
  347.         .p = { .min = 28, .max = 112 },
  348.         .p1 = { .min = 2, .max = 8 },
  349.         .p2 = { .dot_limit = 225000,
  350.                 .p2_slow = 14, .p2_fast = 14 },
  351. };
  352.  
  353. static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
  354.         .dot = { .min = 25000, .max = 350000 },
  355.         .vco = { .min = 1760000, .max = 3510000 },
  356.         .n = { .min = 1, .max = 3 },
  357.         .m = { .min = 79, .max = 126 },
  358.         .m1 = { .min = 12, .max = 22 },
  359.         .m2 = { .min = 5, .max = 9 },
  360.         .p = { .min = 14, .max = 42 },
  361.         .p1 = { .min = 2, .max = 6 },
  362.         .p2 = { .dot_limit = 225000,
  363.                 .p2_slow = 7, .p2_fast = 7 },
  364. };
  365.  
  366. static const intel_limit_t intel_limits_vlv = {
  367.          /*
  368.           * These are the data rate limits (measured in fast clocks)
  369.           * since those are the strictest limits we have. The fast
  370.           * clock and actual rate limits are more relaxed, so checking
  371.           * them would make no difference.
  372.           */
  373.         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
  374.         .vco = { .min = 4000000, .max = 6000000 },
  375.         .n = { .min = 1, .max = 7 },
  376.         .m1 = { .min = 2, .max = 3 },
  377.         .m2 = { .min = 11, .max = 156 },
  378.         .p1 = { .min = 2, .max = 3 },
  379.         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
  380. };
  381.  
  382. static const intel_limit_t intel_limits_chv = {
  383.         /*
  384.          * These are the data rate limits (measured in fast clocks)
  385.          * since those are the strictest limits we have.  The fast
  386.          * clock and actual rate limits are more relaxed, so checking
  387.          * them would make no difference.
  388.          */
  389.         .dot = { .min = 25000 * 5, .max = 540000 * 5},
  390.         .vco = { .min = 4860000, .max = 6700000 },
  391.         .n = { .min = 1, .max = 1 },
  392.         .m1 = { .min = 2, .max = 2 },
  393.         .m2 = { .min = 24 << 22, .max = 175 << 22 },
  394.         .p1 = { .min = 2, .max = 4 },
  395.         .p2 = { .p2_slow = 1, .p2_fast = 14 },
  396. };
  397.  
  398. static void vlv_clock(int refclk, intel_clock_t *clock)
  399. {
  400.         clock->m = clock->m1 * clock->m2;
  401.         clock->p = clock->p1 * clock->p2;
  402.         if (WARN_ON(clock->n == 0 || clock->p == 0))
  403.                 return;
  404.         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
  405.         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  406. }
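/*
 * Worked example of the formula above (divider values are hypothetical but
 * lie within intel_limits_vlv): with a 100 MHz reference (refclk = 100000,
 * in kHz), m1 = 2, m2 = 120, n = 4, p1 = 2, p2 = 10:
 *   m   = 2 * 120 = 240,  p = 2 * 10 = 20
 *   vco = 100000 * 240 / 4 = 6000000 kHz (6 GHz)
 *   dot = 6000000 / 20     = 300000 kHz (300 MHz fast clock)
 */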
  407.  
  408. /**
  409.  * Returns whether any output on the specified pipe is of the specified type
  410.  */
  411. bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
  412. {
  413.         struct drm_device *dev = crtc->base.dev;
  414.         struct intel_encoder *encoder;
  415.  
  416.         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
  417.                 if (encoder->type == type)
  418.                         return true;
  419.  
  420.         return false;
  421. }
  422.  
  423. /**
  424.  * Returns whether any output on the specified pipe will have the specified
  425.  * type after a staged modeset is complete, i.e., the same as
  426.  * intel_pipe_has_type() but looking at encoder->new_crtc instead of
  427.  * encoder->crtc.
  428.  */
  429. static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
  430. {
  431.         struct drm_device *dev = crtc->base.dev;
  432.         struct intel_encoder *encoder;
  433.  
  434.         for_each_intel_encoder(dev, encoder)
  435.                 if (encoder->new_crtc == crtc && encoder->type == type)
  436.                         return true;
  437.  
  438.         return false;
  439. }
  440.  
  441. static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
  442.                                                 int refclk)
  443. {
  444.         struct drm_device *dev = crtc->base.dev;
  445.         const intel_limit_t *limit;
  446.  
  447.         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
  448.                 if (intel_is_dual_link_lvds(dev)) {
  449.                         if (refclk == 100000)
  450.                                 limit = &intel_limits_ironlake_dual_lvds_100m;
  451.                         else
  452.                                 limit = &intel_limits_ironlake_dual_lvds;
  453.                 } else {
  454.                         if (refclk == 100000)
  455.                                 limit = &intel_limits_ironlake_single_lvds_100m;
  456.                         else
  457.                                 limit = &intel_limits_ironlake_single_lvds;
  458.                 }
  459.         } else
  460.                 limit = &intel_limits_ironlake_dac;
  461.  
  462.         return limit;
  463. }
  464.  
  465. static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
  466. {
  467.         struct drm_device *dev = crtc->base.dev;
  468.         const intel_limit_t *limit;
  469.  
  470.         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
  471.                 if (intel_is_dual_link_lvds(dev))
  472.                         limit = &intel_limits_g4x_dual_channel_lvds;
  473.                 else
  474.                         limit = &intel_limits_g4x_single_channel_lvds;
  475.         } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
  476.                    intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
  477.                 limit = &intel_limits_g4x_hdmi;
  478.         } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
  479.                 limit = &intel_limits_g4x_sdvo;
  480.         } else /* The option is for other outputs */
  481.                 limit = &intel_limits_i9xx_sdvo;
  482.  
  483.         return limit;
  484. }
  485.  
  486. static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
  487. {
  488.         struct drm_device *dev = crtc->base.dev;
  489.         const intel_limit_t *limit;
  490.  
  491.         if (HAS_PCH_SPLIT(dev))
  492.                 limit = intel_ironlake_limit(crtc, refclk);
  493.         else if (IS_G4X(dev)) {
  494.                 limit = intel_g4x_limit(crtc);
  495.         } else if (IS_PINEVIEW(dev)) {
  496.                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
  497.                         limit = &intel_limits_pineview_lvds;
  498.                 else
  499.                         limit = &intel_limits_pineview_sdvo;
  500.         } else if (IS_CHERRYVIEW(dev)) {
  501.                 limit = &intel_limits_chv;
  502.         } else if (IS_VALLEYVIEW(dev)) {
  503.                 limit = &intel_limits_vlv;
  504.         } else if (!IS_GEN2(dev)) {
  505.                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
  506.                         limit = &intel_limits_i9xx_lvds;
  507.                 else
  508.                         limit = &intel_limits_i9xx_sdvo;
  509.         } else {
  510.                 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
  511.                         limit = &intel_limits_i8xx_lvds;
  512.                 else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
  513.                         limit = &intel_limits_i8xx_dvo;
  514.                 else
  515.                         limit = &intel_limits_i8xx_dac;
  516.         }
  517.         return limit;
  518. }
  519.  
  520. /* m1 is reserved as 0 in Pineview, n is a ring counter */
  521. static void pineview_clock(int refclk, intel_clock_t *clock)
  522. {
  523.         clock->m = clock->m2 + 2;
  524.         clock->p = clock->p1 * clock->p2;
  525.         if (WARN_ON(clock->n == 0 || clock->p == 0))
  526.                 return;
  527.         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
  528.         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  529. }
  530.  
  531. static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
  532. {
  533.         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
  534. }
  535.  
  536. static void i9xx_clock(int refclk, intel_clock_t *clock)
  537. {
  538.         clock->m = i9xx_dpll_compute_m(clock);
  539.         clock->p = clock->p1 * clock->p2;
  540.         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
  541.                 return;
  542.         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
  543.         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  544. }
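/*
 * Worked example of the formula above (divider values are hypothetical but
 * lie within intel_limits_i9xx_sdvo): with refclk = 96000 kHz, m1 = 12,
 * m2 = 5, n = 3, p1 = 2, p2 = 10:
 *   m   = 5 * (12 + 2) + (5 + 2) = 77
 *   vco = 96000 * 77 / (3 + 2)   = 1478400 kHz
 *   dot = 1478400 / (2 * 10)     = 73920 kHz (~73.9 MHz)
 */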
  545.  
  546. static void chv_clock(int refclk, intel_clock_t *clock)
  547. {
  548.         clock->m = clock->m1 * clock->m2;
  549.         clock->p = clock->p1 * clock->p2;
  550.         if (WARN_ON(clock->n == 0 || clock->p == 0))
  551.                 return;
  552.         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
  553.                         clock->n << 22);
  554.         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  555. }
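/*
 * Note on the CHV math above: m2 is kept in fixed point with 22 fractional
 * bits (see intel_limits_chv, where .m2 ranges from 24 << 22 to 175 << 22),
 * so the VCO divide uses n << 22 to cancel that scaling.
 */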
  556.  
  557. #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
  558. /**
  559.  * Returns whether the given set of divisors are valid for a given refclk with
  560.  * the given connectors.
  561.  */
  562.  
  563. static bool intel_PLL_is_valid(struct drm_device *dev,
  564.                                const intel_limit_t *limit,
  565.                                const intel_clock_t *clock)
  566. {
  567.         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
  568.                 INTELPllInvalid("n out of range\n");
  569.         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
  570.                 INTELPllInvalid("p1 out of range\n");
  571.         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
  572.                 INTELPllInvalid("m2 out of range\n");
  573.         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
  574.                 INTELPllInvalid("m1 out of range\n");
  575.  
  576.         if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
  577.                 if (clock->m1 <= clock->m2)
  578.                         INTELPllInvalid("m1 <= m2\n");
  579.  
  580.         if (!IS_VALLEYVIEW(dev)) {
  581.                 if (clock->p < limit->p.min || limit->p.max < clock->p)
  582.                         INTELPllInvalid("p out of range\n");
  583.                 if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
  584.                         INTELPllInvalid("m out of range\n");
  585.         }
  586.  
  587.         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
  588.                 INTELPllInvalid("vco out of range\n");
  589.         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
  590.          * connector, etc., rather than just a single range.
  591.          */
  592.         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
  593.                 INTELPllInvalid("dot out of range\n");
  594.  
  595.         return true;
  596. }
  597.  
  598. static bool
  599. i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
  600.                     int target, int refclk, intel_clock_t *match_clock,
  601.                     intel_clock_t *best_clock)
  602. {
  603.         struct drm_device *dev = crtc->base.dev;
  604.         intel_clock_t clock;
  605.         int err = target;
  606.  
  607.         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
  608.                 /*
  609.                  * For LVDS just rely on its current settings for dual-channel.
  610.                  * We haven't figured out how to reliably set up different
  611.                  * single/dual channel state, if we even can.
  612.                  */
  613.                 if (intel_is_dual_link_lvds(dev))
  614.                         clock.p2 = limit->p2.p2_fast;
  615.                 else
  616.                         clock.p2 = limit->p2.p2_slow;
  617.         } else {
  618.                 if (target < limit->p2.dot_limit)
  619.                         clock.p2 = limit->p2.p2_slow;
  620.                 else
  621.                         clock.p2 = limit->p2.p2_fast;
  622.         }
  623.  
  624.         memset(best_clock, 0, sizeof(*best_clock));
  625.  
  626.         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
  627.              clock.m1++) {
  628.                 for (clock.m2 = limit->m2.min;
  629.                      clock.m2 <= limit->m2.max; clock.m2++) {
  630.                         if (clock.m2 >= clock.m1)
  631.                                 break;
  632.                         for (clock.n = limit->n.min;
  633.                              clock.n <= limit->n.max; clock.n++) {
  634.                                 for (clock.p1 = limit->p1.min;
  635.                                         clock.p1 <= limit->p1.max; clock.p1++) {
  636.                                         int this_err;
  637.  
  638.                                         i9xx_clock(refclk, &clock);
  639.                                         if (!intel_PLL_is_valid(dev, limit,
  640.                                                                 &clock))
  641.                                                 continue;
  642.                                         if (match_clock &&
  643.                                             clock.p != match_clock->p)
  644.                                                 continue;
  645.  
  646.                                         this_err = abs(clock.dot - target);
  647.                                         if (this_err < err) {
  648.                                                 *best_clock = clock;
  649.                                                 err = this_err;
  650.                                         }
  651.                                 }
  652.                         }
  653.                 }
  654.         }
  655.  
  656.         return (err != target);
  657. }
  658.  
  659. static bool
  660. pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
  661.                    int target, int refclk, intel_clock_t *match_clock,
  662.                    intel_clock_t *best_clock)
  663. {
  664.         struct drm_device *dev = crtc->base.dev;
  665.         intel_clock_t clock;
  666.         int err = target;
  667.  
  668.         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
  669.                 /*
  670.                  * For LVDS just rely on its current settings for dual-channel.
  671.                  * We haven't figured out how to reliably set up different
  672.                  * single/dual channel state, if we even can.
  673.                  */
  674.                 if (intel_is_dual_link_lvds(dev))
  675.                         clock.p2 = limit->p2.p2_fast;
  676.                 else
  677.                         clock.p2 = limit->p2.p2_slow;
  678.         } else {
  679.                 if (target < limit->p2.dot_limit)
  680.                         clock.p2 = limit->p2.p2_slow;
  681.                 else
  682.                         clock.p2 = limit->p2.p2_fast;
  683.         }
  684.  
  685.         memset(best_clock, 0, sizeof(*best_clock));
  686.  
  687.         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
  688.              clock.m1++) {
  689.                 for (clock.m2 = limit->m2.min;
  690.                      clock.m2 <= limit->m2.max; clock.m2++) {
  691.                         for (clock.n = limit->n.min;
  692.                              clock.n <= limit->n.max; clock.n++) {
  693.                                 for (clock.p1 = limit->p1.min;
  694.                                         clock.p1 <= limit->p1.max; clock.p1++) {
  695.                                         int this_err;
  696.  
  697.                                         pineview_clock(refclk, &clock);
  698.                                         if (!intel_PLL_is_valid(dev, limit,
  699.                                                                 &clock))
  700.                                                 continue;
  701.                                         if (match_clock &&
  702.                                             clock.p != match_clock->p)
  703.                                                 continue;
  704.  
  705.                                         this_err = abs(clock.dot - target);
  706.                                         if (this_err < err) {
  707.                                                 *best_clock = clock;
  708.                                                 err = this_err;
  709.                                         }
  710.                                 }
  711.                         }
  712.                 }
  713.         }
  714.  
  715.         return (err != target);
  716. }
  717.  
  718. static bool
  719. g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
  720.                         int target, int refclk, intel_clock_t *match_clock,
  721.                         intel_clock_t *best_clock)
  722. {
  723.         struct drm_device *dev = crtc->base.dev;
  724.         intel_clock_t clock;
  725.         int max_n;
  726.         bool found;
  727.         /* approximately equals target * 0.00585 */
  728.         int err_most = (target >> 8) + (target >> 9);
  729.         found = false;
  730.  
  731.         if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
  732.                 if (intel_is_dual_link_lvds(dev))
  733.                         clock.p2 = limit->p2.p2_fast;
  734.                 else
  735.                         clock.p2 = limit->p2.p2_slow;
  736.         } else {
  737.                 if (target < limit->p2.dot_limit)
  738.                         clock.p2 = limit->p2.p2_slow;
  739.                 else
  740.                         clock.p2 = limit->p2.p2_fast;
  741.         }
  742.  
  743.         memset(best_clock, 0, sizeof(*best_clock));
  744.         max_n = limit->n.max;
  745.         /* based on hardware requirement, prefer smaller n to precision */
  746.         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
  747.                 /* based on hardware requirement, prefer larger m1, m2 */
  748.                 for (clock.m1 = limit->m1.max;
  749.                      clock.m1 >= limit->m1.min; clock.m1--) {
  750.                         for (clock.m2 = limit->m2.max;
  751.                              clock.m2 >= limit->m2.min; clock.m2--) {
  752.                                 for (clock.p1 = limit->p1.max;
  753.                                      clock.p1 >= limit->p1.min; clock.p1--) {
  754.                                         int this_err;
  755.  
  756.                                         i9xx_clock(refclk, &clock);
  757.                                         if (!intel_PLL_is_valid(dev, limit,
  758.                                                                 &clock))
  759.                                                 continue;
  760.  
  761.                                         this_err = abs(clock.dot - target);
  762.                                         if (this_err < err_most) {
  763.                                                 *best_clock = clock;
  764.                                                 err_most = this_err;
  765.                                                 max_n = clock.n;
  766.                                                 found = true;
  767.                                         }
  768.                                 }
  769.                         }
  770.                 }
  771.         }
  772.         return found;
  773. }
  774.  
  775. static bool
  776. vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
  777.                         int target, int refclk, intel_clock_t *match_clock,
  778.                         intel_clock_t *best_clock)
  779. {
  780.         struct drm_device *dev = crtc->base.dev;
  781.         intel_clock_t clock;
  782.         unsigned int bestppm = 1000000;
  783.         /* min update 19.2 MHz */
  784.         int max_n = min(limit->n.max, refclk / 19200);
  785.         bool found = false;
  786.  
  787.         target *= 5; /* fast clock */
  788.  
  789.         memset(best_clock, 0, sizeof(*best_clock));
  790.  
  791.         /* based on hardware requirement, prefer smaller n to precision */
  792.         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
  793.                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
  794.                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
  795.                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
  796.                                 clock.p = clock.p1 * clock.p2;
  797.                                 /* based on hardware requirement, prefer bigger m1,m2 values */
  798.                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
  799.                                         unsigned int ppm, diff;
  800.  
  801.                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
  802.                                                                      refclk * clock.m1);
  803.  
  804.                                         vlv_clock(refclk, &clock);
  805.  
  806.                                         if (!intel_PLL_is_valid(dev, limit,
  807.                                                                 &clock))
  808.                                                 continue;
  809.  
  810.                                         diff = abs(clock.dot - target);
  811.                                         ppm = div_u64(1000000ULL * diff, target);
  812.  
  813.                                         if (ppm < 100 && clock.p > best_clock->p) {
  814.                                                 bestppm = 0;
  815.                                                 *best_clock = clock;
  816.                                                 found = true;
  817.                                         }
  818.
  819.                                         if (bestppm >= 10 && ppm < bestppm - 10) {
  820.                                                 bestppm = ppm;
  821.                                                 *best_clock = clock;
  822.                                                 found = true;
  823.                                         }
  824.                                 }
  825.                         }
  826.                 }
  827.         }
  828.  
  829.         return found;
  830. }
  831.  
  832. static bool
  833. chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
  834.                    int target, int refclk, intel_clock_t *match_clock,
  835.                    intel_clock_t *best_clock)
  836. {
  837.         struct drm_device *dev = crtc->base.dev;
  838.         intel_clock_t clock;
  839.         uint64_t m2;
  840.         bool found = false;
  841.  
  842.         memset(best_clock, 0, sizeof(*best_clock));
  843.  
  844.         /*
  845.          * Based on the hardware doc, n is always set to 1 and m1 is
  846.          * always set to 2.  If we ever need to support a 200 MHz refclk,
  847.          * we need to revisit this because n may no longer be 1.
  848.          */
  849.         clock.n = 1, clock.m1 = 2;
  850.         target *= 5;    /* fast clock */
  851.  
  852.         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
  853.                 for (clock.p2 = limit->p2.p2_fast;
  854.                                 clock.p2 >= limit->p2.p2_slow;
  855.                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
  856.  
  857.                         clock.p = clock.p1 * clock.p2;
  858.  
  859.                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
  860.                                         clock.n) << 22, refclk * clock.m1);
  861.  
  862.                         if (m2 > INT_MAX/clock.m1)
  863.                                 continue;
  864.  
  865.                         clock.m2 = m2;
  866.  
  867.                         chv_clock(refclk, &clock);
  868.  
  869.                         if (!intel_PLL_is_valid(dev, limit, &clock))
  870.                                 continue;
  871.  
  872.                         /* based on hardware requirement, prefer bigger p
  873.                          */
  874.                         if (clock.p > best_clock->p) {
  875.                                 *best_clock = clock;
  876.                                 found = true;
  877.                         }
  878.                 }
  879.         }
  880.  
  881.         return found;
  882. }
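/*
 * Sketch of the search above: with n and m1 fixed, the dot clock equation
 * dot = (refclk * m1 * m2 / n) / (p1 * p2) is solved directly for m2 in
 * the same 22-bit fixed point the hardware uses, i.e.
 * m2 = (target * p * n << 22) / (refclk * m1), and among the valid results
 * the largest p is preferred.
 */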
  883.  
  884. bool intel_crtc_active(struct drm_crtc *crtc)
  885. {
  886.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  887.  
  888.         /* Be paranoid as we can arrive here with only partial
  889.          * state retrieved from the hardware during setup.
  890.          *
  891.          * We can ditch the adjusted_mode.crtc_clock check as soon
  892.          * as Haswell has gained clock readout/fastboot support.
  893.          *
  894.          * We can ditch the crtc->primary->fb check as soon as we can
  895.          * properly reconstruct framebuffers.
  896.          */
  897.         return intel_crtc->active && crtc->primary->fb &&
  898.                 intel_crtc->config.adjusted_mode.crtc_clock;
  899. }
  900.  
  901. enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
  902.                                              enum pipe pipe)
  903. {
  904.         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  905.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  906.  
  907.         return intel_crtc->config.cpu_transcoder;
  908. }
  909.  
  910. static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
  911. {
  912.         struct drm_i915_private *dev_priv = dev->dev_private;
  913.         u32 reg = PIPEDSL(pipe);
  914.         u32 line1, line2;
  915.         u32 line_mask;
  916.  
  917.         if (IS_GEN2(dev))
  918.                 line_mask = DSL_LINEMASK_GEN2;
  919.         else
  920.                 line_mask = DSL_LINEMASK_GEN3;
  921.  
  922.         line1 = I915_READ(reg) & line_mask;
  923.         mdelay(5);
  924.         line2 = I915_READ(reg) & line_mask;
  925.  
  926.         return line1 == line2;
  927. }
  928.  
  929. /*
  930.  * intel_wait_for_pipe_off - wait for pipe to turn off
  931.  * @crtc: crtc whose pipe to wait for
  932.  *
  933.  * After disabling a pipe, we can't wait for vblank in the usual way,
  934.  * spinning on the vblank interrupt status bit, since we won't actually
  935.  * see an interrupt when the pipe is disabled.
  936.  *
  937.  * On Gen4 and above:
  938.  *   wait for the pipe register state bit to turn off
  939.  *
  940.  * Otherwise:
  941.  *   wait for the display line value to settle (it usually
  942.  *   ends up stopping at the start of the next frame).
  943.  *
  944.  */
  945. static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
  946. {
  947.         struct drm_device *dev = crtc->base.dev;
  948.         struct drm_i915_private *dev_priv = dev->dev_private;
  949.         enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
  950.         enum pipe pipe = crtc->pipe;
  951.  
  952.         if (INTEL_INFO(dev)->gen >= 4) {
  953.                 int reg = PIPECONF(cpu_transcoder);
  954.  
  955.                 /* Wait for the Pipe State to go off */
  956.                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
  957.                              100))
  958.                         WARN(1, "pipe_off wait timed out\n");
  959.         } else {
  960.                 /* Wait for the display line to settle */
  961.                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
  962.                         WARN(1, "pipe_off wait timed out\n");
  963.         }
  964. }
  965.  
  966. /*
  967.  * ibx_digital_port_connected - is the specified port connected?
  968.  * @dev_priv: i915 private structure
  969.  * @port: the port to test
  970.  *
  971.  * Returns true if @port is connected, false otherwise.
  972.  */
  973. bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
  974.                                 struct intel_digital_port *port)
  975. {
  976.         u32 bit;
  977.  
  978.         if (HAS_PCH_IBX(dev_priv->dev)) {
  979.                 switch (port->port) {
  980.                 case PORT_B:
  981.                         bit = SDE_PORTB_HOTPLUG;
  982.                         break;
  983.                 case PORT_C:
  984.                         bit = SDE_PORTC_HOTPLUG;
  985.                         break;
  986.                 case PORT_D:
  987.                         bit = SDE_PORTD_HOTPLUG;
  988.                         break;
  989.                 default:
  990.                         return true;
  991.                 }
  992.         } else {
  993.                 switch (port->port) {
  994.                 case PORT_B:
  995.                         bit = SDE_PORTB_HOTPLUG_CPT;
  996.                         break;
  997.                 case PORT_C:
  998.                         bit = SDE_PORTC_HOTPLUG_CPT;
  999.                         break;
  1000.                 case PORT_D:
  1001.                         bit = SDE_PORTD_HOTPLUG_CPT;
  1002.                         break;
  1003.                 default:
  1004.                         return true;
  1005.                 }
  1006.         }
  1007.  
  1008.         return I915_READ(SDEISR) & bit;
  1009. }
  1010.  
  1011. static const char *state_string(bool enabled)
  1012. {
  1013.         return enabled ? "on" : "off";
  1014. }
  1015.  
  1016. /* Only for pre-ILK configs */
  1017. void assert_pll(struct drm_i915_private *dev_priv,
  1018.                        enum pipe pipe, bool state)
  1019. {
  1020.         int reg;
  1021.         u32 val;
  1022.         bool cur_state;
  1023.  
  1024.         reg = DPLL(pipe);
  1025.         val = I915_READ(reg);
  1026.         cur_state = !!(val & DPLL_VCO_ENABLE);
  1027.         WARN(cur_state != state,
  1028.              "PLL state assertion failure (expected %s, current %s)\n",
  1029.              state_string(state), state_string(cur_state));
  1030. }
  1031.  
  1032. /* XXX: the dsi pll is shared between MIPI DSI ports */
  1033. static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
  1034. {
  1035.         u32 val;
  1036.         bool cur_state;
  1037.  
  1038.         mutex_lock(&dev_priv->dpio_lock);
  1039.         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
  1040.         mutex_unlock(&dev_priv->dpio_lock);
  1041.  
  1042.         cur_state = val & DSI_PLL_VCO_EN;
  1043.         WARN(cur_state != state,
  1044.              "DSI PLL state assertion failure (expected %s, current %s)\n",
  1045.              state_string(state), state_string(cur_state));
  1046. }
  1047. #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
  1048. #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
  1049.  
  1050. struct intel_shared_dpll *
  1051. intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
  1052. {
  1053.         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  1054.  
  1055.         if (crtc->config.shared_dpll < 0)
  1056.                 return NULL;
  1057.  
  1058.         return &dev_priv->shared_dplls[crtc->config.shared_dpll];
  1059. }
  1060.  
  1061. /* For ILK+ */
  1062. void assert_shared_dpll(struct drm_i915_private *dev_priv,
  1063.                                struct intel_shared_dpll *pll,
  1064.                            bool state)
  1065. {
  1066.         bool cur_state;
  1067.         struct intel_dpll_hw_state hw_state;
  1068.  
  1069.         if (WARN(!pll,
  1070.                   "asserting DPLL %s with no DPLL\n", state_string(state)))
  1071.                 return;
  1072.  
  1073.         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
  1074.         WARN(cur_state != state,
  1075.              "%s assertion failure (expected %s, current %s)\n",
  1076.              pll->name, state_string(state), state_string(cur_state));
  1077. }
  1078.  
  1079. static void assert_fdi_tx(struct drm_i915_private *dev_priv,
  1080.                           enum pipe pipe, bool state)
  1081. {
  1082.         int reg;
  1083.         u32 val;
  1084.         bool cur_state;
  1085.         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
  1086.                                                                       pipe);
  1087.  
  1088.         if (HAS_DDI(dev_priv->dev)) {
  1089.                 /* DDI does not have a specific FDI_TX register */
  1090.                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
  1091.                 val = I915_READ(reg);
  1092.                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
  1093.         } else {
  1094.                 reg = FDI_TX_CTL(pipe);
  1095.                 val = I915_READ(reg);
  1096.                 cur_state = !!(val & FDI_TX_ENABLE);
  1097.         }
  1098.         WARN(cur_state != state,
  1099.              "FDI TX state assertion failure (expected %s, current %s)\n",
  1100.              state_string(state), state_string(cur_state));
  1101. }
  1102. #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
  1103. #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
  1104.  
  1105. static void assert_fdi_rx(struct drm_i915_private *dev_priv,
  1106.                           enum pipe pipe, bool state)
  1107. {
  1108.         int reg;
  1109.         u32 val;
  1110.         bool cur_state;
  1111.  
  1112.         reg = FDI_RX_CTL(pipe);
  1113.         val = I915_READ(reg);
  1114.         cur_state = !!(val & FDI_RX_ENABLE);
  1115.         WARN(cur_state != state,
  1116.              "FDI RX state assertion failure (expected %s, current %s)\n",
  1117.              state_string(state), state_string(cur_state));
  1118. }
  1119. #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
  1120. #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
  1121.  
  1122. static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
  1123.                                       enum pipe pipe)
  1124. {
  1125.         int reg;
  1126.         u32 val;
  1127.  
  1128.         /* ILK FDI PLL is always enabled */
  1129.         if (INTEL_INFO(dev_priv->dev)->gen == 5)
  1130.                 return;
  1131.  
  1132.         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
  1133.         if (HAS_DDI(dev_priv->dev))
  1134.                 return;
  1135.  
  1136.         reg = FDI_TX_CTL(pipe);
  1137.         val = I915_READ(reg);
  1138.         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
  1139. }
  1140.  
  1141. void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
  1142.                        enum pipe pipe, bool state)
  1143. {
  1144.         int reg;
  1145.         u32 val;
  1146.         bool cur_state;
  1147.  
  1148.         reg = FDI_RX_CTL(pipe);
  1149.         val = I915_READ(reg);
  1150.         cur_state = !!(val & FDI_RX_PLL_ENABLE);
  1151.         WARN(cur_state != state,
  1152.              "FDI RX PLL assertion failure (expected %s, current %s)\n",
  1153.              state_string(state), state_string(cur_state));
  1154. }
  1155.  
  1156. void assert_panel_unlocked(struct drm_i915_private *dev_priv,
  1157.                                   enum pipe pipe)
  1158. {
  1159.         struct drm_device *dev = dev_priv->dev;
  1160.         int pp_reg;
  1161.         u32 val;
  1162.         enum pipe panel_pipe = PIPE_A;
  1163.         bool locked = true;
  1164.  
  1165.         if (WARN_ON(HAS_DDI(dev)))
  1166.                 return;
  1167.  
  1168.         if (HAS_PCH_SPLIT(dev)) {
  1169.                 u32 port_sel;
  1170.  
  1171.                 pp_reg = PCH_PP_CONTROL;
  1172.                 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
  1173.  
  1174.                 if (port_sel == PANEL_PORT_SELECT_LVDS &&
  1175.                     I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
  1176.                         panel_pipe = PIPE_B;
  1177.                 /* XXX: else fix for eDP */
  1178.         } else if (IS_VALLEYVIEW(dev)) {
  1179.                 /* presumably write lock depends on pipe, not port select */
  1180.                 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
  1181.                 panel_pipe = pipe;
  1182.         } else {
  1183.                 pp_reg = PP_CONTROL;
  1184.                 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
  1185.                         panel_pipe = PIPE_B;
  1186.         }
  1187.  
  1188.         val = I915_READ(pp_reg);
  1189.         if (!(val & PANEL_POWER_ON) ||
  1190.             ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
  1191.                 locked = false;
  1192.  
  1193.         WARN(panel_pipe == pipe && locked,
  1194.              "panel assertion failure, pipe %c regs locked\n",
  1195.              pipe_name(pipe));
  1196. }
  1197.  
  1198. static void assert_cursor(struct drm_i915_private *dev_priv,
  1199.                           enum pipe pipe, bool state)
  1200. {
  1201.         struct drm_device *dev = dev_priv->dev;
  1202.         bool cur_state;
  1203.  
  1204.         if (IS_845G(dev) || IS_I865G(dev))
  1205.                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
  1206.         else
  1207.                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
  1208.  
  1209.         WARN(cur_state != state,
  1210.              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
  1211.              pipe_name(pipe), state_string(state), state_string(cur_state));
  1212. }
  1213. #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
  1214. #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
  1215.  
  1216. void assert_pipe(struct drm_i915_private *dev_priv,
  1217.                         enum pipe pipe, bool state)
  1218. {
  1219.         int reg;
  1220.         u32 val;
  1221.         bool cur_state;
  1222.         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
  1223.                                                                       pipe);
  1224.  
  1225.         /* if we need the pipe quirk it must be always on */
  1226.         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1227.             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1228.                 state = true;
  1229.  
  1230.         if (!intel_display_power_is_enabled(dev_priv,
  1231.                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
  1232.                 cur_state = false;
  1233.         } else {
  1234.                 reg = PIPECONF(cpu_transcoder);
  1235.                 val = I915_READ(reg);
  1236.                 cur_state = !!(val & PIPECONF_ENABLE);
  1237.         }
  1238.  
  1239.         WARN(cur_state != state,
  1240.              "pipe %c assertion failure (expected %s, current %s)\n",
  1241.              pipe_name(pipe), state_string(state), state_string(cur_state));
  1242. }
  1243.  
  1244. static void assert_plane(struct drm_i915_private *dev_priv,
  1245.                          enum plane plane, bool state)
  1246. {
  1247.         int reg;
  1248.         u32 val;
  1249.         bool cur_state;
  1250.  
  1251.         reg = DSPCNTR(plane);
  1252.         val = I915_READ(reg);
  1253.         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
  1254.         WARN(cur_state != state,
  1255.              "plane %c assertion failure (expected %s, current %s)\n",
  1256.              plane_name(plane), state_string(state), state_string(cur_state));
  1257. }
  1258.  
  1259. #define assert_plane_enabled(d, p) assert_plane(d, p, true)
  1260. #define assert_plane_disabled(d, p) assert_plane(d, p, false)
  1261.  
  1262. static void assert_planes_disabled(struct drm_i915_private *dev_priv,
  1263.                                    enum pipe pipe)
  1264. {
  1265.         struct drm_device *dev = dev_priv->dev;
  1266.         int reg, i;
  1267.         u32 val;
  1268.         int cur_pipe;
  1269.  
  1270.         /* Primary planes are fixed to pipes on gen4+ */
  1271.         if (INTEL_INFO(dev)->gen >= 4) {
  1272.                 reg = DSPCNTR(pipe);
  1273.                 val = I915_READ(reg);
  1274.                 WARN(val & DISPLAY_PLANE_ENABLE,
  1275.                      "plane %c assertion failure, should be disabled but is not\n",
  1276.                      plane_name(pipe));
  1277.                 return;
  1278.         }
  1279.  
  1280.         /* Need to check both planes against the pipe */
  1281.         for_each_pipe(dev_priv, i) {
  1282.                 reg = DSPCNTR(i);
  1283.                 val = I915_READ(reg);
  1284.                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
  1285.                         DISPPLANE_SEL_PIPE_SHIFT;
  1286.                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
  1287.                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
  1288.                      plane_name(i), pipe_name(pipe));
  1289.         }
  1290. }
  1291.  
  1292. static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
  1293.                                     enum pipe pipe)
  1294. {
  1295.         struct drm_device *dev = dev_priv->dev;
  1296.         int reg, sprite;
  1297.         u32 val;
  1298.  
  1299.         if (INTEL_INFO(dev)->gen >= 9) {
  1300.                 for_each_sprite(pipe, sprite) {
  1301.                         val = I915_READ(PLANE_CTL(pipe, sprite));
  1302.                         WARN(val & PLANE_CTL_ENABLE,
  1303.                              "plane %d assertion failure, should be off on pipe %c but is still active\n",
  1304.                              sprite, pipe_name(pipe));
  1305.                 }
  1306.         } else if (IS_VALLEYVIEW(dev)) {
  1307.                 for_each_sprite(pipe, sprite) {
  1308.                         reg = SPCNTR(pipe, sprite);
  1309.                         val = I915_READ(reg);
  1310.                         WARN(val & SP_ENABLE,
  1311.                              "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1312.                              sprite_name(pipe, sprite), pipe_name(pipe));
  1313.                 }
  1314.         } else if (INTEL_INFO(dev)->gen >= 7) {
  1315.                 reg = SPRCTL(pipe);
  1316.                 val = I915_READ(reg);
  1317.                 WARN(val & SPRITE_ENABLE,
  1318.                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1319.                      plane_name(pipe), pipe_name(pipe));
  1320.         } else if (INTEL_INFO(dev)->gen >= 5) {
  1321.                 reg = DVSCNTR(pipe);
  1322.                 val = I915_READ(reg);
  1323.                 WARN(val & DVS_ENABLE,
  1324.                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1325.                      plane_name(pipe), pipe_name(pipe));
  1326.         }
  1327. }
  1328.  
  1329. static void assert_vblank_disabled(struct drm_crtc *crtc)
  1330. {
  1331.         if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
  1332.                 drm_crtc_vblank_put(crtc);
  1333. }
  1334.  
  1335. static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
  1336. {
  1337.         u32 val;
  1338.         bool enabled;
  1339.  
  1340.         WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
  1341.  
  1342.         val = I915_READ(PCH_DREF_CONTROL);
  1343.         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
  1344.                             DREF_SUPERSPREAD_SOURCE_MASK));
  1345.         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
  1346. }
  1347.  
  1348. static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
  1349.                                        enum pipe pipe)
  1350. {
  1351.         int reg;
  1352.         u32 val;
  1353.         bool enabled;
  1354.  
  1355.         reg = PCH_TRANSCONF(pipe);
  1356.         val = I915_READ(reg);
  1357.         enabled = !!(val & TRANS_ENABLE);
  1358.         WARN(enabled,
  1359.              "transcoder assertion failed, should be off on pipe %c but is still active\n",
  1360.              pipe_name(pipe));
  1361. }
  1362.  
  1363. static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
  1364.                             enum pipe pipe, u32 port_sel, u32 val)
  1365. {
  1366.         if ((val & DP_PORT_EN) == 0)
  1367.                 return false;
  1368.  
  1369.         if (HAS_PCH_CPT(dev_priv->dev)) {
  1370.                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
  1371.                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
  1372.                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
  1373.                         return false;
  1374.         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
  1375.                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
  1376.                         return false;
  1377.         } else {
  1378.                 if ((val & DP_PIPE_MASK) != (pipe << 30))
  1379.                         return false;
  1380.         }
  1381.         return true;
  1382. }
  1383.  
  1384. static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
  1385.                               enum pipe pipe, u32 val)
  1386. {
  1387.         if ((val & SDVO_ENABLE) == 0)
  1388.                 return false;
  1389.  
  1390.         if (HAS_PCH_CPT(dev_priv->dev)) {
  1391.                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
  1392.                         return false;
  1393.         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
  1394.                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
  1395.                         return false;
  1396.         } else {
  1397.                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
  1398.                         return false;
  1399.         }
  1400.         return true;
  1401. }
  1402.  
  1403. static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
  1404.                               enum pipe pipe, u32 val)
  1405. {
  1406.         if ((val & LVDS_PORT_EN) == 0)
  1407.                 return false;
  1408.  
  1409.         if (HAS_PCH_CPT(dev_priv->dev)) {
  1410.                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1411.                         return false;
  1412.         } else {
  1413.                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
  1414.                         return false;
  1415.         }
  1416.         return true;
  1417. }
  1418.  
  1419. static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
  1420.                               enum pipe pipe, u32 val)
  1421. {
  1422.         if ((val & ADPA_DAC_ENABLE) == 0)
  1423.                 return false;
  1424.         if (HAS_PCH_CPT(dev_priv->dev)) {
  1425.                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1426.                         return false;
  1427.         } else {
  1428.                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
  1429.                         return false;
  1430.         }
  1431.         return true;
  1432. }
  1433.  
  1434. static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
  1435.                                    enum pipe pipe, int reg, u32 port_sel)
  1436. {
  1437.         u32 val = I915_READ(reg);
  1438.         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
  1439.              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
  1440.              reg, pipe_name(pipe));
  1441.  
  1442.         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
  1443.              && (val & DP_PIPEB_SELECT),
  1444.              "IBX PCH dp port still using transcoder B\n");
  1445. }
  1446.  
  1447. static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
  1448.                                      enum pipe pipe, int reg)
  1449. {
  1450.         u32 val = I915_READ(reg);
  1451.         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
  1452.              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
  1453.              reg, pipe_name(pipe));
  1454.  
  1455.         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
  1456.              && (val & SDVO_PIPE_B_SELECT),
  1457.              "IBX PCH hdmi port still using transcoder B\n");
  1458. }
  1459.  
  1460. static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
  1461.                                       enum pipe pipe)
  1462. {
  1463.         int reg;
  1464.         u32 val;
  1465.  
  1466.         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
  1467.         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
  1468.         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
  1469.  
  1470.         reg = PCH_ADPA;
  1471.         val = I915_READ(reg);
  1472.         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
  1473.              "PCH VGA enabled on transcoder %c, should be disabled\n",
  1474.              pipe_name(pipe));
  1475.  
  1476.         reg = PCH_LVDS;
  1477.         val = I915_READ(reg);
  1478.         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
  1479.              "PCH LVDS enabled on transcoder %c, should be disabled\n",
  1480.              pipe_name(pipe));
  1481.  
  1482.         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
  1483.         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
  1484.         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
  1485. }
  1486.  
  1487. static void intel_init_dpio(struct drm_device *dev)
  1488. {
  1489.         struct drm_i915_private *dev_priv = dev->dev_private;
  1490.  
  1491.         if (!IS_VALLEYVIEW(dev))
  1492.                 return;
  1493.  
  1494.         /*
  1495.          * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
  1496.          * CHV x1 PHY (DP/HDMI D)
  1497.          * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
  1498.          */
  1499.         if (IS_CHERRYVIEW(dev)) {
  1500.                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
  1501.                 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
  1502.         } else {
  1503.                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
  1504.         }
  1505. }
  1506.  
  1507. static void vlv_enable_pll(struct intel_crtc *crtc,
  1508.                            const struct intel_crtc_config *pipe_config)
  1509. {
  1510.         struct drm_device *dev = crtc->base.dev;
  1511.         struct drm_i915_private *dev_priv = dev->dev_private;
  1512.         int reg = DPLL(crtc->pipe);
  1513.         u32 dpll = pipe_config->dpll_hw_state.dpll;
  1514.  
  1515.         assert_pipe_disabled(dev_priv, crtc->pipe);
  1516.  
  1517.         /* No really, not for ILK+ */
  1518.         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
  1519.  
  1520.         /* PLL is protected by panel, make sure we can write it */
  1521.         if (IS_MOBILE(dev_priv->dev))
  1522.                 assert_panel_unlocked(dev_priv, crtc->pipe);
  1523.  
  1524.         I915_WRITE(reg, dpll);
  1525.         POSTING_READ(reg);
  1526.         udelay(150);
  1527.  
  1528.         if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
  1529.                 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
  1530.  
  1531.         I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
  1532.         POSTING_READ(DPLL_MD(crtc->pipe));
  1533.  
  1534.         /* We do this three times for luck */
  1535.         I915_WRITE(reg, dpll);
  1536.         POSTING_READ(reg);
  1537.         udelay(150); /* wait for warmup */
  1538.         I915_WRITE(reg, dpll);
  1539.         POSTING_READ(reg);
  1540.         udelay(150); /* wait for warmup */
  1541.         I915_WRITE(reg, dpll);
  1542.         POSTING_READ(reg);
  1543.         udelay(150); /* wait for warmup */
  1544. }
  1545.  
  1546. static void chv_enable_pll(struct intel_crtc *crtc,
  1547.                            const struct intel_crtc_config *pipe_config)
  1548. {
  1549.         struct drm_device *dev = crtc->base.dev;
  1550.         struct drm_i915_private *dev_priv = dev->dev_private;
  1551.         int pipe = crtc->pipe;
  1552.         enum dpio_channel port = vlv_pipe_to_channel(pipe);
  1553.         u32 tmp;
  1554.  
  1555.         assert_pipe_disabled(dev_priv, crtc->pipe);
  1556.  
  1557.         BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
  1558.  
  1559.         mutex_lock(&dev_priv->dpio_lock);
  1560.  
  1561.         /* Enable back the 10bit clock to display controller */
  1562.         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
  1563.         tmp |= DPIO_DCLKP_EN;
  1564.         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
  1565.  
  1566.         /*
  1567.          * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
  1568.          */
  1569.         udelay(1);
  1570.  
  1571.         /* Enable PLL */
  1572.         I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
  1573.  
  1574.         /* Check PLL is locked */
  1575.         if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
  1576.                 DRM_ERROR("PLL %d failed to lock\n", pipe);
  1577.  
  1578.         /* not sure when this should be written */
  1579.         I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
  1580.         POSTING_READ(DPLL_MD(pipe));
  1581.  
  1582.         mutex_unlock(&dev_priv->dpio_lock);
  1583. }
  1584.  
  1585. static int intel_num_dvo_pipes(struct drm_device *dev)
  1586. {
  1587.         struct intel_crtc *crtc;
  1588.         int count = 0;
  1589.  
  1590.         for_each_intel_crtc(dev, crtc)
  1591.                 count += crtc->active &&
  1592.                         intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
  1593.  
  1594.         return count;
  1595. }
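        /*
         * This count feeds the DVO 2x clock handling below: on i830,
         * i9xx_enable_pll() turns the 2x clock on for both PLLs whenever at
         * least one DVO pipe is active, and i9xx_disable_pll() only drops it
         * again when the last remaining DVO pipe (count == 1) is shut down.
         */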
  1596.  
  1597. static void i9xx_enable_pll(struct intel_crtc *crtc)
  1598. {
  1599.         struct drm_device *dev = crtc->base.dev;
  1600.         struct drm_i915_private *dev_priv = dev->dev_private;
  1601.         int reg = DPLL(crtc->pipe);
  1602.         u32 dpll = crtc->config.dpll_hw_state.dpll;
  1603.  
  1604.         assert_pipe_disabled(dev_priv, crtc->pipe);
  1605.  
  1606.         /* No really, not for ILK+ */
  1607.         BUG_ON(INTEL_INFO(dev)->gen >= 5);
  1608.  
  1609.         /* PLL is protected by panel, make sure we can write it */
  1610.         if (IS_MOBILE(dev) && !IS_I830(dev))
  1611.                 assert_panel_unlocked(dev_priv, crtc->pipe);
  1612.  
  1613.         /* Enable DVO 2x clock on both PLLs if necessary */
  1614.         if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
  1615.                 /*
  1616.                  * It appears to be important that we don't enable this
  1617.                  * for the current pipe before otherwise configuring the
  1618.                  * PLL. No idea how this should be handled if multiple
  1619.                  * DVO outputs are enabled simultaneously.
  1620.                  */
  1621.                 dpll |= DPLL_DVO_2X_MODE;
  1622.                 I915_WRITE(DPLL(!crtc->pipe),
  1623.                            I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
  1624.         }
  1625.  
  1626.         /* Wait for the clocks to stabilize. */
  1627.         POSTING_READ(reg);
  1628.         udelay(150);
  1629.  
  1630.         if (INTEL_INFO(dev)->gen >= 4) {
  1631.                 I915_WRITE(DPLL_MD(crtc->pipe),
  1632.                            crtc->config.dpll_hw_state.dpll_md);
  1633.         } else {
  1634.                 /* The pixel multiplier can only be updated once the
  1635.                  * DPLL is enabled and the clocks are stable.
  1636.                  *
  1637.                  * So write it again.
  1638.                  */
  1639.                 I915_WRITE(reg, dpll);
  1640.         }
  1641.  
  1642.         /* We do this three times for luck */
  1643.         I915_WRITE(reg, dpll);
  1644.         POSTING_READ(reg);
  1645.         udelay(150); /* wait for warmup */
  1646.         I915_WRITE(reg, dpll);
  1647.         POSTING_READ(reg);
  1648.         udelay(150); /* wait for warmup */
  1649.         I915_WRITE(reg, dpll);
  1650.         POSTING_READ(reg);
  1651.         udelay(150); /* wait for warmup */
  1652. }
  1653.  
  1654. /**
  1655.  * i9xx_disable_pll - disable a PLL
  1656.  * @crtc: crtc whose PLL is to be disabled
  1657.  *
  1658.  * Disable the PLL for @crtc's pipe, making sure the pipe is off
  1659.  * first.
  1660.  *
  1661.  * Note!  This is for pre-ILK only.
  1662.  */
  1663. static void i9xx_disable_pll(struct intel_crtc *crtc)
  1664. {
  1665.         struct drm_device *dev = crtc->base.dev;
  1666.         struct drm_i915_private *dev_priv = dev->dev_private;
  1667.         enum pipe pipe = crtc->pipe;
  1668.  
  1669.         /* Disable DVO 2x clock on both PLLs if necessary */
  1670.         if (IS_I830(dev) &&
  1671.             intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
  1672.             intel_num_dvo_pipes(dev) == 1) {
  1673.                 I915_WRITE(DPLL(PIPE_B),
  1674.                            I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
  1675.                 I915_WRITE(DPLL(PIPE_A),
  1676.                            I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
  1677.         }
  1678.  
  1679.         /* Don't disable the pipe or its PLL if a pipe quirk requires them to stay on */
  1680.         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1681.             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1682.                 return;
  1683.  
  1684.         /* Make sure the pipe isn't still relying on us */
  1685.         assert_pipe_disabled(dev_priv, pipe);
  1686.  
  1687.         I915_WRITE(DPLL(pipe), 0);
  1688.         POSTING_READ(DPLL(pipe));
  1689. }
  1690.  
  1691. static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1692. {
  1693.         u32 val = 0;
  1694.  
  1695.         /* Make sure the pipe isn't still relying on us */
  1696.         assert_pipe_disabled(dev_priv, pipe);
  1697.  
  1698.         /*
  1699.          * Leave integrated clock source and reference clock enabled for pipe B.
  1700.          * The latter is needed for VGA hotplug / manual detection.
  1701.          */
  1702.         if (pipe == PIPE_B)
  1703.                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
  1704.         I915_WRITE(DPLL(pipe), val);
  1705.         POSTING_READ(DPLL(pipe));
  1706.  
  1707. }
  1708.  
  1709. static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1710. {
  1711.         enum dpio_channel port = vlv_pipe_to_channel(pipe);
  1712.         u32 val;
  1713.  
  1714.         /* Make sure the pipe isn't still relying on us */
  1715.         assert_pipe_disabled(dev_priv, pipe);
  1716.  
  1717.         /* Set PLL en = 0 */
  1718.         val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
  1719.         if (pipe != PIPE_A)
  1720.                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
  1721.         I915_WRITE(DPLL(pipe), val);
  1722.         POSTING_READ(DPLL(pipe));
  1723.  
  1724.         mutex_lock(&dev_priv->dpio_lock);
  1725.  
  1726.         /* Disable 10bit clock to display controller */
  1727.         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
  1728.         val &= ~DPIO_DCLKP_EN;
  1729.         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
  1730.  
  1731.         /* disable left/right clock distribution */
  1732.         if (pipe != PIPE_B) {
  1733.                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
  1734.                 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
  1735.                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
  1736.         } else {
  1737.                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
  1738.                 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
  1739.                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
  1740.         }
  1741.  
  1742.         mutex_unlock(&dev_priv->dpio_lock);
  1743. }
  1744.  
  1745. void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
  1746.                 struct intel_digital_port *dport)
  1747. {
  1748.         u32 port_mask;
  1749.         int dpll_reg;
  1750.  
  1751.         switch (dport->port) {
  1752.         case PORT_B:
  1753.                 port_mask = DPLL_PORTB_READY_MASK;
  1754.                 dpll_reg = DPLL(0);
  1755.                 break;
  1756.         case PORT_C:
  1757.                 port_mask = DPLL_PORTC_READY_MASK;
  1758.                 dpll_reg = DPLL(0);
  1759.                 break;
  1760.         case PORT_D:
  1761.                 port_mask = DPLL_PORTD_READY_MASK;
  1762.                 dpll_reg = DPIO_PHY_STATUS;
  1763.                 break;
  1764.         default:
  1765.                 BUG();
  1766.         }
  1767.  
  1768.         if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
  1769.                 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
  1770.                      port_name(dport->port), I915_READ(dpll_reg));
  1771. }
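        /*
         * vlv_wait_port_ready() waits for the PHY to report the lanes behind
         * the given port as ready: ports B and C are polled through the
         * DPLL_PORTB/C_READY_MASK bits in DPLL(0), port D through
         * DPIO_PHY_STATUS.  On timeout (1000 ms) it only warns and returns;
         * callers proceed regardless.
         */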
  1772.  
  1773. static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
  1774. {
  1775.         struct drm_device *dev = crtc->base.dev;
  1776.         struct drm_i915_private *dev_priv = dev->dev_private;
  1777.         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
  1778.  
  1779.         if (WARN_ON(pll == NULL))
  1780.                 return;
  1781.  
  1782.         WARN_ON(!pll->config.crtc_mask);
  1783.         if (pll->active == 0) {
  1784.                 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
  1785.                 WARN_ON(pll->on);
  1786.                 assert_shared_dpll_disabled(dev_priv, pll);
  1787.  
  1788.                 pll->mode_set(dev_priv, pll);
  1789.         }
  1790. }
  1791.  
  1792. /**
  1793.  * intel_enable_shared_dpll - enable a shared DPLL
  1794.  * @crtc: crtc whose shared DPLL is to be enabled
  1796.  *
  1797.  * The PCH PLL needs to be enabled before the PCH transcoder, since it
  1798.  * drives the transcoder clock.
  1799.  */
  1800. static void intel_enable_shared_dpll(struct intel_crtc *crtc)
  1801. {
  1802.         struct drm_device *dev = crtc->base.dev;
  1803.         struct drm_i915_private *dev_priv = dev->dev_private;
  1804.         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
  1805.  
  1806.         if (WARN_ON(pll == NULL))
  1807.                 return;
  1808.  
  1809.         if (WARN_ON(pll->config.crtc_mask == 0))
  1810.                 return;
  1811.  
  1812.         DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
  1813.                       pll->name, pll->active, pll->on,
  1814.                       crtc->base.base.id);
  1815.  
  1816.         if (pll->active++) {
  1817.                 WARN_ON(!pll->on);
  1818.                 assert_shared_dpll_enabled(dev_priv, pll);
  1819.                 return;
  1820.         }
  1821.         WARN_ON(pll->on);
  1822.  
  1823.         intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
  1824.  
  1825.         DRM_DEBUG_KMS("enabling %s\n", pll->name);
  1826.         pll->enable(dev_priv, pll);
  1827.         pll->on = true;
  1828. }
  1829.  
  1830. static void intel_disable_shared_dpll(struct intel_crtc *crtc)
  1831. {
  1832.         struct drm_device *dev = crtc->base.dev;
  1833.         struct drm_i915_private *dev_priv = dev->dev_private;
  1834.         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
  1835.  
  1836.         /* PCH only available on ILK+ */
  1837.         BUG_ON(INTEL_INFO(dev)->gen < 5);
  1838.         if (WARN_ON(pll == NULL))
  1839.                 return;
  1840.  
  1841.         if (WARN_ON(pll->config.crtc_mask == 0))
  1842.                 return;
  1843.  
  1844.         DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
  1845.                       pll->name, pll->active, pll->on,
  1846.                       crtc->base.base.id);
  1847.  
  1848.         if (WARN_ON(pll->active == 0)) {
  1849.                 assert_shared_dpll_disabled(dev_priv, pll);
  1850.                 return;
  1851.         }
  1852.  
  1853.         assert_shared_dpll_enabled(dev_priv, pll);
  1854.         WARN_ON(!pll->on);
  1855.         if (--pll->active)
  1856.                 return;
  1857.  
  1858.         DRM_DEBUG_KMS("disabling %s\n", pll->name);
  1859.         pll->disable(dev_priv, pll);
  1860.         pll->on = false;
  1861.  
  1862.         intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
  1863. }
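        /*
         * intel_enable_shared_dpll()/intel_disable_shared_dpll() reference
         * count the PLL through pll->active: the hardware is only programmed
         * on the 0 -> 1 transition and only switched off again when the last
         * user drops its reference, with pll->on tracking the hardware state.
         */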
  1864.  
  1865. static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  1866.                                     enum pipe pipe)
  1867. {
  1868.         struct drm_device *dev = dev_priv->dev;
  1869.         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  1870.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  1871.         uint32_t reg, val, pipeconf_val;
  1872.  
  1873.         /* PCH only available on ILK+ */
  1874.         BUG_ON(!HAS_PCH_SPLIT(dev));
  1875.  
  1876.         /* Make sure PCH DPLL is enabled */
  1877.         assert_shared_dpll_enabled(dev_priv,
  1878.                                    intel_crtc_to_shared_dpll(intel_crtc));
  1879.  
  1880.         /* FDI must be feeding us bits for PCH ports */
  1881.         assert_fdi_tx_enabled(dev_priv, pipe);
  1882.         assert_fdi_rx_enabled(dev_priv, pipe);
  1883.  
  1884.         if (HAS_PCH_CPT(dev)) {
  1885.                 /* Workaround: Set the timing override bit before enabling the
  1886.                  * pch transcoder. */
  1887.                 reg = TRANS_CHICKEN2(pipe);
  1888.                 val = I915_READ(reg);
  1889.                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  1890.                 I915_WRITE(reg, val);
  1891.         }
  1892.  
  1893.         reg = PCH_TRANSCONF(pipe);
  1894.         val = I915_READ(reg);
  1895.         pipeconf_val = I915_READ(PIPECONF(pipe));
  1896.  
  1897.         if (HAS_PCH_IBX(dev_priv->dev)) {
  1898.                 /*
  1899.                  * make the BPC in transcoder be consistent with
  1900.                  * that in pipeconf reg.
  1901.                  */
  1902.                 val &= ~PIPECONF_BPC_MASK;
  1903.                 val |= pipeconf_val & PIPECONF_BPC_MASK;
  1904.         }
  1905.  
  1906.         val &= ~TRANS_INTERLACE_MASK;
  1907.         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
  1908.                 if (HAS_PCH_IBX(dev_priv->dev) &&
  1909.                     intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
  1910.                         val |= TRANS_LEGACY_INTERLACED_ILK;
  1911.                 else
  1912.                         val |= TRANS_INTERLACED;
  1913.         } else
  1914.                 val |= TRANS_PROGRESSIVE;
  1915.  
  1916.         I915_WRITE(reg, val | TRANS_ENABLE);
  1917.         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
  1918.                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
  1919. }
  1920.  
  1921. static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  1922.                                       enum transcoder cpu_transcoder)
  1923. {
  1924.         u32 val, pipeconf_val;
  1925.  
  1926.         /* PCH only available on ILK+ */
  1927.         BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
  1928.  
  1929.         /* FDI must be feeding us bits for PCH ports */
  1930.         assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
  1931.         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
  1932.  
  1933.         /* Workaround: set timing override bit. */
  1934.         val = I915_READ(_TRANSA_CHICKEN2);
  1935.         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  1936.         I915_WRITE(_TRANSA_CHICKEN2, val);
  1937.  
  1938.         val = TRANS_ENABLE;
  1939.         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
  1940.  
  1941.         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
  1942.             PIPECONF_INTERLACED_ILK)
  1943.                 val |= TRANS_INTERLACED;
  1944.         else
  1945.                 val |= TRANS_PROGRESSIVE;
  1946.  
  1947.         I915_WRITE(LPT_TRANSCONF, val);
  1948.         if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
  1949.                 DRM_ERROR("Failed to enable PCH transcoder\n");
  1950. }
  1951.  
  1952. static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
  1953.                                      enum pipe pipe)
  1954. {
  1955.         struct drm_device *dev = dev_priv->dev;
  1956.         uint32_t reg, val;
  1957.  
  1958.         /* FDI relies on the transcoder */
  1959.         assert_fdi_tx_disabled(dev_priv, pipe);
  1960.         assert_fdi_rx_disabled(dev_priv, pipe);
  1961.  
  1962.         /* Ports must be off as well */
  1963.         assert_pch_ports_disabled(dev_priv, pipe);
  1964.  
  1965.         reg = PCH_TRANSCONF(pipe);
  1966.         val = I915_READ(reg);
  1967.         val &= ~TRANS_ENABLE;
  1968.         I915_WRITE(reg, val);
  1969.         /* wait for PCH transcoder off, transcoder state */
  1970.         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
  1971.                 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
  1972.  
  1973.         if (!HAS_PCH_IBX(dev)) {
  1974.                 /* Workaround: Clear the timing override chicken bit again. */
  1975.                 reg = TRANS_CHICKEN2(pipe);
  1976.                 val = I915_READ(reg);
  1977.                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
  1978.                 I915_WRITE(reg, val);
  1979.         }
  1980. }
  1981.  
  1982. static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
  1983. {
  1984.         u32 val;
  1985.  
  1986.         val = I915_READ(LPT_TRANSCONF);
  1987.         val &= ~TRANS_ENABLE;
  1988.         I915_WRITE(LPT_TRANSCONF, val);
  1989.         /* wait for PCH transcoder off, transcoder state */
  1990.         if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
  1991.                 DRM_ERROR("Failed to disable PCH transcoder\n");
  1992.  
  1993.         /* Workaround: clear timing override bit. */
  1994.         val = I915_READ(_TRANSA_CHICKEN2);
  1995.         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
  1996.         I915_WRITE(_TRANSA_CHICKEN2, val);
  1997. }
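        /*
         * LPT exposes a single PCH transcoder (programmed through
         * LPT_TRANSCONF and fed from FDI on transcoder A), which is why the
         * lpt_*_pch_transcoder() helpers above take no pipe argument, unlike
         * their ironlake_* counterparts.
         */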
  1998.  
  1999. /**
  2000.  * intel_enable_pipe - enable a pipe, asserting requirements
  2001.  * @crtc: crtc responsible for the pipe
  2002.  *
  2003.  * Enable @crtc's pipe, making sure that various hardware specific requirements
  2004.  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
  2005.  */
  2006. static void intel_enable_pipe(struct intel_crtc *crtc)
  2007. {
  2008.         struct drm_device *dev = crtc->base.dev;
  2009.         struct drm_i915_private *dev_priv = dev->dev_private;
  2010.         enum pipe pipe = crtc->pipe;
  2011.         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
  2012.                                                                       pipe);
  2013.         enum pipe pch_transcoder;
  2014.         int reg;
  2015.         u32 val;
  2016.  
  2017.         assert_planes_disabled(dev_priv, pipe);
  2018.         assert_cursor_disabled(dev_priv, pipe);
  2019.         assert_sprites_disabled(dev_priv, pipe);
  2020.  
  2021.         if (HAS_PCH_LPT(dev_priv->dev))
  2022.                 pch_transcoder = TRANSCODER_A;
  2023.         else
  2024.                 pch_transcoder = pipe;
  2025.  
  2026.         /*
  2027.          * A pipe without a PLL won't actually be able to drive bits from
  2028.          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
  2029.          * need the check.
  2030.          */
  2031.         if (!HAS_PCH_SPLIT(dev_priv->dev)) {
  2032.                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
  2033.                         assert_dsi_pll_enabled(dev_priv);
  2034.                 else
  2035.                         assert_pll_enabled(dev_priv, pipe);
  2036.         } else {
  2037.                 if (crtc->config.has_pch_encoder) {
  2038.                         /* if driving the PCH, we need FDI enabled */
  2039.                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
  2040.                         assert_fdi_tx_pll_enabled(dev_priv,
  2041.                                                   (enum pipe) cpu_transcoder);
  2042.                 }
  2043.                 /* FIXME: assert CPU port conditions for SNB+ */
  2044.         }
  2045.  
  2046.         reg = PIPECONF(cpu_transcoder);
  2047.         val = I915_READ(reg);
  2048.         if (val & PIPECONF_ENABLE) {
  2049.                 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  2050.                           (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
  2051.                 return;
  2052.         }
  2053.  
  2054.         I915_WRITE(reg, val | PIPECONF_ENABLE);
  2055.         POSTING_READ(reg);
  2056. }
  2057.  
  2058. /**
  2059.  * intel_disable_pipe - disable a pipe, asserting requirements
  2060.  * @crtc: crtc whose pipe is to be disabled
  2061.  *
  2062.  * Disable the pipe of @crtc, making sure that various hardware
  2063.  * specific requirements are met, if applicable, e.g. plane
  2064.  * disabled, panel fitter off, etc.
  2065.  *
  2066.  * Will wait until the pipe has shut down before returning.
  2067.  */
  2068. static void intel_disable_pipe(struct intel_crtc *crtc)
  2069. {
  2070.         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  2071.         enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
  2072.         enum pipe pipe = crtc->pipe;
  2073.         int reg;
  2074.         u32 val;
  2075.  
  2076.         /*
  2077.          * Make sure planes won't keep trying to pump pixels to us,
  2078.          * or we might hang the display.
  2079.          */
  2080.         assert_planes_disabled(dev_priv, pipe);
  2081.         assert_cursor_disabled(dev_priv, pipe);
  2082.         assert_sprites_disabled(dev_priv, pipe);
  2083.  
  2084.         reg = PIPECONF(cpu_transcoder);
  2085.         val = I915_READ(reg);
  2086.         if ((val & PIPECONF_ENABLE) == 0)
  2087.                 return;
  2088.  
  2089.         /*
  2090.          * Double wide has implications for planes
  2091.          * so best keep it disabled when not needed.
  2092.          */
  2093.         if (crtc->config.double_wide)
  2094.                 val &= ~PIPECONF_DOUBLE_WIDE;
  2095.  
  2096.         /* Don't disable the pipe or its PLL if a pipe quirk requires them to stay on */
  2097.         if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
  2098.             !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  2099.                 val &= ~PIPECONF_ENABLE;
  2100.  
  2101.         I915_WRITE(reg, val);
  2102.         if ((val & PIPECONF_ENABLE) == 0)
  2103.                 intel_wait_for_pipe_off(crtc);
  2104. }
  2105.  
  2106. /*
  2107.  * Plane regs are double buffered, going from enabled->disabled needs a
  2108.  * trigger in order to latch.  The display address reg provides this.
  2109.  */
  2110. void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
  2111.                                       enum plane plane)
  2112. {
  2113.         struct drm_device *dev = dev_priv->dev;
  2114.         u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
  2115.  
  2116.         I915_WRITE(reg, I915_READ(reg));
  2117.         POSTING_READ(reg);
  2118. }
  2119.  
  2120. /**
  2121.  * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
  2122.  * @plane:  plane to be enabled
  2123.  * @crtc: crtc for the plane
  2124.  *
  2125.  * Enable @plane on @crtc, making sure that the pipe is running first.
  2126.  */
  2127. static void intel_enable_primary_hw_plane(struct drm_plane *plane,
  2128.                                           struct drm_crtc *crtc)
  2129. {
  2130.         struct drm_device *dev = plane->dev;
  2131.         struct drm_i915_private *dev_priv = dev->dev_private;
  2132.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2133.  
  2134.         /* If the pipe isn't enabled, we can't pump pixels and may hang */
  2135.         assert_pipe_enabled(dev_priv, intel_crtc->pipe);
  2136.  
  2137.         if (intel_crtc->primary_enabled)
  2138.                 return;
  2139.  
  2140.         intel_crtc->primary_enabled = true;
  2141.  
  2142.         dev_priv->display.update_primary_plane(crtc, plane->fb,
  2143.                                                crtc->x, crtc->y);
  2144.  
  2145.         /*
  2146.          * BDW signals flip done immediately if the plane
  2147.          * is disabled, even if the plane enable is already
  2148.          * armed to occur at the next vblank :(
  2149.          */
  2150.         if (IS_BROADWELL(dev))
  2151.                 intel_wait_for_vblank(dev, intel_crtc->pipe);
  2152. }
  2153.  
  2154. /**
  2155.  * intel_disable_primary_hw_plane - disable the primary hardware plane
  2156.  * @plane: plane to be disabled
  2157.  * @crtc: crtc for the plane
  2158.  *
  2159.  * Disable @plane on @crtc, making sure that the pipe is running first.
  2160.  */
  2161. static void intel_disable_primary_hw_plane(struct drm_plane *plane,
  2162.                                            struct drm_crtc *crtc)
  2163. {
  2164.         struct drm_device *dev = plane->dev;
  2165.         struct drm_i915_private *dev_priv = dev->dev_private;
  2166.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2167.  
  2168.         assert_pipe_enabled(dev_priv, intel_crtc->pipe);
  2169.  
  2170.         if (!intel_crtc->primary_enabled)
  2171.                 return;
  2172.  
  2173.         intel_crtc->primary_enabled = false;
  2174.  
  2175.         dev_priv->display.update_primary_plane(crtc, plane->fb,
  2176.                                                crtc->x, crtc->y);
  2177. }
  2178.  
  2179. static bool need_vtd_wa(struct drm_device *dev)
  2180. {
  2181. #ifdef CONFIG_INTEL_IOMMU
  2182.         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
  2183.                 return true;
  2184. #endif
  2185.         return false;
  2186. }
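        /*
         * need_vtd_wa() reports whether the VT-d scanout workaround applies
         * (gen6+ with the IOMMU actively mapping GPU pages); when it does,
         * intel_pin_and_fence_fb_obj() below bumps the framebuffer alignment
         * up to 256 KiB.
         */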
  2187.  
  2188. static int intel_align_height(struct drm_device *dev, int height, bool tiled)
  2189. {
  2190.         int tile_height;
  2191.  
  2192.         tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
  2193.         return ALIGN(height, tile_height);
  2194. }
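        /*
         * For example, with a tiled framebuffer on gen3+ (tile_height = 8) a
         * height of 1050 lines is padded to ALIGN(1050, 8) = 1056, while an
         * untiled buffer (tile_height = 1) keeps its original height.
         */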
  2195.  
  2196. int
  2197. intel_pin_and_fence_fb_obj(struct drm_plane *plane,
  2198.                            struct drm_framebuffer *fb,
  2199.                            struct intel_engine_cs *pipelined)
  2200. {
  2201.         struct drm_device *dev = fb->dev;
  2202.         struct drm_i915_private *dev_priv = dev->dev_private;
  2203.         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  2204.         u32 alignment;
  2205.         int ret;
  2206.  
  2207.         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  2208.  
  2209.         switch (obj->tiling_mode) {
  2210.         case I915_TILING_NONE:
  2211.                 if (INTEL_INFO(dev)->gen >= 9)
  2212.                         alignment = 256 * 1024;
  2213.                 else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
  2214.                         alignment = 128 * 1024;
  2215.                 else if (INTEL_INFO(dev)->gen >= 4)
  2216.                         alignment = 4 * 1024;
  2217.                 else
  2218.                         alignment = 64 * 1024;
  2219.                 break;
  2220.         case I915_TILING_X:
  2221.                 if (INTEL_INFO(dev)->gen >= 9)
  2222.                         alignment = 256 * 1024;
  2223.                 else {
  2224.                         /* pin() will align the object as required by fence */
  2225.                         alignment = 0;
  2226.                 }
  2227.                 break;
  2228.         case I915_TILING_Y:
  2229.                 WARN(1, "Y tiled bo slipped through, driver bug!\n");
  2230.                 return -EINVAL;
  2231.         default:
  2232.                 BUG();
  2233.         }
  2234.  
  2235.         /* Note that the w/a also requires 64 PTE of padding following the
  2236.          * bo. We currently fill all unused PTE with the shadow page and so
  2237.          * we should always have valid PTE following the scanout preventing
  2238.          * the VT-d warning.
  2239.          */
  2240.         if (need_vtd_wa(dev) && alignment < 256 * 1024)
  2241.                 alignment = 256 * 1024;
  2242.  
  2243.         /*
  2244.          * Global gtt pte registers are special registers which actually forward
  2245.          * writes to a chunk of system memory. Which means that there is no risk
  2246.          * that the register values disappear as soon as we call
  2247.          * intel_runtime_pm_put(), so it is correct to wrap only the
  2248.          * pin/unpin/fence and not more.
  2249.          */
  2250.         intel_runtime_pm_get(dev_priv);
  2251.  
  2252.         dev_priv->mm.interruptible = false;
  2253.         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
  2254.         if (ret)
  2255.                 goto err_interruptible;
  2256.  
  2257.         /* Install a fence for tiled scan-out. Pre-i965 always needs a
  2258.          * fence, whereas 965+ only requires a fence if using
  2259.          * framebuffer compression.  For simplicity, we always install
  2260.          * a fence as the cost is not that onerous.
  2261.          */
  2262.         ret = i915_gem_object_get_fence(obj);
  2263.         if (ret)
  2264.                 goto err_unpin;
  2265.  
  2266.         i915_gem_object_pin_fence(obj);
  2267.  
  2268.         dev_priv->mm.interruptible = true;
  2269.         intel_runtime_pm_put(dev_priv);
  2270.         return 0;
  2271.  
  2272. err_unpin:
  2273.         i915_gem_object_unpin_from_display_plane(obj);
  2274. err_interruptible:
  2275.         dev_priv->mm.interruptible = true;
  2276.         intel_runtime_pm_put(dev_priv);
  2277.         return ret;
  2278. }
  2279.  
  2280. void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
  2281. {
  2282.         WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
  2283.  
  2284.         i915_gem_object_unpin_fence(obj);
  2285. //      i915_gem_object_unpin_from_display_plane(obj);
  2286. }
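        /*
         * Note that in this KolibriOS port the display-plane unpin above is
         * stubbed out, so intel_unpin_fb_obj() only releases the fence taken
         * by intel_pin_and_fence_fb_obj(); the object itself stays pinned.
         */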
  2287.  
  2288. /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
  2289.  * is assumed to be a power-of-two. */
  2290. unsigned long intel_gen4_compute_page_offset(int *x, int *y,
  2291.                                              unsigned int tiling_mode,
  2292.                                              unsigned int cpp,
  2293.                                              unsigned int pitch)
  2294. {
  2295.         if (tiling_mode != I915_TILING_NONE) {
  2296.                 unsigned int tile_rows, tiles;
  2297.  
  2298.                 tile_rows = *y / 8;
  2299.                 *y %= 8;
  2300.  
  2301.                 tiles = *x / (512/cpp);
  2302.                 *x %= 512/cpp;
  2303.  
  2304.                 return tile_rows * pitch * 8 + tiles * 4096;
  2305.         } else {
  2306.                 unsigned int offset;
  2307.  
  2308.                 offset = *y * pitch + *x * cpp;
  2309.                 *y = 0;
  2310.                 *x = (offset & 4095) / cpp;
  2311.                 return offset & -4096;
  2312.         }
  2313. }
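        /*
         * Worked example (illustrative values): for an X-tiled surface with
         * cpp = 4 and pitch = 8192 bytes, a pixel at x = 1000, y = 50 gives
         * tile_rows = 50 / 8 = 6 and tiles = 1000 / 128 = 7, so the returned
         * base offset is 6 * 8192 * 8 + 7 * 4096 = 421888 bytes, with the
         * residuals x = 104, y = 2 left for the caller to program into
         * DSPTILEOFF.
         */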
  2314.  
  2315. int intel_format_to_fourcc(int format)
  2316. {
  2317.         switch (format) {
  2318.         case DISPPLANE_8BPP:
  2319.                 return DRM_FORMAT_C8;
  2320.         case DISPPLANE_BGRX555:
  2321.                 return DRM_FORMAT_XRGB1555;
  2322.         case DISPPLANE_BGRX565:
  2323.                 return DRM_FORMAT_RGB565;
  2324.         default:
  2325.         case DISPPLANE_BGRX888:
  2326.                 return DRM_FORMAT_XRGB8888;
  2327.         case DISPPLANE_RGBX888:
  2328.                 return DRM_FORMAT_XBGR8888;
  2329.         case DISPPLANE_BGRX101010:
  2330.                 return DRM_FORMAT_XRGB2101010;
  2331.         case DISPPLANE_RGBX101010:
  2332.                 return DRM_FORMAT_XBGR2101010;
  2333.         }
  2334. }
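        /*
         * The default: label sits just before DISPPLANE_BGRX888 on purpose:
         * any plane format value this helper does not recognise falls through
         * to DRM_FORMAT_XRGB8888.
         */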
  2335.  
  2336. static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
  2337.                                   struct intel_plane_config *plane_config)
  2338. {
  2339.         struct drm_device *dev = crtc->base.dev;
  2340.         struct drm_i915_gem_object *obj = NULL;
  2341.         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  2342.         u32 base = plane_config->base;
  2343.  
  2344.         if (plane_config->size == 0)
  2345.                 return false;
  2346.  
  2347.         obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
  2348.                                                              plane_config->size);
  2349.         if (!obj)
  2350.                 return false;
  2351.  
  2352.         obj->map_and_fenceable = true;
  2353.         main_fb_obj = obj;
  2354.  
  2355.         if (plane_config->tiled) {
  2356.                 obj->tiling_mode = I915_TILING_X;
  2357.                 obj->stride = crtc->base.primary->fb->pitches[0];
  2358.         }
  2359.  
  2360.         mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
  2361.         mode_cmd.width = crtc->base.primary->fb->width;
  2362.         mode_cmd.height = crtc->base.primary->fb->height;
  2363.         mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
  2364.  
  2365.         mutex_lock(&dev->struct_mutex);
  2366.  
  2367.         if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
  2368.                                    &mode_cmd, obj)) {
  2369.                 DRM_DEBUG_KMS("intel fb init failed\n");
  2370.                 goto out_unref_obj;
  2371.         }
  2372.  
  2373.         obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
  2374.         mutex_unlock(&dev->struct_mutex);
  2375.  
  2376.         DRM_DEBUG_KMS("plane fb obj %p\n", obj);
  2377.         return true;
  2378.  
  2379. out_unref_obj:
  2380.         drm_gem_object_unreference(&obj->base);
  2381.         mutex_unlock(&dev->struct_mutex);
  2382.         return false;
  2383. }
  2384.  
  2385. static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
  2386.                                  struct intel_plane_config *plane_config)
  2387. {
  2388.         struct drm_device *dev = intel_crtc->base.dev;
  2389.         struct drm_i915_private *dev_priv = dev->dev_private;
  2390.         struct drm_crtc *c;
  2391.         struct intel_crtc *i;
  2392.         struct drm_i915_gem_object *obj;
  2393.  
  2394.         if (!intel_crtc->base.primary->fb)
  2395.                 return;
  2396.  
  2397.         if (intel_alloc_plane_obj(intel_crtc, plane_config))
  2398.                 return;
  2399.  
  2400.         kfree(intel_crtc->base.primary->fb);
  2401.         intel_crtc->base.primary->fb = NULL;
  2402.  
  2403.         /*
  2404.          * Failed to alloc the obj, check to see if we should share
  2405.          * an fb with another CRTC instead
  2406.          */
  2407.         for_each_crtc(dev, c) {
  2408.                 i = to_intel_crtc(c);
  2409.  
  2410.                 if (c == &intel_crtc->base)
  2411.                         continue;
  2412.  
  2413.                 if (!i->active)
  2414.                         continue;
  2415.  
  2416.                 obj = intel_fb_obj(c->primary->fb);
  2417.                 if (obj == NULL)
  2418.                         continue;
  2419.  
  2420.                 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
  2421.                         if (obj->tiling_mode != I915_TILING_NONE)
  2422.                                 dev_priv->preserve_bios_swizzle = true;
  2423.  
  2424.                         drm_framebuffer_reference(c->primary->fb);
  2425.                         intel_crtc->base.primary->fb = c->primary->fb;
  2426.                         obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
  2427.                         break;
  2428.                 }
  2429.         }
  2430. }
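        /*
         * If the stolen-memory allocation above fails, the loop just walked
         * the other CRTCs looking for one already scanning out of the same
         * GGTT offset, so the BIOS-provided framebuffer can be shared (and
         * its swizzle setting preserved) instead of being reallocated.
         */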
  2431.  
  2432. static void i9xx_update_primary_plane(struct drm_crtc *crtc,
  2433.                                      struct drm_framebuffer *fb,
  2434.                                      int x, int y)
  2435. {
  2436.         struct drm_device *dev = crtc->dev;
  2437.         struct drm_i915_private *dev_priv = dev->dev_private;
  2438.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2439.         struct drm_i915_gem_object *obj;
  2440.         int plane = intel_crtc->plane;
  2441.         unsigned long linear_offset;
  2442.         u32 dspcntr;
  2443.         u32 reg = DSPCNTR(plane);
  2444.         int pixel_size;
  2445.  
  2446.         if (!intel_crtc->primary_enabled) {
  2447.                 I915_WRITE(reg, 0);
  2448.                 if (INTEL_INFO(dev)->gen >= 4)
  2449.                         I915_WRITE(DSPSURF(plane), 0);
  2450.                 else
  2451.                         I915_WRITE(DSPADDR(plane), 0);
  2452.                 POSTING_READ(reg);
  2453.                 return;
  2454.         }
  2455.  
  2456.         obj = intel_fb_obj(fb);
  2457.         if (WARN_ON(obj == NULL))
  2458.                 return;
  2459.  
  2460.         pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
  2461.  
  2462.         dspcntr = DISPPLANE_GAMMA_ENABLE;
  2463.  
  2464.         dspcntr |= DISPLAY_PLANE_ENABLE;
  2465.  
  2466.         if (INTEL_INFO(dev)->gen < 4) {
  2467.                 if (intel_crtc->pipe == PIPE_B)
  2468.                         dspcntr |= DISPPLANE_SEL_PIPE_B;
  2469.  
  2470.                 /* pipesrc and dspsize control the size that is scaled from,
  2471.                  * which should always be the user's requested size.
  2472.                  */
  2473.                 I915_WRITE(DSPSIZE(plane),
  2474.                            ((intel_crtc->config.pipe_src_h - 1) << 16) |
  2475.                            (intel_crtc->config.pipe_src_w - 1));
  2476.                 I915_WRITE(DSPPOS(plane), 0);
  2477.         } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
  2478.                 I915_WRITE(PRIMSIZE(plane),
  2479.                            ((intel_crtc->config.pipe_src_h - 1) << 16) |
  2480.                            (intel_crtc->config.pipe_src_w - 1));
  2481.                 I915_WRITE(PRIMPOS(plane), 0);
  2482.                 I915_WRITE(PRIMCNSTALPHA(plane), 0);
  2483.         }
  2484.  
  2485.         switch (fb->pixel_format) {
  2486.         case DRM_FORMAT_C8:
  2487.                 dspcntr |= DISPPLANE_8BPP;
  2488.                 break;
  2489.         case DRM_FORMAT_XRGB1555:
  2490.         case DRM_FORMAT_ARGB1555:
  2491.                 dspcntr |= DISPPLANE_BGRX555;
  2492.                 break;
  2493.         case DRM_FORMAT_RGB565:
  2494.                 dspcntr |= DISPPLANE_BGRX565;
  2495.                 break;
  2496.         case DRM_FORMAT_XRGB8888:
  2497.         case DRM_FORMAT_ARGB8888:
  2498.                 dspcntr |= DISPPLANE_BGRX888;
  2499.                 break;
  2500.         case DRM_FORMAT_XBGR8888:
  2501.         case DRM_FORMAT_ABGR8888:
  2502.                 dspcntr |= DISPPLANE_RGBX888;
  2503.                 break;
  2504.         case DRM_FORMAT_XRGB2101010:
  2505.         case DRM_FORMAT_ARGB2101010:
  2506.                 dspcntr |= DISPPLANE_BGRX101010;
  2507.                 break;
  2508.         case DRM_FORMAT_XBGR2101010:
  2509.         case DRM_FORMAT_ABGR2101010:
  2510.                 dspcntr |= DISPPLANE_RGBX101010;
  2511.                 break;
  2512.         default:
  2513.                 BUG();
  2514.         }
  2515.  
  2516.         if (INTEL_INFO(dev)->gen >= 4 &&
  2517.             obj->tiling_mode != I915_TILING_NONE)
  2518.                 dspcntr |= DISPPLANE_TILED;
  2519.  
  2520.         if (IS_G4X(dev))
  2521.                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
  2522.  
  2523.         linear_offset = y * fb->pitches[0] + x * pixel_size;
  2524.  
  2525.         if (INTEL_INFO(dev)->gen >= 4) {
  2526.                 intel_crtc->dspaddr_offset =
  2527.                         intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
  2528.                                                        pixel_size,
  2529.                                                            fb->pitches[0]);
  2530.                 linear_offset -= intel_crtc->dspaddr_offset;
  2531.         } else {
  2532.                 intel_crtc->dspaddr_offset = linear_offset;
  2533.         }
  2534.  
  2535.         if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
  2536.                 dspcntr |= DISPPLANE_ROTATE_180;
  2537.  
  2538.                 x += (intel_crtc->config.pipe_src_w - 1);
  2539.                 y += (intel_crtc->config.pipe_src_h - 1);
  2540.  
  2541.                 /* Find the last pixel of the last line of the display
  2542.                  * data and add it to linear_offset */
  2543.                 linear_offset +=
  2544.                         (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
  2545.                         (intel_crtc->config.pipe_src_w - 1) * pixel_size;
  2546.         }
  2547.  
  2548.         I915_WRITE(reg, dspcntr);
  2549.  
  2550.         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
  2551.                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
  2552.                       fb->pitches[0]);
  2553.         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
  2554.         if (INTEL_INFO(dev)->gen >= 4) {
  2555.                 I915_WRITE(DSPSURF(plane),
  2556.                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
  2557.                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
  2558.                 I915_WRITE(DSPLINOFF(plane), linear_offset);
  2559.         } else
  2560.                 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
  2561.         POSTING_READ(reg);
  2562. }
  2563.  
  2564. static void ironlake_update_primary_plane(struct drm_crtc *crtc,
  2565.                                          struct drm_framebuffer *fb,
  2566.                                          int x, int y)
  2567. {
  2568.         struct drm_device *dev = crtc->dev;
  2569.         struct drm_i915_private *dev_priv = dev->dev_private;
  2570.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2571.         struct drm_i915_gem_object *obj;
  2572.         int plane = intel_crtc->plane;
  2573.         unsigned long linear_offset;
  2574.         u32 dspcntr;
  2575.         u32 reg = DSPCNTR(plane);
  2576.         int pixel_size;
  2577.  
  2578.         if (!intel_crtc->primary_enabled) {
  2579.                 I915_WRITE(reg, 0);
  2580.                 I915_WRITE(DSPSURF(plane), 0);
  2581.                 POSTING_READ(reg);
  2582.                 return;
  2583.         }
  2584.  
  2585.         obj = intel_fb_obj(fb);
  2586.         if (WARN_ON(obj == NULL))
  2587.                 return;
  2588.  
  2589.         pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
  2590.  
  2591.         dspcntr = DISPPLANE_GAMMA_ENABLE;
  2592.  
  2593.         dspcntr |= DISPLAY_PLANE_ENABLE;
  2594.  
  2595.         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  2596.                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
  2597.  
  2598.         switch (fb->pixel_format) {
  2599.         case DRM_FORMAT_C8:
  2600.                 dspcntr |= DISPPLANE_8BPP;
  2601.                 break;
  2602.         case DRM_FORMAT_RGB565:
  2603.                 dspcntr |= DISPPLANE_BGRX565;
  2604.   &