Subversion Repositories Kolibri OS

Rev

Rev 2330 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2006-2007 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21.  * DEALINGS IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *  Eric Anholt <eric@anholt.net>
  25.  */
  26.  
  27. //#include <linux/cpufreq.h>
  28. #include <linux/module.h>
  29. //#include <linux/input.h>
  30. #include <linux/i2c.h>
  31. #include <linux/kernel.h>
  32. //#include <linux/slab.h>
  33. //#include <linux/vgaarb.h>
  34. #include "drmP.h"
  35. #include "intel_drv.h"
  36. #include "i915_drv.h"
  37. //#include "i915_trace.h"
  38. #include "drm_dp_helper.h"
  39.  
  40. #include "drm_crtc_helper.h"
  41.  
  42. #include <syscall.h>
  43.  
  44. phys_addr_t get_bus_addr(void);
  45.  
  46. static inline __attribute__((const))
  47. bool is_power_of_2(unsigned long n)
  48. {
  49.     return (n != 0 && ((n & (n - 1)) == 0));
  50. }
  51.  
/* Minimal PCI config-space shim for the KolibriOS port: reads the
 * 16-bit value at config offset 'where' of the given device via the
 * PciRead16 system service.
 * NOTE(review): always returns 1, unlike the Linux API where 0 means
 * success — callers must not check this result Linux-style; confirm
 * against call sites. */
static inline int pci_read_config_word(struct pci_dev *dev, int where,
                    u16 *val)
{
    *val = PciRead16(dev->busnr, dev->devfn, where);
    return 1;
}
  58.  
  59.  
  60. #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
  61.  
  62. bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
  63. static void intel_update_watermarks(struct drm_device *dev);
  64. static void intel_increase_pllclock(struct drm_crtc *crtc);
  65. static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
  66.  
/* One candidate PLL configuration.  The "given" dividers are iterated
 * by the find_pll routines; the "derived" fields are filled in from
 * them by intel_clock()/pineview_clock(). */
typedef struct {
    /* given values */
    int n;
    int m1, m2;
    int p1, p2;
    /* derived values */
    int dot;    /* resulting dot clock */
    int vco;    /* VCO frequency */
    int m;      /* effective M divider, computed from m1/m2 */
    int p;      /* effective P divider, p1 * p2 */
} intel_clock_t;

/* Inclusive [min, max] range for a single divider or frequency. */
typedef struct {
    int min, max;
} intel_range_t;

/* P2 post-divider choice: targets below dot_limit use p2_slow,
 * targets at or above it use p2_fast. */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM              2
typedef struct intel_limit intel_limit_t;
/* Hardware PLL limits for one chipset/output combination, plus the
 * search routine used to find the best dividers within them. */
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
              int, int, intel_clock_t *);
};
  96.  
  97. /* FDI */
  98. #define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
  99.  
  100. static bool
  101. intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
  102.             int target, int refclk, intel_clock_t *best_clock);
  103. static bool
  104. intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
  105.             int target, int refclk, intel_clock_t *best_clock);
  106.  
  107. static bool
  108. intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
  109.               int target, int refclk, intel_clock_t *best_clock);
  110. static bool
  111. intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
  112.                int target, int refclk, intel_clock_t *best_clock);
  113.  
  114. static inline u32 /* units of 100MHz */
  115. intel_fdi_link_freq(struct drm_device *dev)
  116. {
  117.         if (IS_GEN5(dev)) {
  118.                 struct drm_i915_private *dev_priv = dev->dev_private;
  119.                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
  120.         } else
  121.                 return 27;
  122. }
  123.  
/* Gen2 (i8xx) PLL limits for DVO/DAC outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
        .find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
        .find_pll = intel_find_best_PLL,
};

/* i9xx (non-G4X, non-Pineview) PLL limits for SDVO and other
 * non-LVDS outputs; also the fallback table for G4X (see
 * intel_g4x_limit()). */
static const intel_limit_t intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_find_best_PLL,
};

/* i9xx (non-G4X, non-Pineview) PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
        .find_pll = intel_find_best_PLL,
};
  179.  
  180.  
/* G4X PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4X PLL limits for HDMI and analog outputs (see intel_g4x_limit()). */
static const intel_limit_t intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4X PLL limits for single-channel LVDS panels. */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4X PLL limits for dual-channel LVDS panels. */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4X PLL limits for DisplayPort; the divider sets are fixed and
 * chosen by intel_find_pll_g4x_dp() rather than searched. */
static const intel_limit_t intel_limits_g4x_display_port = {
        .dot = { .min = 161670, .max = 227000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 97, .max = 108 },
        .m1 = { .min = 0x10, .max = 0x12 },
        .m2 = { .min = 0x05, .max = 0x06 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
                .p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_g4x_dp,
};
  254.  
/* Pineview PLL limits for SDVO and other non-LVDS outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_find_best_PLL,
};

/* Pineview PLL limits for LVDS panels (same m1/m2 quirks as above). */
static const intel_limit_t intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_find_best_PLL,
};
  284.  
  285. /* Ironlake / Sandybridge
  286.  *
  287.  * We calculate clock using (register_value + 2) for N/M1/M2, so here
  288.  * the range value for them is (actual_value - 2).
  289.  */
/* Ironlake/Sandybridge PLL limits for DAC and other non-LVDS,
 * non-DP outputs. */
static const intel_limit_t intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake/Sandybridge PLL limits for single-channel LVDS panels. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake/Sandybridge PLL limits for dual-channel LVDS panels. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2,.max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Dual-channel LVDS limits for a 100MHz reference clock. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2,.max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake PLL limits for DisplayPort/eDP; the divider sets are
 * fixed and chosen by intel_find_pll_ironlake_dp(). */
static const intel_limit_t intel_limits_ironlake_display_port = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 81, .max = 90 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
                .p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_ironlake_dp,
};
  374.  
  375. static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
  376.                                                 int refclk)
  377. {
  378.         struct drm_device *dev = crtc->dev;
  379.         struct drm_i915_private *dev_priv = dev->dev_private;
  380.         const intel_limit_t *limit;
  381.  
  382.         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
  383.                 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
  384.                     LVDS_CLKB_POWER_UP) {
  385.                         /* LVDS dual channel */
  386.                         if (refclk == 100000)
  387.                                 limit = &intel_limits_ironlake_dual_lvds_100m;
  388.                         else
  389.                                 limit = &intel_limits_ironlake_dual_lvds;
  390.                 } else {
  391.                         if (refclk == 100000)
  392.                                 limit = &intel_limits_ironlake_single_lvds_100m;
  393.                         else
  394.                                 limit = &intel_limits_ironlake_single_lvds;
  395.                 }
  396.         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
  397.                         HAS_eDP)
  398.                 limit = &intel_limits_ironlake_display_port;
  399.         else
  400.                 limit = &intel_limits_ironlake_dac;
  401.  
  402.         return limit;
  403. }
  404.  
/* Pick the PLL limit table for a G4X CRTC based on the attached
 * output type; LVDS additionally selects single vs dual channel from
 * the current LVDS register state. */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const intel_limit_t *limit;

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        /* LVDS with dual channel */
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
                        /* LVDS with single channel */
                        limit = &intel_limits_g4x_single_channel_lvds;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
                   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
                limit = &intel_limits_g4x_hdmi;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
                limit = &intel_limits_g4x_sdvo;
        } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
                limit = &intel_limits_g4x_display_port;
        } else /* The option is for other outputs */
                limit = &intel_limits_i9xx_sdvo;

        return limit;
}
  431.  
  432. static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
  433. {
  434.         struct drm_device *dev = crtc->dev;
  435.         const intel_limit_t *limit;
  436.  
  437.         if (HAS_PCH_SPLIT(dev))
  438.                 limit = intel_ironlake_limit(crtc, refclk);
  439.         else if (IS_G4X(dev)) {
  440.                 limit = intel_g4x_limit(crtc);
  441.         } else if (IS_PINEVIEW(dev)) {
  442.                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
  443.                         limit = &intel_limits_pineview_lvds;
  444.                 else
  445.                         limit = &intel_limits_pineview_sdvo;
  446.         } else if (!IS_GEN2(dev)) {
  447.                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
  448.                         limit = &intel_limits_i9xx_lvds;
  449.                 else
  450.                         limit = &intel_limits_i9xx_sdvo;
  451.         } else {
  452.                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
  453.                         limit = &intel_limits_i8xx_lvds;
  454.                 else
  455.                         limit = &intel_limits_i8xx_dvo;
  456.         }
  457.         return limit;
  458. }
  459.  
  460. /* m1 is reserved as 0 in Pineview, n is a ring counter */
  461. static void pineview_clock(int refclk, intel_clock_t *clock)
  462. {
  463.         clock->m = clock->m2 + 2;
  464.         clock->p = clock->p1 * clock->p2;
  465.         clock->vco = refclk * clock->m / clock->n;
  466.         clock->dot = clock->vco / clock->p;
  467. }
  468.  
  469. static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
  470. {
  471.         if (IS_PINEVIEW(dev)) {
  472.                 pineview_clock(refclk, clock);
  473.                 return;
  474.         }
  475.         clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
  476.         clock->p = clock->p1 * clock->p2;
  477.         clock->vco = refclk * clock->m / (clock->n + 2);
  478.         clock->dot = clock->vco / clock->p;
  479. }
  480.  
  481. /**
  482.  * Returns whether any output on the specified pipe is of the specified type
  483.  */
  484. bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
  485. {
  486.         struct drm_device *dev = crtc->dev;
  487.         struct drm_mode_config *mode_config = &dev->mode_config;
  488.         struct intel_encoder *encoder;
  489.  
  490.         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
  491.                 if (encoder->base.crtc == crtc && encoder->type == type)
  492.                         return true;
  493.  
  494.         return false;
  495. }
  496.  
  497. #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
  498. /**
  499.  * Returns whether the given set of divisors are valid for a given refclk with
  500.  * the given connectors.
  501.  */
  502.  
  503. static bool intel_PLL_is_valid(struct drm_device *dev,
  504.                                const intel_limit_t *limit,
  505.                                const intel_clock_t *clock)
  506. {
  507.         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
  508.                 INTELPllInvalid ("p1 out of range\n");
  509.         if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
  510.                 INTELPllInvalid ("p out of range\n");
  511.         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
  512.                 INTELPllInvalid ("m2 out of range\n");
  513.         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
  514.                 INTELPllInvalid ("m1 out of range\n");
  515.         if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
  516.                 INTELPllInvalid ("m1 <= m2\n");
  517.         if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
  518.                 INTELPllInvalid ("m out of range\n");
  519.         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
  520.                 INTELPllInvalid ("n out of range\n");
  521.         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
  522.                 INTELPllInvalid ("vco out of range\n");
  523.         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
  524.          * connector, etc., rather than just a single range.
  525.          */
  526.         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
  527.                 INTELPllInvalid ("dot out of range\n");
  528.  
  529.         return true;
  530. }
  531.  
/*
 * Exhaustive search over the divider ranges in @limit for the
 * candidate whose derived dot clock is closest to @target.  The best
 * candidate is stored in *best_clock (zeroed first).  Returns true
 * iff any valid candidate improved on the initial error bound.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                    int target, int refclk, intel_clock_t *best_clock)

{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
        int err = target; /* best |dot - target| seen so far */

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
            (I915_READ(LVDS)) != 0) {
                /*
                 * For LVDS, if the panel is on, just rely on its current
                 * settings for dual-channel.  We haven't figured out how to
                 * reliably set up different single/dual channel state, if we
                 * even can.
                 */
                if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
        } else {
                /* Non-LVDS: pick p2 from the dot-clock threshold. */
                if (target < limit->p2.dot_limit)
                        clock.p2 = limit->p2.p2_slow;
                else
                        clock.p2 = limit->p2.p2_fast;
        }

        memset (best_clock, 0, sizeof (*best_clock));

        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* m1 is always 0 in Pineview; elsewhere m2 must
                         * stay below m1, so stop this inner scan early. */
                        if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        intel_clock(dev, refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        /* err only shrank if some valid candidate was recorded. */
        return (err != target);
}
  594.  
/*
 * G4X/Ironlake variant of the PLL search.  Unlike
 * intel_find_best_PLL() it scans n upward and m1/m2/p1 downward
 * (hardware prefers small n and large m), accepts any candidate
 * within a relative error bound of ~0.585% of @target, and clamps
 * max_n once a match is found so only smaller-n candidates can
 * replace it.  Returns true iff a candidate was stored.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                        int target, int refclk, intel_clock_t *best_clock)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
        int max_n;
        bool found;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);
        found = false;

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                int lvds_reg;

                /* PCH-split parts moved the LVDS register. */
                if (HAS_PCH_SPLIT(dev))
                        lvds_reg = PCH_LVDS;
                else
                        lvds_reg = LVDS;
                if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
        } else {
                if (target < limit->p2.dot_limit)
                        clock.p2 = limit->p2.p2_slow;
                else
                        clock.p2 = limit->p2.p2_fast;
        }

        memset(best_clock, 0, sizeof(*best_clock));
        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        intel_clock(dev, refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}
  658.  
  659. static bool
  660. intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
  661.                            int target, int refclk, intel_clock_t *best_clock)
  662. {
  663.         struct drm_device *dev = crtc->dev;
  664.         intel_clock_t clock;
  665.  
  666.         if (target < 200000) {
  667.                 clock.n = 1;
  668.                 clock.p1 = 2;
  669.                 clock.p2 = 10;
  670.                 clock.m1 = 12;
  671.                 clock.m2 = 9;
  672.         } else {
  673.                 clock.n = 2;
  674.                 clock.p1 = 1;
  675.                 clock.p2 = 10;
  676.                 clock.m1 = 14;
  677.                 clock.m2 = 8;
  678.         }
  679.         intel_clock(dev, refclk, &clock);
  680.         memcpy(best_clock, &clock, sizeof(intel_clock_t));
  681.         return true;
  682. }
  683.  
  684. /* DisplayPort has only two frequencies, 162MHz and 270MHz */
  685. static bool
  686. intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
  687.                       int target, int refclk, intel_clock_t *best_clock)
  688. {
  689.         intel_clock_t clock;
  690.         if (target < 200000) {
  691.                 clock.p1 = 2;
  692.                 clock.p2 = 10;
  693.                 clock.n = 2;
  694.                 clock.m1 = 23;
  695.                 clock.m2 = 8;
  696.         } else {
  697.                 clock.p1 = 1;
  698.                 clock.p2 = 10;
  699.                 clock.n = 1;
  700.                 clock.m1 = 14;
  701.                 clock.m2 = 2;
  702.         }
  703.         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
  704.         clock.p = (clock.p1 * clock.p2);
  705.         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
  706.         clock.vco = 0;
  707.         memcpy(best_clock, &clock, sizeof(intel_clock_t));
  708.         return true;
  709. }
  710.  
  711. /**
  712.  * intel_wait_for_vblank - wait for vblank on a given pipe
  713.  * @dev: drm device
  714.  * @pipe: pipe to wait for
  715.  *
  716.  * Wait for vblank to occur on a given pipe.  Needed for various bits of
  717.  * mode setting code.
  718.  */
  719. void intel_wait_for_vblank(struct drm_device *dev, int pipe)
  720. {
  721.         struct drm_i915_private *dev_priv = dev->dev_private;
  722.         int pipestat_reg = PIPESTAT(pipe);
  723.  
  724.         /* Clear existing vblank status. Note this will clear any other
  725.          * sticky status fields as well.
  726.          *
  727.          * This races with i915_driver_irq_handler() with the result
  728.          * that either function could miss a vblank event.  Here it is not
  729.          * fatal, as we will either wait upon the next vblank interrupt or
  730.          * timeout.  Generally speaking intel_wait_for_vblank() is only
  731.          * called during modeset at which time the GPU should be idle and
  732.          * should *not* be performing page flips and thus not waiting on
  733.          * vblanks...
  734.          * Currently, the result of us stealing a vblank from the irq
  735.          * handler is that a single frame will be skipped during swapbuffers.
  736.          */
  737.         I915_WRITE(pipestat_reg,
  738.                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
  739.  
  740.         /* Wait for vblank interrupt bit to set */
  741.         if (wait_for(I915_READ(pipestat_reg) &
  742.                      PIPE_VBLANK_INTERRUPT_STATUS,
  743.                      50))
  744.                 DRM_DEBUG_KMS("vblank wait timed out\n");
  745. }
  746.  
  747. /*
  748.  * intel_wait_for_pipe_off - wait for pipe to turn off
  749.  * @dev: drm device
  750.  * @pipe: pipe to wait for
  751.  *
  752.  * After disabling a pipe, we can't wait for vblank in the usual way,
  753.  * spinning on the vblank interrupt status bit, since we won't actually
  754.  * see an interrupt when the pipe is disabled.
  755.  *
  756.  * On Gen4 and above:
  757.  *   wait for the pipe register state bit to turn off
  758.  *
  759.  * Otherwise:
  760.  *   wait for the display line value to settle (it usually
  761.  *   ends up stopping at the start of the next frame).
  762.  *
  763.  */
  764. void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
  765. {
  766.         struct drm_i915_private *dev_priv = dev->dev_private;
  767.  
  768.         if (INTEL_INFO(dev)->gen >= 4) {
  769.                 int reg = PIPECONF(pipe);
  770.  
  771.                 /* Wait for the Pipe State to go off */
  772.                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
  773.                              100))
  774.                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
  775.         } else {
  776.                 u32 last_line;
  777.                 int reg = PIPEDSL(pipe);
  778.                 unsigned long timeout = jiffies + msecs_to_jiffies(100);
  779.  
  780.                 /* Wait for the display line to settle */
  781.                 do {
  782.                         last_line = I915_READ(reg) & DSL_LINEMASK;
  783.                         mdelay(5);
  784.                 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
  785.                          time_after(timeout, jiffies));
  786.                 if (time_after(jiffies, timeout))
  787.                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
  788.         }
  789. }
  790.  
  791. static const char *state_string(bool enabled)
  792. {
  793.         return enabled ? "on" : "off";
  794. }
  795.  
  796. /* Only for pre-ILK configs */
  797. static void assert_pll(struct drm_i915_private *dev_priv,
  798.                        enum pipe pipe, bool state)
  799. {
  800.         int reg;
  801.         u32 val;
  802.         bool cur_state;
  803.  
  804.         reg = DPLL(pipe);
  805.         val = I915_READ(reg);
  806.         cur_state = !!(val & DPLL_VCO_ENABLE);
  807.         WARN(cur_state != state,
  808.              "PLL state assertion failure (expected %s, current %s)\n",
  809.              state_string(state), state_string(cur_state));
  810. }
  811. #define assert_pll_enabled(d, p) assert_pll(d, p, true)
  812. #define assert_pll_disabled(d, p) assert_pll(d, p, false)
  813.  
  814. /* For ILK+ */
  815. static void assert_pch_pll(struct drm_i915_private *dev_priv,
  816.                            enum pipe pipe, bool state)
  817. {
  818.         int reg;
  819.         u32 val;
  820.         bool cur_state;
  821.  
  822.         reg = PCH_DPLL(pipe);
  823.         val = I915_READ(reg);
  824.         cur_state = !!(val & DPLL_VCO_ENABLE);
  825.         WARN(cur_state != state,
  826.              "PCH PLL state assertion failure (expected %s, current %s)\n",
  827.              state_string(state), state_string(cur_state));
  828. }
  829. #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
  830. #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
  831.  
  832. static void assert_fdi_tx(struct drm_i915_private *dev_priv,
  833.                           enum pipe pipe, bool state)
  834. {
  835.         int reg;
  836.         u32 val;
  837.         bool cur_state;
  838.  
  839.         reg = FDI_TX_CTL(pipe);
  840.         val = I915_READ(reg);
  841.         cur_state = !!(val & FDI_TX_ENABLE);
  842.         WARN(cur_state != state,
  843.              "FDI TX state assertion failure (expected %s, current %s)\n",
  844.              state_string(state), state_string(cur_state));
  845. }
  846. #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
  847. #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
  848.  
  849. static void assert_fdi_rx(struct drm_i915_private *dev_priv,
  850.                           enum pipe pipe, bool state)
  851. {
  852.         int reg;
  853.         u32 val;
  854.         bool cur_state;
  855.  
  856.         reg = FDI_RX_CTL(pipe);
  857.         val = I915_READ(reg);
  858.         cur_state = !!(val & FDI_RX_ENABLE);
  859.         WARN(cur_state != state,
  860.              "FDI RX state assertion failure (expected %s, current %s)\n",
  861.              state_string(state), state_string(cur_state));
  862. }
  863. #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
  864. #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
  865.  
  866. static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
  867.                                       enum pipe pipe)
  868. {
  869.         int reg;
  870.         u32 val;
  871.  
  872.         /* ILK FDI PLL is always enabled */
  873.         if (dev_priv->info->gen == 5)
  874.                 return;
  875.  
  876.         reg = FDI_TX_CTL(pipe);
  877.         val = I915_READ(reg);
  878.         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
  879. }
  880.  
  881. static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
  882.                                       enum pipe pipe)
  883. {
  884.         int reg;
  885.         u32 val;
  886.  
  887.         reg = FDI_RX_CTL(pipe);
  888.         val = I915_READ(reg);
  889.         WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
  890. }
  891.  
/*
 * Warn if the panel power sequencer still write-protects registers for
 * the pipe driving the LVDS panel.  Callers (e.g. intel_enable_pll) need
 * the protected registers writable before reprogramming them.
 */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
                                  enum pipe pipe)
{
        int pp_reg, lvds_reg;
        u32 val;
        enum pipe panel_pipe = PIPE_A;
        bool locked = true;

        /* Register locations differ between PCH-split and older parts. */
        if (HAS_PCH_SPLIT(dev_priv->dev)) {
                pp_reg = PCH_PP_CONTROL;
                lvds_reg = PCH_LVDS;
        } else {
                pp_reg = PP_CONTROL;
                lvds_reg = LVDS;
        }

        /* Regs are considered unlocked when the panel is powered off or
         * the unlock key pattern is present in the control register. */
        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
                locked = false;

        /* Determine which pipe the LVDS panel is actually attached to. */
        if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
                panel_pipe = PIPE_B;

        WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
  920.  
  921. static void assert_pipe(struct drm_i915_private *dev_priv,
  922.                         enum pipe pipe, bool state)
  923. {
  924.         int reg;
  925.         u32 val;
  926.         bool cur_state;
  927.  
  928.         reg = PIPECONF(pipe);
  929.         val = I915_READ(reg);
  930.         cur_state = !!(val & PIPECONF_ENABLE);
  931.         WARN(cur_state != state,
  932.              "pipe %c assertion failure (expected %s, current %s)\n",
  933.              pipe_name(pipe), state_string(state), state_string(cur_state));
  934. }
  935. #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
  936. #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
  937.  
  938. static void assert_plane_enabled(struct drm_i915_private *dev_priv,
  939.                                  enum plane plane)
  940. {
  941.         int reg;
  942.         u32 val;
  943.  
  944.         reg = DSPCNTR(plane);
  945.         val = I915_READ(reg);
  946.         WARN(!(val & DISPLAY_PLANE_ENABLE),
  947.              "plane %c assertion failure, should be active but is disabled\n",
  948.              plane_name(plane));
  949. }
  950.  
/*
 * Warn if any display plane is still enabled and sourcing from @pipe.
 * Used before shutting a pipe down so it doesn't hang with a plane
 * still pumping pixels.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe)
{
        int reg, i;
        u32 val;
        int cur_pipe;

        /* Planes are fixed to pipes on ILK+ */
        if (HAS_PCH_SPLIT(dev_priv->dev))
                return;

        /* Need to check both planes against the pipe */
        for (i = 0; i < 2; i++) {
                reg = DSPCNTR(i);
                val = I915_READ(reg);
                /* Which pipe this plane is currently bound to. */
                cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
                        DISPPLANE_SEL_PIPE_SHIFT;
                WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
                     "plane %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(i), pipe_name(pipe));
        }
}
  973.  
  974. static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
  975. {
  976.         u32 val;
  977.         bool enabled;
  978.  
  979.         val = I915_READ(PCH_DREF_CONTROL);
  980.         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
  981.                             DREF_SUPERSPREAD_SOURCE_MASK));
  982.         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
  983. }
  984.  
  985. static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
  986.                                        enum pipe pipe)
  987. {
  988.         int reg;
  989.         u32 val;
  990.         bool enabled;
  991.  
  992.         reg = TRANSCONF(pipe);
  993.         val = I915_READ(reg);
  994.         enabled = !!(val & TRANS_ENABLE);
  995.         WARN(enabled,
  996.              "transcoder assertion failed, should be off on pipe %c but is still active\n",
  997.              pipe_name(pipe));
  998. }
  999.  
  1000. static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
  1001.                             enum pipe pipe, u32 port_sel, u32 val)
  1002. {
  1003.         if ((val & DP_PORT_EN) == 0)
  1004.                 return false;
  1005.  
  1006.         if (HAS_PCH_CPT(dev_priv->dev)) {
  1007.                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
  1008.                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
  1009.                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
  1010.                         return false;
  1011.         } else {
  1012.                 if ((val & DP_PIPE_MASK) != (pipe << 30))
  1013.                         return false;
  1014.         }
  1015.         return true;
  1016. }
  1017.  
  1018. static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
  1019.                               enum pipe pipe, u32 val)
  1020. {
  1021.         if ((val & PORT_ENABLE) == 0)
  1022.                 return false;
  1023.  
  1024.         if (HAS_PCH_CPT(dev_priv->dev)) {
  1025.                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1026.                         return false;
  1027.         } else {
  1028.                 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
  1029.                         return false;
  1030.         }
  1031.         return true;
  1032. }
  1033.  
  1034. static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
  1035.                               enum pipe pipe, u32 val)
  1036. {
  1037.         if ((val & LVDS_PORT_EN) == 0)
  1038.                 return false;
  1039.  
  1040.         if (HAS_PCH_CPT(dev_priv->dev)) {
  1041.                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1042.                         return false;
  1043.         } else {
  1044.                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
  1045.                         return false;
  1046.         }
  1047.         return true;
  1048. }
  1049.  
  1050. static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
  1051.                               enum pipe pipe, u32 val)
  1052. {
  1053.         if ((val & ADPA_DAC_ENABLE) == 0)
  1054.                 return false;
  1055.         if (HAS_PCH_CPT(dev_priv->dev)) {
  1056.                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1057.                         return false;
  1058.         } else {
  1059.                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
  1060.                         return false;
  1061.         }
  1062.         return true;
  1063. }
  1064.  
/*
 * Assert that the PCH DP port at register @reg (selected on CPT via
 * @port_sel) is not driving transcoder @pipe.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, int reg, u32 port_sel)
{
        u32 val = I915_READ(reg);
        WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
             "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
}
  1073.  
  1074. static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
  1075.                                      enum pipe pipe, int reg)
  1076. {
  1077.         u32 val = I915_READ(reg);
  1078.         WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
  1079.              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
  1080.              reg, pipe_name(pipe));
  1081. }
  1082.  
  1083. static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
  1084.                                       enum pipe pipe)
  1085. {
  1086.         int reg;
  1087.         u32 val;
  1088.  
  1089.         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
  1090.         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
  1091.         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
  1092.  
  1093.         reg = PCH_ADPA;
  1094.         val = I915_READ(reg);
  1095.         WARN(adpa_pipe_enabled(dev_priv, val, pipe),
  1096.              "PCH VGA enabled on transcoder %c, should be disabled\n",
  1097.              pipe_name(pipe));
  1098.  
  1099.         reg = PCH_LVDS;
  1100.         val = I915_READ(reg);
  1101.         WARN(lvds_pipe_enabled(dev_priv, val, pipe),
  1102.              "PCH LVDS enabled on transcoder %c, should be disabled\n",
  1103.              pipe_name(pipe));
  1104.  
  1105.         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
  1106.         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
  1107.         assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
  1108. }
  1109.  
  1110. /**
  1111.  * intel_enable_pll - enable a PLL
  1112.  * @dev_priv: i915 private structure
  1113.  * @pipe: pipe PLL to enable
  1114.  *
  1115.  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
  1116.  * make sure the PLL reg is writable first though, since the panel write
  1117.  * protect mechanism may be enabled.
  1118.  *
  1119.  * Note!  This is for pre-ILK only.
  1120.  */
  1121. static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1122. {
  1123.     int reg;
  1124.     u32 val;
  1125.  
  1126.     /* No really, not for ILK+ */
  1127.     BUG_ON(dev_priv->info->gen >= 5);
  1128.  
  1129.     /* PLL is protected by panel, make sure we can write it */
  1130.     if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
  1131.         assert_panel_unlocked(dev_priv, pipe);
  1132.  
  1133.     reg = DPLL(pipe);
  1134.     val = I915_READ(reg);
  1135.     val |= DPLL_VCO_ENABLE;
  1136.  
  1137.     /* We do this three times for luck */
  1138.     I915_WRITE(reg, val);
  1139.     POSTING_READ(reg);
  1140.     udelay(150); /* wait for warmup */
  1141.     I915_WRITE(reg, val);
  1142.     POSTING_READ(reg);
  1143.     udelay(150); /* wait for warmup */
  1144.     I915_WRITE(reg, val);
  1145.     POSTING_READ(reg);
  1146.     udelay(150); /* wait for warmup */
  1147. }
  1148.  
  1149. /**
  1150.  * intel_disable_pll - disable a PLL
  1151.  * @dev_priv: i915 private structure
  1152.  * @pipe: pipe PLL to disable
  1153.  *
  1154.  * Disable the PLL for @pipe, making sure the pipe is off first.
  1155.  *
  1156.  * Note!  This is for pre-ILK only.
  1157.  */
  1158. static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1159. {
  1160.         int reg;
  1161.         u32 val;
  1162.  
  1163.         /* Don't disable pipe A or pipe A PLLs if needed */
  1164.         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
  1165.                 return;
  1166.  
  1167.         /* Make sure the pipe isn't still relying on us */
  1168.         assert_pipe_disabled(dev_priv, pipe);
  1169.  
  1170.         reg = DPLL(pipe);
  1171.         val = I915_READ(reg);
  1172.         val &= ~DPLL_VCO_ENABLE;
  1173.         I915_WRITE(reg, val);
  1174.         POSTING_READ(reg);
  1175. }
  1176.  
  1177. /**
  1178.  * intel_enable_pch_pll - enable PCH PLL
  1179.  * @dev_priv: i915 private structure
  1180.  * @pipe: pipe PLL to enable
  1181.  *
  1182.  * The PCH PLL needs to be enabled before the PCH transcoder, since it
  1183.  * drives the transcoder clock.
  1184.  */
  1185. static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
  1186.                                  enum pipe pipe)
  1187. {
  1188.         int reg;
  1189.         u32 val;
  1190.  
  1191.         /* PCH only available on ILK+ */
  1192.         BUG_ON(dev_priv->info->gen < 5);
  1193.  
  1194.         /* PCH refclock must be enabled first */
  1195.         assert_pch_refclk_enabled(dev_priv);
  1196.  
  1197.         reg = PCH_DPLL(pipe);
  1198.         val = I915_READ(reg);
  1199.         val |= DPLL_VCO_ENABLE;
  1200.         I915_WRITE(reg, val);
  1201.         POSTING_READ(reg);
  1202.         udelay(200);
  1203. }
  1204.  
/*
 * intel_disable_pch_pll - disable the PCH PLL for @pipe, after checking
 * the transcoder it clocks is already off.  Counterpart to
 * intel_enable_pch_pll().
 */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
                                  enum pipe pipe)
{
        int reg;
        u32 val;

        /* PCH only available on ILK+ */
        BUG_ON(dev_priv->info->gen < 5);

        /* Make sure transcoder isn't still depending on us */
        assert_transcoder_disabled(dev_priv, pipe);

        reg = PCH_DPLL(pipe);
        val = I915_READ(reg);
        val &= ~DPLL_VCO_ENABLE;
        I915_WRITE(reg, val);
        POSTING_READ(reg);
        udelay(200); /* let the PLL wind down before reuse */
}
  1224.  
/*
 * intel_enable_transcoder - turn on the PCH transcoder for @pipe, after
 * verifying its PCH DPLL is running and FDI TX/RX are enabled to feed it.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        int reg;
        u32 val;

        /* PCH only available on ILK+ */
        BUG_ON(dev_priv->info->gen < 5);

        /* Make sure PCH DPLL is enabled */
        assert_pch_pll_enabled(dev_priv, pipe);

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
        assert_fdi_rx_enabled(dev_priv, pipe);

        reg = TRANSCONF(pipe);
        val = I915_READ(reg);

        if (HAS_PCH_IBX(dev_priv->dev)) {
                /*
                 * make the BPC in transcoder be consistent with
                 * that in pipeconf reg.
                 */
                val &= ~PIPE_BPC_MASK;
                val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
        }
        I915_WRITE(reg, val | TRANS_ENABLE);
        /* Hardware reports readiness via TRANS_STATE_ENABLE; 100 ms cap. */
        if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
                DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
  1256.  
/*
 * intel_disable_transcoder - turn off the PCH transcoder for @pipe, after
 * verifying FDI and the PCH ports no longer depend on it.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        int reg;
        u32 val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
                DRM_ERROR("failed to disable transcoder\n");
}
  1278.  
  1279. /**
  1280.  * intel_enable_pipe - enable a pipe, asserting requirements
  1281.  * @dev_priv: i915 private structure
  1282.  * @pipe: pipe to enable
  1283.  * @pch_port: on ILK+, is this pipe driving a PCH port or not
  1284.  *
  1285.  * Enable @pipe, making sure that various hardware specific requirements
  1286.  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
  1287.  *
  1288.  * @pipe should be %PIPE_A or %PIPE_B.
  1289.  *
  1290.  * Will wait until the pipe is actually running (i.e. first vblank) before
  1291.  * returning.
  1292.  */
  1293. static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
  1294.                               bool pch_port)
  1295. {
  1296.         int reg;
  1297.         u32 val;
  1298.  
  1299.         /*
  1300.          * A pipe without a PLL won't actually be able to drive bits from
  1301.          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
  1302.          * need the check.
  1303.          */
  1304.         if (!HAS_PCH_SPLIT(dev_priv->dev))
  1305.                 assert_pll_enabled(dev_priv, pipe);
  1306.         else {
  1307.                 if (pch_port) {
  1308.                         /* if driving the PCH, we need FDI enabled */
  1309.                         assert_fdi_rx_pll_enabled(dev_priv, pipe);
  1310.                         assert_fdi_tx_pll_enabled(dev_priv, pipe);
  1311.                 }
  1312.                 /* FIXME: assert CPU port conditions for SNB+ */
  1313.         }
  1314.  
  1315.         reg = PIPECONF(pipe);
  1316.         val = I915_READ(reg);
  1317.         if (val & PIPECONF_ENABLE)
  1318.                 return;
  1319.  
  1320.         I915_WRITE(reg, val | PIPECONF_ENABLE);
  1321.         intel_wait_for_vblank(dev_priv->dev, pipe);
  1322. }
  1323.  
  1324. /**
  1325.  * intel_disable_pipe - disable a pipe, asserting requirements
  1326.  * @dev_priv: i915 private structure
  1327.  * @pipe: pipe to disable
  1328.  *
  1329.  * Disable @pipe, making sure that various hardware specific requirements
  1330.  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
  1331.  *
  1332.  * @pipe should be %PIPE_A or %PIPE_B.
  1333.  *
  1334.  * Will wait until the pipe has shut down before returning.
  1335.  */
  1336. static void intel_disable_pipe(struct drm_i915_private *dev_priv,
  1337.                                enum pipe pipe)
  1338. {
  1339.         int reg;
  1340.         u32 val;
  1341.  
  1342.         /*
  1343.          * Make sure planes won't keep trying to pump pixels to us,
  1344.          * or we might hang the display.
  1345.          */
  1346.         assert_planes_disabled(dev_priv, pipe);
  1347.  
  1348.         /* Don't disable pipe A or pipe A PLLs if needed */
  1349.         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
  1350.                 return;
  1351.  
  1352.         reg = PIPECONF(pipe);
  1353.         val = I915_READ(reg);
  1354.         if ((val & PIPECONF_ENABLE) == 0)
  1355.                 return;
  1356.  
  1357.         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
  1358.         intel_wait_for_pipe_off(dev_priv->dev, pipe);
  1359. }
  1360.  
  1361. /*
  1362.  * Plane regs are double buffered, going from enabled->disabled needs a
  1363.  * trigger in order to latch.  The display address reg provides this.
  1364.  */
  1365. static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
  1366.                                       enum plane plane)
  1367. {
  1368.         I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
  1369.         I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
  1370. }
  1371.  
  1372. /**
  1373.  * intel_enable_plane - enable a display plane on a given pipe
  1374.  * @dev_priv: i915 private structure
  1375.  * @plane: plane to enable
  1376.  * @pipe: pipe being fed
  1377.  *
  1378.  * Enable @plane on @pipe, making sure that @pipe is running first.
  1379.  */
  1380. static void intel_enable_plane(struct drm_i915_private *dev_priv,
  1381.                                enum plane plane, enum pipe pipe)
  1382. {
  1383.         int reg;
  1384.         u32 val;
  1385.  
  1386.         /* If the pipe isn't enabled, we can't pump pixels and may hang */
  1387.         assert_pipe_enabled(dev_priv, pipe);
  1388.  
  1389.         reg = DSPCNTR(plane);
  1390.         val = I915_READ(reg);
  1391.         if (val & DISPLAY_PLANE_ENABLE)
  1392.                 return;
  1393.  
  1394.         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
  1395.         intel_flush_display_plane(dev_priv, plane);
  1396.         intel_wait_for_vblank(dev_priv->dev, pipe);
  1397. }
  1398.  
  1399. /**
  1400.  * intel_disable_plane - disable a display plane
  1401.  * @dev_priv: i915 private structure
  1402.  * @plane: plane to disable
  1403.  * @pipe: pipe consuming the data
  1404.  *
  1405.  * Disable @plane; should be an independent operation.
  1406.  */
  1407. static void intel_disable_plane(struct drm_i915_private *dev_priv,
  1408.                                 enum plane plane, enum pipe pipe)
  1409. {
  1410.         int reg;
  1411.         u32 val;
  1412.  
  1413.         reg = DSPCNTR(plane);
  1414.         val = I915_READ(reg);
  1415.         if ((val & DISPLAY_PLANE_ENABLE) == 0)
  1416.                 return;
  1417.  
  1418.         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
  1419.         intel_flush_display_plane(dev_priv, plane);
  1420.         intel_wait_for_vblank(dev_priv->dev, pipe);
  1421. }
  1422.  
/*
 * Turn off the PCH DP port at @reg if it is currently enabled and routed
 * to transcoder @pipe (@port_sel selects the port on CPT).
 */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
                           enum pipe pipe, int reg, u32 port_sel)
{
        u32 val = I915_READ(reg);
        if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
                DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
                I915_WRITE(reg, val & ~DP_PORT_EN);
        }
}
  1432.  
  1433. static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
  1434.                              enum pipe pipe, int reg)
  1435. {
  1436.         u32 val = I915_READ(reg);
  1437.         if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
  1438.                 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
  1439.                               reg, pipe);
  1440.                 I915_WRITE(reg, val & ~PORT_ENABLE);
  1441.         }
  1442. }
  1443.  
  1444. /* Disable any ports connected to this transcoder */
  1445. static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
  1446.                                     enum pipe pipe)
  1447. {
  1448.         u32 reg, val;
  1449.  
  1450.         val = I915_READ(PCH_PP_CONTROL);
  1451.         I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
  1452.  
  1453.         disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
  1454.         disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
  1455.         disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
  1456.  
  1457.         reg = PCH_ADPA;
  1458.         val = I915_READ(reg);
  1459.         if (adpa_pipe_enabled(dev_priv, val, pipe))
  1460.                 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
  1461.  
  1462.         reg = PCH_LVDS;
  1463.         val = I915_READ(reg);
  1464.         if (lvds_pipe_enabled(dev_priv, val, pipe)) {
  1465.                 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
  1466.                 I915_WRITE(reg, val & ~LVDS_PORT_EN);
  1467.                 POSTING_READ(reg);
  1468.                 udelay(100);
  1469.         }
  1470.  
  1471.         disable_pch_hdmi(dev_priv, pipe, HDMIB);
  1472.         disable_pch_hdmi(dev_priv, pipe, HDMIC);
  1473.         disable_pch_hdmi(dev_priv, pipe, HDMID);
  1474. }
  1475.  
/*
 * i8xx_disable_fbc - turn off 8xx-style framebuffer compression and wait
 * for the compressor to go idle.
 */
static void i8xx_disable_fbc(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 fbc_ctl;

    /* Disable compression */
    fbc_ctl = I915_READ(FBC_CONTROL);
    if ((fbc_ctl & FBC_CTL_EN) == 0)
        return;

    fbc_ctl &= ~FBC_CTL_EN;
    I915_WRITE(FBC_CONTROL, fbc_ctl);

    /* Wait for compressing bit to clear (10 ms cap); on timeout only a
     * debug message is emitted. */
    if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
        DRM_DEBUG_KMS("FBC idle timed out\n");
        return;
    }

    DRM_DEBUG_KMS("disabled FBC\n");
}
  1497.  
/*
 * i8xx_enable_fbc - program and enable 8xx-style framebuffer compression
 * for the framebuffer currently bound to @crtc.
 * @interval: periodic recompression interval written to FBC_CONTROL.
 */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_framebuffer *fb = crtc->fb;
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
    struct drm_i915_gem_object *obj = intel_fb->obj;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int cfb_pitch;
    int plane, i;
    u32 fbc_ctl, fbc_ctl2;

    /* Compressed-buffer pitch, clamped to the framebuffer's own pitch. */
    cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
    if (fb->pitch < cfb_pitch)
        cfb_pitch = fb->pitch;

    /* FBC_CTL wants 64B units */
    cfb_pitch = (cfb_pitch / 64) - 1;
    plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

    /* Clear old tags */
    for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
        I915_WRITE(FBC_TAG + (i * 4), 0);

    /* Set it up... */
    fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
    fbc_ctl2 |= plane;
    I915_WRITE(FBC_CONTROL2, fbc_ctl2);
    I915_WRITE(FBC_FENCE_OFF, crtc->y);

    /* enable it... */
    fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
    if (IS_I945GM(dev))
        fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
    fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
    /* NOTE(review): 0x2fff looks like an odd mask (not a contiguous
     * bitfield) -- verify against the FBC_CONTROL register definition. */
    fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
    fbc_ctl |= obj->fence_reg;
    I915_WRITE(FBC_CONTROL, fbc_ctl);

    DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
              cfb_pitch, crtc->y, intel_crtc->plane);
}
  1540.  
  1541. static bool i8xx_fbc_enabled(struct drm_device *dev)
  1542. {
  1543.     struct drm_i915_private *dev_priv = dev->dev_private;
  1544.  
  1545.     return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
  1546. }
  1547.  
  1548. static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
  1549. {
  1550.     struct drm_device *dev = crtc->dev;
  1551.     struct drm_i915_private *dev_priv = dev->dev_private;
  1552.     struct drm_framebuffer *fb = crtc->fb;
  1553.     struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  1554.     struct drm_i915_gem_object *obj = intel_fb->obj;
  1555.     struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  1556.     int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
  1557.     unsigned long stall_watermark = 200;
  1558.     u32 dpfc_ctl;
  1559.  
  1560.     dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
  1561.     dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
  1562.     I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
  1563.  
  1564.     I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
  1565.            (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
  1566.            (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
  1567.     I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
  1568.  
  1569.     /* enable it... */
  1570.     I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
  1571.  
  1572.     DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
  1573. }
  1574.  
  1575. static void g4x_disable_fbc(struct drm_device *dev)
  1576. {
  1577.     struct drm_i915_private *dev_priv = dev->dev_private;
  1578.     u32 dpfc_ctl;
  1579.  
  1580.     /* Disable compression */
  1581.     dpfc_ctl = I915_READ(DPFC_CONTROL);
  1582.     if (dpfc_ctl & DPFC_CTL_EN) {
  1583.         dpfc_ctl &= ~DPFC_CTL_EN;
  1584.         I915_WRITE(DPFC_CONTROL, dpfc_ctl);
  1585.  
  1586.         DRM_DEBUG_KMS("disabled FBC\n");
  1587.     }
  1588. }
  1589.  
  1590. static bool g4x_fbc_enabled(struct drm_device *dev)
  1591. {
  1592.     struct drm_i915_private *dev_priv = dev->dev_private;
  1593.  
  1594.     return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
  1595. }
  1596.  
/* Make the SNB blitter notify FBC of front-buffer writes.
 *
 * GEN6_BLITTER_ECOSKPD is a masked register: a bit is only updated when
 * its mirror in the high (lock) half is also set.  Hence the three-write
 * dance below: unlock the FBC_NOTIFY bit, set it, then clear the lock
 * bit again.  The exact write order matters; do not reorder.
 * Requires force-wake around the register accesses.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 blt_ecoskpd;

        /* Make sure blitter notifies FBC of writes */
        gen6_gt_force_wake_get(dev_priv);
        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
        /* 1) set the mask (lock) bit so the payload bit becomes writable */
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        /* 2) set the payload bit itself */
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        /* 3) drop the mask bit, locking the new value in place */
        blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);
        gen6_gt_force_wake_put(dev_priv);
}
  1616.  
/*
 * ironlake_enable_fbc - program and enable framebuffer compression on
 * Ironlake and later (ILK_DPFC_* register block).
 * @crtc: scanout pipe whose framebuffer should be compressed
 * @interval: recompression timer count
 *
 * Assumes the scanout object is pinned and fenced.  On GEN6 an
 * additional CPU fence is programmed and the blitter is told to notify
 * FBC of writes (see sandybridge_blit_fbc_update).  Register write
 * order follows the hardware programming sequence; do not reorder.
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_framebuffer *fb = crtc->fb;
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
    struct drm_i915_gem_object *obj = intel_fb->obj;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
    unsigned long stall_watermark = 200;
    u32 dpfc_ctl;

    /* Keep only the reserved bits of the old value, then rebuild. */
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
    dpfc_ctl &= DPFC_RESERVED;
    dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
    /* Set persistent mode for front-buffer rendering, ala X. */
    dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
    dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
    I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

    I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
    I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
    /* Tell the hardware where the render target (scanout) lives. */
    I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
    /* enable it... */
    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

    if (IS_GEN6(dev)) {
        /* SNB additionally needs a CPU fence and blitter notification
         * so that CPU/blitter writes invalidate the compressed data. */
        I915_WRITE(SNB_DPFC_CTL_SA,
               SNB_CPU_FENCE_ENABLE | obj->fence_reg);
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
        sandybridge_blit_fbc_update(dev);
    }

    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
  1654.  
  1655. static void ironlake_disable_fbc(struct drm_device *dev)
  1656. {
  1657.     struct drm_i915_private *dev_priv = dev->dev_private;
  1658.     u32 dpfc_ctl;
  1659.  
  1660.     /* Disable compression */
  1661.     dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
  1662.     if (dpfc_ctl & DPFC_CTL_EN) {
  1663.         dpfc_ctl &= ~DPFC_CTL_EN;
  1664.         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
  1665.  
  1666.         DRM_DEBUG_KMS("disabled FBC\n");
  1667.     }
  1668. }
  1669.  
  1670. static bool ironlake_fbc_enabled(struct drm_device *dev)
  1671. {
  1672.     struct drm_i915_private *dev_priv = dev->dev_private;
  1673.  
  1674.     return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
  1675. }
  1676.  
  1677. bool intel_fbc_enabled(struct drm_device *dev)
  1678. {
  1679.         struct drm_i915_private *dev_priv = dev->dev_private;
  1680.  
  1681.         if (!dev_priv->display.fbc_enabled)
  1682.                 return false;
  1683.  
  1684.         return dev_priv->display.fbc_enabled(dev);
  1685. }
  1686.  
  1687.  
  1688.  
  1689.  
  1690.  
  1691.  
  1692.  
  1693.  
  1694.  
  1695.  
/* Request FBC on @crtc.  Upstream schedules a delayed work item here so
 * compression is only enabled after page-flipping settles; in this port
 * the work-queue machinery is compiled out (all the commented lines
 * below), so the function currently only validates the hook and logs.
 * NOTE(review): nothing actually enables FBC on this path — confirm
 * this stub behavior is intended for the port. */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* Platform without an enable hook: nothing to do. */
        if (!dev_priv->display.enable_fbc)
                return;

//      intel_cancel_fbc_work(dev_priv);

//      work = kzalloc(sizeof *work, GFP_KERNEL);
//      if (work == NULL) {
//              dev_priv->display.enable_fbc(crtc, interval);
//              return;
//      }

//      work->crtc = crtc;
//      work->fb = crtc->fb;
//      work->interval = interval;
//      INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

//      dev_priv->fbc_work = work;

        DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

        /* Delay the actual enabling to let pageflipping cease and the
         * display to settle before starting the compression. Note that
         * this delay also serves a second purpose: it allows for a
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
         * A more complicated solution would involve tracking vblanks
         * following the termination of the page-flipping sequence
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
         */
//      schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
  1735.  
  1736. void intel_disable_fbc(struct drm_device *dev)
  1737. {
  1738.         struct drm_i915_private *dev_priv = dev->dev_private;
  1739.  
  1740. //   intel_cancel_fbc_work(dev_priv);
  1741.  
  1742.         if (!dev_priv->display.disable_fbc)
  1743.                 return;
  1744.  
  1745.         dev_priv->display.disable_fbc(dev);
  1746.         dev_priv->cfb_plane = -1;
  1747. }
  1748.  
  1749. /**
  1750.  * intel_update_fbc - enable/disable FBC as needed
  1751.  * @dev: the drm_device
  1752.  *
  1753.  * Set up the framebuffer compression hardware at mode set time.  We
  1754.  * enable it if possible:
  1755.  *   - plane A only (on pre-965)
  1756.  *   - no pixel mulitply/line duplication
  1757.  *   - no alpha buffer discard
  1758.  *   - no dual wide
  1759.  *   - framebuffer <= 2048 in width, 1536 in height
  1760.  *
  1761.  * We can't assume that any compression will take place (worst case),
  1762.  * so the compressed buffer has to be the same size as the uncompressed
  1763.  * one.  It also must reside (along with the line length buffer) in
  1764.  * stolen memory.
  1765.  *
  1766.  * We need to enable/disable FBC on a global basis.
  1767.  */
static void intel_update_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;

        DRM_DEBUG_KMS("\n");

        /* NOTE(review): the no_fbc_reason bookkeeping is compiled out in
         * this port (all the "// dev_priv->no_fbc_reason = ..." lines). */
        if (!i915_powersave)
                return;

        if (!I915_HAS_FBC(dev))
                return;

        /*
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
         * Need to disable if:
         *   - more than one pipe is active
         *   - changing FBC params (stride, fence, mode)
         *   - new fb is too large to fit in compressed buffer
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        /* FBC can track only a single plane, so exactly one enabled crtc
         * with a framebuffer may exist; a second one disqualifies FBC. */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
                if (tmp_crtc->enabled && tmp_crtc->fb) {
                        if (crtc) {
                                DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
//                              dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
                                goto out_disable;
                        }
                        crtc = tmp_crtc;
                }
        }

        if (!crtc || crtc->fb == NULL) {
                DRM_DEBUG_KMS("no output, disabling\n");
//              dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
                goto out_disable;
        }

        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->fb;
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;

        if (!i915_enable_fbc) {
                DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
//              dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
        /* Worst case assumes no compression: the CFB in stolen memory
         * must hold the entire uncompressed framebuffer. */
        if (intel_fb->obj->base.size > dev_priv->cfb_size) {
                DRM_DEBUG_KMS("framebuffer too large, disabling "
                              "compression\n");
//              dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                goto out_disable;
        }
        if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
            (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                DRM_DEBUG_KMS("mode incompatible with compression, "
                              "disabling\n");
//              dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
                goto out_disable;
        }
        if ((crtc->mode.hdisplay > 2048) ||
            (crtc->mode.vdisplay > 1536)) {
                DRM_DEBUG_KMS("mode too large for compression, disabling\n");
//              dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
                goto out_disable;
        }
        /* Pre-965 GM parts can only compress plane A. */
        if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
                DRM_DEBUG_KMS("plane not 0, disabling compression\n");
//              dev_priv->no_fbc_reason = FBC_BAD_PLANE;
                goto out_disable;
        }

        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         */
//      if (obj->tiling_mode != I915_TILING_X ||
//          obj->fence_reg == I915_FENCE_REG_NONE) {
//              DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
//              dev_priv->no_fbc_reason = FBC_NOT_TILED;
//              goto out_disable;
//      }

        /* If the kernel debugger is active, always disable compression */
        if (in_dbg_master())
                goto out_disable;

        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
        if (dev_priv->cfb_plane == intel_crtc->plane &&
            dev_priv->cfb_fb == fb->base.id &&
            dev_priv->cfb_y == crtc->y)
                return;

        if (intel_fbc_enabled(dev)) {
                /* We update FBC along two paths, after changing fb/crtc
                 * configuration (modeswitching) and after page-flipping
                 * finishes. For the latter, we know that not only did
                 * we disable the FBC at the start of the page-flip
                 * sequence, but also more than one vblank has passed.
                 *
                 * For the former case of modeswitching, it is possible
                 * to switch between two FBC valid configurations
                 * instantaneously so we do need to disable the FBC
                 * before we can modify its control registers. We also
                 * have to wait for the next vblank for that to take
                 * effect. However, since we delay enabling FBC we can
                 * assume that a vblank has passed since disabling and
                 * that we can safely alter the registers in the deferred
                 * callback.
                 *
                 * In the scenario that we go from a valid to invalid
                 * and then back to valid FBC configuration we have
                 * no strict enforcement that a vblank occurred since
                 * disabling the FBC. However, along all current pipe
                 * disabling paths we do need to wait for a vblank at
                 * some point. And we wait before enabling FBC anyway.
                 */
                DRM_DEBUG_KMS("disabling active FBC for update\n");
                intel_disable_fbc(dev);
        }

        intel_enable_fbc(crtc, 500);
        return;

out_disable:
        /* Multiple disables should be harmless */
        if (intel_fbc_enabled(dev)) {
                DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
                intel_disable_fbc(dev);
        }
}
  1908.  
  1909.  
  1910.  
  1911.  
  1912.  
  1913.  
  1914.  
  1915.  
  1916.  
  1917.  
  1918.  
  1919.  
  1920.  
  1921.  
  1922.  
  1923.  
  1924.  
  1925.  
  1926.  
  1927.  
  1928.  
  1929.  
  1930.  
  1931.  
/*
 * i9xx_update_plane - program the display plane registers (gen2-gen4)
 * to scan out @fb at panning offset (@x, @y).
 * @crtc: the crtc whose primary plane is updated
 * @fb: framebuffer to display
 * @x: horizontal panning offset in pixels
 * @y: vertical panning offset in lines
 *
 * Returns 0 on success, -EINVAL for an unsupported plane index or
 * pixel format.  Register write order follows the hardware sequence;
 * do not reorder.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                 int x, int y)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long Start, Offset;
    u32 dspcntr;
    u32 reg;

    /* Only the two primary planes can be addressed here. */
    switch (plane) {
    case 0:
    case 1:
        break;
    default:
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->bits_per_pixel) {
    case 8:
        dspcntr |= DISPPLANE_8BPP;
        break;
    case 16:
        /* depth 15 = x1r5g5b5, otherwise r5g6b5 */
        if (fb->depth == 15)
            dspcntr |= DISPPLANE_15_16BPP;
        else
            dspcntr |= DISPPLANE_16BPP;
        break;
    case 24:
    case 32:
        dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
        break;
    default:
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
        return -EINVAL;
    }
    /* gen4+ planes understand tiled scanout; propagate the object's
     * tiling mode into the plane control word. */
    if (INTEL_INFO(dev)->gen >= 4) {
        if (obj->tiling_mode != I915_TILING_NONE)
            dspcntr |= DISPPLANE_TILED;
        else
            dspcntr &= ~DISPPLANE_TILED;
    }

    I915_WRITE(reg, dspcntr);

    /* Surface base is the object's GTT offset; the panning offset is a
     * byte offset from that base. */
    Start = obj->gtt_offset;
    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
              Start, Offset, x, y, fb->pitch);
    I915_WRITE(DSPSTRIDE(plane), fb->pitch);
    /* gen4+ splits base/offset across DSPSURF/DSPTILEOFF/DSPADDR;
     * older parts take a single combined address in DSPADDR. */
    if (INTEL_INFO(dev)->gen >= 4) {
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
    } else
        I915_WRITE(DSPADDR(plane), Start + Offset);
    POSTING_READ(reg);

    return 0;
}
  2004.  
  2005. static int ironlake_update_plane(struct drm_crtc *crtc,
  2006.                  struct drm_framebuffer *fb, int x, int y)
  2007. {
  2008.     struct drm_device *dev = crtc->dev;
  2009.     struct drm_i915_private *dev_priv = dev->dev_private;
  2010.     struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2011.     struct intel_framebuffer *intel_fb;
  2012.     struct drm_i915_gem_object *obj;
  2013.     int plane = intel_crtc->plane;
  2014.     unsigned long Start, Offset;
  2015.     u32 dspcntr;
  2016.     u32 reg;
  2017.  
  2018.     switch (plane) {
  2019.     case 0:
  2020.     case 1:
  2021.         break;
  2022.     default:
  2023.         DRM_ERROR("Can't update plane %d in SAREA\n", plane);
  2024.         return -EINVAL;
  2025.     }
  2026.  
  2027.     intel_fb = to_intel_framebuffer(fb);
  2028.     obj = intel_fb->obj;
  2029.  
  2030.     reg = DSPCNTR(plane);
  2031.     dspcntr = I915_READ(reg);
  2032.     /* Mask out pixel format bits in case we change it */
  2033.     dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
  2034.     switch (fb->bits_per_pixel) {
  2035.     case 8:
  2036.         dspcntr |= DISPPLANE_8BPP;
  2037.         break;
  2038.     case 16:
  2039.         if (fb->depth != 16)
  2040.             return -EINVAL;
  2041.  
  2042.         dspcntr |= DISPPLANE_16BPP;
  2043.         break;
  2044.     case 24:
  2045.     case 32:
  2046.         if (fb->depth == 24)
  2047.             dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
  2048.         else if (fb->depth == 30)
  2049.             dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
  2050.         else
  2051.             return -EINVAL;
  2052.         break;
  2053.     default:
  2054.         DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
  2055.         return -EINVAL;
  2056.     }
  2057.  
  2058. //    if (obj->tiling_mode != I915_TILING_NONE)
  2059. //        dspcntr |= DISPPLANE_TILED;
  2060. //    else
  2061.         dspcntr &= ~DISPPLANE_TILED;
  2062.  
  2063.     /* must disable */
  2064.     dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
  2065.  
  2066.     I915_WRITE(reg, dspcntr);
  2067.  
  2068. //    Start = obj->gtt_offset;
  2069. //    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
  2070.  
  2071.     DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
  2072.               Start, Offset, x, y, fb->pitch);
  2073. //    I915_WRITE(DSPSTRIDE(plane), fb->pitch);
  2074. //    I915_WRITE(DSPSURF(plane), Start);
  2075. //    I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
  2076. //    I915_WRITE(DSPADDR(plane), Offset);
  2077. //    POSTING_READ(reg);
  2078.  
  2079.     return 0;
  2080. }
  2081.  
  2082. /* Assume fb object is pinned & idle & fenced and just update base pointers */
  2083. static int
  2084. intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
  2085.                            int x, int y, enum mode_set_atomic state)
  2086. {
  2087.         struct drm_device *dev = crtc->dev;
  2088.         struct drm_i915_private *dev_priv = dev->dev_private;
  2089.         int ret;
  2090.  
  2091.         ret = dev_priv->display.update_plane(crtc, fb, x, y);
  2092.         if (ret)
  2093.                 return ret;
  2094.  
  2095.         intel_update_fbc(dev);
  2096.         intel_increase_pllclock(crtc);
  2097.  
  2098.         return 0;
  2099. }
  2100.  
  2101. static int
  2102. intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
  2103.                     struct drm_framebuffer *old_fb)
  2104. {
  2105.         struct drm_device *dev = crtc->dev;
  2106.         struct drm_i915_master_private *master_priv;
  2107.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2108.         int ret;
  2109.  
  2110.         /* no fb bound */
  2111.         if (!crtc->fb) {
  2112.                 DRM_ERROR("No FB bound\n");
  2113.                 return 0;
  2114.         }
  2115.  
  2116.         switch (intel_crtc->plane) {
  2117.         case 0:
  2118.         case 1:
  2119.                 break;
  2120.         default:
  2121.                 DRM_ERROR("no plane for crtc\n");
  2122.                 return -EINVAL;
  2123.         }
  2124.  
  2125.         mutex_lock(&dev->struct_mutex);
  2126. //   ret = intel_pin_and_fence_fb_obj(dev,
  2127. //                    to_intel_framebuffer(crtc->fb)->obj,
  2128. //                    NULL);
  2129.         if (ret != 0) {
  2130.                 mutex_unlock(&dev->struct_mutex);
  2131.                 DRM_ERROR("pin & fence failed\n");
  2132.                 return ret;
  2133.         }
  2134.  
  2135.         if (old_fb) {
  2136.                 struct drm_i915_private *dev_priv = dev->dev_private;
  2137.                 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
  2138.  
  2139. //              wait_event(dev_priv->pending_flip_queue,
  2140. //                         atomic_read(&dev_priv->mm.wedged) ||
  2141. //                         atomic_read(&obj->pending_flip) == 0);
  2142.  
  2143.                 /* Big Hammer, we also need to ensure that any pending
  2144.                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
  2145.                  * current scanout is retired before unpinning the old
  2146.                  * framebuffer.
  2147.                  *
  2148.                  * This should only fail upon a hung GPU, in which case we
  2149.                  * can safely continue.
  2150.                  */
  2151. //       ret = i915_gem_object_finish_gpu(obj);
  2152.                 (void) ret;
  2153.         }
  2154.  
  2155.         ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
  2156.                                          LEAVE_ATOMIC_MODE_SET);
  2157.         if (ret) {
  2158. //       i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
  2159.                 mutex_unlock(&dev->struct_mutex);
  2160.                 DRM_ERROR("failed to update base address\n");
  2161.                 return ret;
  2162.         }
  2163.  
  2164.         if (old_fb) {
  2165. //       intel_wait_for_vblank(dev, intel_crtc->pipe);
  2166. //       i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
  2167.         }
  2168.  
  2169.         mutex_unlock(&dev->struct_mutex);
  2170.  
  2171. //      if (!dev->primary->master)
  2172. //              return 0;
  2173.  
  2174. //      master_priv = dev->primary->master->driver_priv;
  2175. //      if (!master_priv->sarea_priv)
  2176. //              return 0;
  2177.  
  2178. //      if (intel_crtc->pipe) {
  2179. //              master_priv->sarea_priv->pipeB_x = x;
  2180. //              master_priv->sarea_priv->pipeB_y = y;
  2181. //      } else {
  2182. //              master_priv->sarea_priv->pipeA_x = x;
  2183. //              master_priv->sarea_priv->pipeA_y = y;
  2184. //      }
  2185.  
  2186.         return 0;
  2187. }
  2188.  
/*
 * ironlake_set_pll_edp - select the eDP PLL frequency on DP_A for the
 * given link clock.
 * @crtc: crtc driving the eDP panel (only its device is used)
 * @clock: link clock in kHz; below 200000 selects 160MHz (with the
 *         documented workaround sequence), otherwise 270MHz.
 *
 * Finishes with a posting read plus a 500us delay to let the PLL
 * settle.  Register write order matters; do not reorder.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl &= ~DP_PLL_FREQ_MASK;

        if (clock < 200000) {
                u32 temp;
                dpa_ctl |= DP_PLL_FREQ_160MHZ;
                /* workaround for 160Mhz:
                   1) program 0x4600c bits 15:0 = 0x8124
                   2) program 0x46010 bit 0 = 1
                   3) program 0x46034 bit 24 = 1
                   4) program 0x64000 bit 14 = 1
                   */
                temp = I915_READ(0x4600c);
                temp &= 0xffff0000;
                I915_WRITE(0x4600c, temp | 0x8124);

                temp = I915_READ(0x46010);
                I915_WRITE(0x46010, temp | 1);

                temp = I915_READ(0x46034);
                I915_WRITE(0x46034, temp | (1 << 24));
        } else {
                dpa_ctl |= DP_PLL_FREQ_270MHZ;
        }
        I915_WRITE(DP_A, dpa_ctl);

        POSTING_READ(DP_A);
        udelay(500);
}
  2225.  
/*
 * intel_fdi_normal_train - switch the FDI link out of training and into
 * the normal (pixel) pattern on both the CPU TX and PCH RX sides.
 * @crtc: the crtc whose FDI link has completed training
 *
 * IVB uses its own train-select field; CPT PCHs use a different RX
 * pattern mask than Ibexpeak.  Ends with a posting read and one idle
 * pattern time (1ms) of delay before enabling IVB error correction.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 reg, temp;

        /* enable normal train */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        if (IS_IVYBRIDGE(dev)) {
                temp &= ~FDI_LINK_TRAIN_NONE_IVB;
                temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
        }
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                /* CPT encodes the train pattern in a dedicated field */
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_NORMAL_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_NONE;
        }
        I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

        /* wait one idle pattern time */
        POSTING_READ(reg);
        udelay(1000);

        /* IVB wants error correction enabled */
        if (IS_IVYBRIDGE(dev))
                I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
                           FDI_FE_ERRC_ENABLE);
}
  2266.  
  2267. static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
  2268. {
  2269.         struct drm_i915_private *dev_priv = dev->dev_private;
  2270.         u32 flags = I915_READ(SOUTH_CHICKEN1);
  2271.  
  2272.         flags |= FDI_PHASE_SYNC_OVR(pipe);
  2273.         I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
  2274.         flags |= FDI_PHASE_SYNC_EN(pipe);
  2275.         I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
  2276.         POSTING_READ(SOUTH_CHICKEN1);
  2277. }
  2278.  
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU-to-PCH FDI link for this crtc's pipe: run training
 * pattern 1 until bit lock is reported in FDI_RX_IIR, then pattern 2
 * until symbol lock.  Each phase polls up to 5 times; failure is only
 * logged, not returned.  The pipe and plane must already be enabled.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp, tries;

    /* FDI needs bits from pipe & plane first */
    assert_pipe_enabled(dev_priv, pipe);
    assert_plane_enabled(dev_priv, plane);

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);
    I915_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* lane count field (bits 21:19) holds lanes - 1 */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    /* Ironlake workaround, enable clock pointer after FDI enable*/
    if (HAS_PCH_IBX(dev)) {
        /* two writes: override first, then override + enable
         * (cf. the SOUTH_CHICKEN1 unlock dance on CPT) */
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
               FDI_RX_PHASE_SYNC_POINTER_EN);
    }

    /* poll for bit lock; writing the status bit back clears it */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if ((temp & FDI_RX_BIT_LOCK)) {
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* poll for symbol lock; write-back clears the status bit */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done\n");

}
  2375.  
/*
 * Voltage-swing / pre-emphasis combinations tried in order during
 * SNB/IVB FDI link training (see gen6_fdi_link_train() and
 * ivb_manual_fdi_link_train(), which index this table with their
 * retry counter).
 */
static const int snb_b_fdi_train_param [] = {
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
  2382.  
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link on SandyBridge.  Same two-pattern scheme as the
 * Ironlake version, but each pattern is retried with up to four
 * voltage/pre-emphasis settings from snb_b_fdi_train_param[], and the
 * RX side uses CPT-specific pattern fields when a CougarPoint PCH is
 * present.  Failures are logged only.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* lane count field (bits 21:19) holds lanes - 1 */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    /* SNB-B */
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
    }
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* walk the voltage/pre-emphasis table until bit lock is seen */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_BIT_LOCK) {
            /* write-back clears the sticky status bit */
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    if (IS_GEN6(dev)) {
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    }
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
    }
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* same voltage walk, now waiting for symbol lock */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
  2506.  
/* Manual link training for Ivy Bridge A0 parts */
/*
 * IVB A0 cannot use hardware auto-training, so run the same manual
 * two-pattern sequence as gen6, using the IVB-specific TX pattern
 * bits (FDI_LINK_TRAIN_*_IVB) and the CPT pattern fields on RX.
 * Auto-train mode is explicitly masked off on both ends first.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* lane count field (bits 21:19) holds lanes - 1 */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_AUTO;
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* walk the voltage/pre-emphasis table until bit lock is seen */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        /* re-read once in case lock arrived just after the first read */
        if (temp & FDI_RX_BIT_LOCK ||
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* same voltage walk, now waiting for symbol lock */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
  2618.  
/*
 * Enable the FDI PLLs for the crtc's pipe: program the TU size for
 * error detection, bring up the PCH FDI RX PLL (with lane count and a
 * BPC field copied from PIPECONF), switch the RX clock from Rawclk to
 * PCDclk, and finally enable the CPU FDI TX PLL if it is not already
 * on.  The udelay()s allow the PLL warmup between steps.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 reg, temp;

        /* Write the TU size bits so error detection works */
        I915_WRITE(FDI_RX_TUSIZE1(pipe),
                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

        /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        /* clear lane count (21:19) and BPC (18:16) fields */
        temp &= ~((0x7 << 19) | (0x7 << 16));
        temp |= (intel_crtc->fdi_lanes - 1) << 19;
        /* keep FDI RX BPC consistent with the pipe's PIPECONF BPC */
        temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
        I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

        POSTING_READ(reg);
        udelay(200);

        /* Switch from Rawclk to PCDclk */
        temp = I915_READ(reg);
        I915_WRITE(reg, temp | FDI_PCDCLK);

        POSTING_READ(reg);
        udelay(200);

        /* Enable CPU FDI TX PLL, always on for Ironlake */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        if ((temp & FDI_TX_PLL_ENABLE) == 0) {
                I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

                POSTING_READ(reg);
                udelay(100);
        }
}
  2659.  
/*
 * Disable the FDI phase sync pointer for @pipe on a CougarPoint PCH.
 * Inverse of cpt_phase_pointer_enable(): clear the enable bit first,
 * then clear the override bit to re-lock the SOUTH_CHICKEN1 field.
 */
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 flags = I915_READ(SOUTH_CHICKEN1);

        flags &= ~(FDI_PHASE_SYNC_EN(pipe));
        I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
        flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
        I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
        POSTING_READ(SOUTH_CHICKEN1);
}
  2671. static void ironlake_fdi_disable(struct drm_crtc *crtc)
  2672. {
  2673.         struct drm_device *dev = crtc->dev;
  2674.         struct drm_i915_private *dev_priv = dev->dev_private;
  2675.         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2676.         int pipe = intel_crtc->pipe;
  2677.         u32 reg, temp;
  2678.  
  2679.         /* disable CPU FDI tx and PCH FDI rx */
  2680.         reg = FDI_TX_CTL(pipe);
  2681.         temp = I915_READ(reg);
  2682.         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
  2683.         POSTING_READ(reg);
  2684.  
  2685.         reg = FDI_RX_CTL(pipe);
  2686.         temp = I915_READ(reg);
  2687.         temp &= ~(0x7 << 16);
  2688.         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
  2689.         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
  2690.  
  2691.         POSTING_READ(reg);
  2692.         udelay(100);
  2693.  
  2694.         /* Ironlake workaround, disable clock pointer after downing FDI */
  2695.         if (HAS_PCH_IBX(dev)) {
  2696.                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
  2697.                 I915_WRITE(FDI_RX_CHICKEN(pipe),
  2698.                            I915_READ(FDI_RX_CHICKEN(pipe) &
  2699.                                      ~FDI_RX_PHASE_SYNC_POINTER_EN));
  2700.         } else if (HAS_PCH_CPT(dev)) {
  2701.                 cpt_phase_pointer_disable(dev, pipe);
  2702.         }
  2703.  
  2704.         /* still set train pattern 1 */
  2705.         reg = FDI_TX_CTL(pipe);
  2706.         temp = I915_READ(reg);
  2707.         temp &= ~FDI_LINK_TRAIN_NONE;
  2708.         temp |= FDI_LINK_TRAIN_PATTERN_1;
  2709.         I915_WRITE(reg, temp);
  2710.  
  2711.         reg = FDI_RX_CTL(pipe);
  2712.         temp = I915_READ(reg);
  2713.         if (HAS_PCH_CPT(dev)) {
  2714.                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  2715.                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  2716.         } else {
  2717.                 temp &= ~FDI_LINK_TRAIN_NONE;
  2718.                 temp |= FDI_LINK_TRAIN_PATTERN_1;
  2719.         }
  2720.         /* BPC in FDI rx is consistent with that in PIPECONF */
  2721.         temp &= ~(0x07 << 16);
  2722.         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
  2723.         I915_WRITE(reg, temp);
  2724.  
  2725.         POSTING_READ(reg);
  2726.         udelay(100);
  2727. }
  2728.  
  2729. /*
  2730.  * When we disable a pipe, we need to clear any pending scanline wait events
  2731.  * to avoid hanging the ring, which we assume we are waiting on.
  2732.  */
  2733. static void intel_clear_scanline_wait(struct drm_device *dev)
  2734. {
  2735.         struct drm_i915_private *dev_priv = dev->dev_private;
  2736.         struct intel_ring_buffer *ring;
  2737.         u32 tmp;
  2738.  
  2739.         if (IS_GEN2(dev))
  2740.                 /* Can't break the hang on i8xx */
  2741.                 return;
  2742.  
  2743.         ring = LP_RING(dev_priv);
  2744.         tmp = I915_READ_CTL(ring);