Subversion Repositories: Kolibri OS

Rev 3482 ("-" lines) vs Rev 3746 ("+" lines)
Line 23... Line 23...
 * Authors:
 *    Eugeni Dodonov
 *
 */

Line 27... Line -...
-
-#define iowrite32(v, addr)      writel((v), (addr))
-#define ioread32(addr)          readl(addr)

//#include
#include "i915_drv.h"
#include "intel_drv.h"
#include
Line 1323... Line 1320...
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;

-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;

Line 1379... Line 1376...
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;

-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
Line 1738... Line 1735...
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
	}

-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
	}

Line 1823... Line 1820...
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
	}

-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
	}

Line 1926... Line 1923...
	int fbc_wm, plane_wm, cursor_wm;
	int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
	unsigned int enabled;

	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
	}

-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
	}

-	if (g4x_compute_wm0(dev, 2,
+	if (g4x_compute_wm0(dev, PIPE_C,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEC_IVB);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEC_IVB, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
-		enabled |= 3;
+		enabled |= 1 << PIPE_C;
	}
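
Note on the watermark hunks above: the hard-coded pipe indices (0, 1, 2) and the literal mask values (enabled |= 1 / 2 / 3) are replaced with the pipe enum, so the enabled bitmask always carries exactly one bit per pipe; the old "enabled |= 3" in the pipe C branch set two bits at once. A minimal standalone sketch of the bitmask idea follows; the enum values and the power-of-two test are illustrative assumptions, not the driver's actual definitions.

    #include <stdio.h>

    /* Illustrative only: assumed pipe numbering (PIPE_A = 0, PIPE_B = 1, PIPE_C = 2). */
    enum pipe { PIPE_A, PIPE_B, PIPE_C };

    /* Assumed equivalent of the driver's single_plane_enabled() check:
     * true when exactly one bit is set in the mask. */
    static int single_plane_enabled(unsigned int mask)
    {
        return mask && (mask & (mask - 1)) == 0;
    }

    int main(void)
    {
        unsigned int enabled = 0;

        enabled |= 1 << PIPE_A;   /* pipe A produced a valid WM0 result */
        enabled |= 1 << PIPE_C;   /* the old literal "3" would have set the wrong bits here */

        printf("mask=0x%x, single plane: %s\n",
               enabled, single_plane_enabled(enabled) ? "yes" : "no");
        return 0;
    }
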
Line 2482... Line 2479...
	WARN_ON(val < dev_priv->rps.min_delay);

	if (val == dev_priv->rps.cur_delay)
		return;

+	if (IS_HASWELL(dev))
+		I915_WRITE(GEN6_RPNSWREQ,
+			   HSW_FREQUENCY(val));
+	else
	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(val) |
Line 2576... Line 2577...
	gen6_gt_force_wake_get(dev_priv);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);

-	/* In units of 100MHz */
-	dev_priv->rps.max_delay = rp_state_cap & 0xff;
+	/* In units of 50MHz */
+	dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
	dev_priv->rps.cur_delay = 0;

Line 2623... Line 2624...
	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

+	if (IS_HASWELL(dev)) {
+		I915_WRITE(GEN6_RPNSWREQ,
+			   HSW_FREQUENCY(10));
+		I915_WRITE(GEN6_RC_VIDEO_FREQ,
+			   HSW_FREQUENCY(12));
+	} else {
	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));
+	}

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,

Line 2653... Line 2661...

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (!ret) {
		pcu_mbox = 0;
		ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
-		if (ret && pcu_mbox & (1<<31)) { /* OC supported */
-		dev_priv->rps.max_delay = pcu_mbox & 0xff;
-		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+		if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+			DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
+					 (dev_priv->rps.max_delay & 0xff) * 50,
+					 (pcu_mbox & 0xff) * 50);
+			dev_priv->rps.hw_max = pcu_mbox & 0xff;
	}
	} else {
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
Line 2693... Line 2703...

static void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
-	int gpu_freq;
-	unsigned int ia_freq, max_ia_freq;
+	unsigned int gpu_freq;
+	unsigned int max_ia_freq, min_ring_freq;
	int scaling_factor = 180;

Line 2710... Line 2720...
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

+	min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
+	/* convert DDR frequency from units of 133.3MHz to bandwidth */
+	min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
+
	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
	     gpu_freq--) {
		int diff = dev_priv->rps.max_delay - gpu_freq;
+		unsigned int ia_freq = 0, ring_freq = 0;

-		/*
-		 * For GPU frequencies less than 750MHz, just use the lowest
-		 * ring freq.
+		if (IS_HASWELL(dev)) {
+			ring_freq = (gpu_freq * 5 + 3) / 4;
+			ring_freq = max(min_ring_freq, ring_freq);
+			/* leave ia_freq as the default, chosen by cpufreq */
+		} else {
+			/* On older processors, there is no separate ring
+			 * clock domain, so in order to boost the bandwidth
+			 * of the ring, we need to upclock the CPU (ia_freq).
+			 *
+			 * For GPU frequencies less than 750MHz,
+			 * just use the lowest ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
-		ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
+		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
-					ia_freq | gpu_freq);
+					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+					gpu_freq);
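
Note on the ring-frequency hunk above: the new code derives a minimum ring ratio from the DCLK reading with (2 * 4 * min_ring_freq + 2) / 3 and, on Haswell, requests a ring clock of roughly 1.25x the GPU ratio, clamped to that minimum; older parts still scale the CPU (IA) frequency instead. A worked sketch of that arithmetic with an invented DCLK value (the register read and exact units are not reproduced here):

    #include <stdio.h>

    static unsigned int max_u(unsigned int a, unsigned int b)
    {
        return a > b ? a : b;
    }

    int main(void)
    {
        /* Hypothetical DCLK ratio (units of 133.3 MHz), for illustration only. */
        unsigned int min_ring_freq = 6;

        /* Same conversion as the new code: DDR ratio -> ring bandwidth figure. */
        min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;   /* = 16 here */

        /* Haswell branch: ring ratio ~= 1.25 * GPU ratio, floored at min_ring_freq. */
        for (unsigned int gpu_freq = 22; gpu_freq >= 7; gpu_freq--) {
            unsigned int ring_freq = (gpu_freq * 5 + 3) / 4;
            ring_freq = max_u(min_ring_freq, ring_freq);
            printf("gpu ratio %2u -> ring ratio %2u\n", gpu_freq, ring_freq);
        }
        return 0;
    }
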
Line 2843... Line 2868...
	 * safe to assume that renderctx is valid
	 */
	ret = intel_ring_idle(ring);
	dev_priv->mm.interruptible = was_interruptible;
	if (ret) {
-		DRM_ERROR("failed to enable ironlake power power savings\n");
+		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		return;
	}

Line 3588... Line 3613...

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
+	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock

Line 3600... Line 3626...
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
-	for_each_pipe(pipe)
-		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+	for_each_pipe(pipe) {
+		val = I915_READ(TRANS_CHICKEN2(pipe));
+		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+		if (dev_priv->fdi_rx_polarity_inverted)
+			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
+		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
+		I915_WRITE(TRANS_CHICKEN2(pipe), val);
+	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
Line 3794... Line 3829...

	/* WaMbcDriverBootEnable */
	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+	/* WaSwitchSolVfFArbitrationPriority */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* XXX: This is a workaround for early silicon revisions and should be
	 * removed later.
	 */
Line 3900... Line 3938...
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

+	if (!HAS_PCH_NOP(dev))
	cpt_init_clock_gating(dev);

Line 3925... Line 3964...
	/* WaDisableBackToBackFlipFix */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

+	/* WaDisablePSDDualDispatchEnable */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
+				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
Line 3994... Line 4035...

	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
-
-	/*
-	 * On ValleyView, the GUnit needs to signal the GT
-	 * when flip and other events complete.  So enable
-	 * all the GUnit->GT interrupts here
-	 */
-	I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
-		   PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
-		   SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
-		   PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
-		   PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
-		   SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
-		   PLANEA_FLIPDONE_INT_EN);

	/*
	 * WaDisableVLVClockGating_VBIIssue
	 * Disable clock gating on th GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
-	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+	I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
+
+	/* Conservative clock gating settings for now */
+	I915_WRITE(0x9400, 0xffffffff);
+	I915_WRITE(0x9404, 0xffffffff);
+	I915_WRITE(0x9408, 0xffffffff);
+	I915_WRITE(0x940c, 0xffffffff);
+	I915_WRITE(0x9410, 0xffffffff);
+	I915_WRITE(0x9414, 0xffffffff);
+	I915_WRITE(0x9418, 0xffffffff);
}

Line 4096... Line 4133...
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
+}
+
+/**
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+bool intel_using_power_well(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_HASWELL(dev))
+		return I915_READ(HSW_PWR_WELL_DRIVER) ==
+		       (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
+	else
+		return true;
}

void intel_set_power_well(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool is_enabled, enable_requested;
	uint32_t tmp;

-	if (!IS_HASWELL(dev))
+	if (!HAS_POWER_WELL(dev))
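
Note on the power-well hunk above: the newly added intel_using_power_well() reports the well as usable only when the driver both requested it (enable bit) and the hardware reports it as powered (state bit). A minimal sketch of that request/state check; the bit positions below are placeholders, not the real HSW_PWR_WELL_* definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative bit layout only; the real values live in the driver headers. */
    #define PWR_WELL_ENABLE (1u << 31)   /* driver requested the well        */
    #define PWR_WELL_STATE  (1u << 30)   /* hardware reports the well is up  */

    static int using_power_well(uint32_t reg)
    {
        return reg == (PWR_WELL_ENABLE | PWR_WELL_STATE);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               using_power_well(PWR_WELL_ENABLE | PWR_WELL_STATE), /* 1: requested and up      */
               using_power_well(PWR_WELL_ENABLE),                  /* 0: requested, not up yet */
               using_power_well(0));                               /* 0: never requested       */
        return 0;
    }
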
Line 4140... Line 4193...
 */
void intel_init_power_well(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

-	if (!IS_HASWELL(dev))
+	if (!HAS_POWER_WELL(dev))
		return;

Line 4202... Line 4255...
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		} else if (IS_IVYBRIDGE(dev)) {
-			/* FIXME: detect B0+ stepping and use auto training */
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = ivybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
Line 4300... Line 4352...
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
-	u32 forcewake_ack;
-
-	if (IS_HASWELL(dev_priv->dev))
-		forcewake_ack = FORCEWAKE_ACK_HSW;
-	else
-		forcewake_ack = FORCEWAKE_ACK;
-
-	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

-	I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
+	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */

-	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))

Line 4337... Line 4382...
	if (IS_HASWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

-	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);

-	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
Line 4435... Line 4480...
	POSTING_READ(FORCEWAKE_ACK_VLV);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
-	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

-	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
+			     FORCEWAKE_KERNEL),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-	/* something from same cacheline, but !FORCEWAKE_VLV */
+	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
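
Note on the forcewake hunks above: the VLV path now wakes and checks the media well (FORCEWAKE_MEDIA_VLV / FORCEWAKE_ACK_MEDIA_VLV) alongside the GT well, and the writes use the masked-bit convention where the upper half of the value selects which bits the write may touch. The sketch below models that convention; the macro shapes are assumptions following the usual i915 pattern, not copied from this tree:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed shape of the masked-bit helpers: the upper 16 bits are the
     * write-enable mask, the lower 16 bits the new value for those bits. */
    #define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
    #define MASKED_BIT_DISABLE(a) ((a) << 16)

    #define FORCEWAKE_KERNEL 0x1

    /* Model of how hardware applies a masked write to a 16-bit register. */
    static uint16_t masked_write(uint16_t reg, uint32_t val)
    {
        uint16_t mask = val >> 16;
        return (uint16_t)((reg & ~mask) | (val & mask));
    }

    int main(void)
    {
        uint16_t fw = 0;

        fw = masked_write(fw, MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
        printf("after get: 0x%04x\n", (unsigned)fw);   /* kernel wake bit set */

        fw = masked_write(fw, MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        printf("after put: 0x%04x\n", (unsigned)fw);   /* kernel wake bit cleared */
        return 0;
    }
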
Line 4537... Line 4590...

	I915_WRITE(GEN6_PCODE_DATA, 0);

+	return 0;
+}
+
+static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
+			u8 addr, u32 *val)
+{
+	u32 cmd, devfn, port, be, bar;
+
+	bar = 0;
+	be = 0xf;
+	port = IOSF_PORT_PUNIT;
+	devfn = PCI_DEVFN(2, 0);
+
+	cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
+		(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
+		(bar << IOSF_BAR_SHIFT);
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) {
+		DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n",
+				 opcode == PUNIT_OPCODE_REG_READ ?
+				 "read" : "write");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(VLV_IOSF_ADDR, addr);
+	if (opcode == PUNIT_OPCODE_REG_WRITE)
+		I915_WRITE(VLV_IOSF_DATA, *val);
+	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+
+	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0,
+		     500)) {
+		DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n",
+			  opcode == PUNIT_OPCODE_REG_READ ? "read" : "write",
+			  addr);
+		return -ETIMEDOUT;
+	}
+
+	if (opcode == PUNIT_OPCODE_REG_READ)
+		*val = I915_READ(VLV_IOSF_DATA);
+	I915_WRITE(VLV_IOSF_DATA, 0);
+
+	return 0;
+}
+
+int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
+{
+	return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val);
+}
+
+int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+{