Subversion Repositories Kolibri OS

Rev

Rev 4539 | Rev 5060 | Go to most recent revision | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 4539 Rev 4560
Line 30... Line 30...
30
#include "intel_drv.h"
30
#include "intel_drv.h"
31
#include 
31
#include 
32
//#include "../../../platform/x86/intel_ips.h"
32
//#include "../../../platform/x86/intel_ips.h"
33
#include 
33
#include 
Line -... Line 34...
-
 
34
 
-
 
35
#include 
34
 
36
 
Line 35... Line 37...
35
#define FORCEWAKE_ACK_TIMEOUT_MS 2
37
#define FORCEWAKE_ACK_TIMEOUT_MS 2
Line 36... Line 38...
36
 
38
 
Line -... Line 39...
-
 
39
#define assert_spin_locked(x)
-
 
40
 
-
 
41
void getrawmonotonic(struct timespec *ts);
-
 
42
 
-
 
43
static inline void outb(u8 v, u16 port)
-
 
44
{
-
 
45
    asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
-
 
46
}
-
 
47
static inline u8 inb(u16 port)
-
 
48
{
-
 
49
    u8 v;
Line -... Line 50...
-
 
50
    asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
-
 
51
    return v;
-
 
52
}
-
 
53
 
-
 
54
 
-
 
55
/**
-
 
56
 * RC6 is a special power stage which allows the GPU to enter an very
-
 
57
 * low-voltage mode when idle, using down to 0V while at this stage.  This
-
 
58
 * stage is entered automatically when the GPU is idle when RC6 support is
-
 
59
 * enabled, and as soon as new workload arises GPU wakes up automatically as well.
-
 
60
 *
-
 
61
 * There are different RC6 modes available in Intel GPU, which differentiate
-
 
62
 * among each other with the latency required to enter and leave RC6 and
-
 
63
 * voltage consumed by the GPU in different states.
-
 
64
 *
-
 
65
 * The combination of the following flags define which states GPU is allowed
-
 
66
 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
-
 
67
 * RC6pp is deepest RC6. Their support by hardware varies according to the
-
 
68
 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
-
 
69
 * which brings the most power savings; deeper states save more power, but
Line 37... Line 70...
37
#define assert_spin_locked(x)
70
 * require higher latency to switch to and wake up.
38
 
71
 */
39
void getrawmonotonic(struct timespec *ts);
72
#define INTEL_RC6_ENABLE			(1<<0)
40
 
73
#define INTEL_RC6p_ENABLE			(1<<1)
Line 49... Line 82...
49
 *
82
 *
50
 * FBC-related functionality can be enabled by the means of the
83
 * FBC-related functionality can be enabled by the means of the
51
 * i915.i915_enable_fbc parameter
84
 * i915.i915_enable_fbc parameter
52
 */
85
 */
Line 53... Line -...
53
 
-
 
54
static bool intel_crtc_active(struct drm_crtc *crtc)
-
 
55
{
-
 
56
	/* Be paranoid as we can arrive here with only partial
-
 
57
	 * state retrieved from the hardware during setup.
-
 
58
	 */
-
 
59
	return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
-
 
60
}
-
 
61
 
86
 
62
static void i8xx_disable_fbc(struct drm_device *dev)
87
static void i8xx_disable_fbc(struct drm_device *dev)
63
{
88
{
64
	struct drm_i915_private *dev_priv = dev->dev_private;
89
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 79... Line 104...
79
	}
104
	}
Line 80... Line 105...
80
 
105
 
81
	DRM_DEBUG_KMS("disabled FBC\n");
106
	DRM_DEBUG_KMS("disabled FBC\n");
Line 82... Line 107...
82
}
107
}
83
 
108
 
84
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
109
static void i8xx_enable_fbc(struct drm_crtc *crtc)
85
{
110
{
86
	struct drm_device *dev = crtc->dev;
111
	struct drm_device *dev = crtc->dev;
87
	struct drm_i915_private *dev_priv = dev->dev_private;
112
	struct drm_i915_private *dev_priv = dev->dev_private;
88
	struct drm_framebuffer *fb = crtc->fb;
113
	struct drm_framebuffer *fb = crtc->fb;
89
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
114
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
90
	struct drm_i915_gem_object *obj = intel_fb->obj;
115
	struct drm_i915_gem_object *obj = intel_fb->obj;
91
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
116
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
92
	int cfb_pitch;
117
	int cfb_pitch;
Line 93... Line 118...
93
	int plane, i;
118
	int plane, i;
94
	u32 fbc_ctl, fbc_ctl2;
119
	u32 fbc_ctl;
95
 
120
 
Line 96... Line 121...
96
	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
121
	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
-
 
122
	if (fb->pitches[0] < cfb_pitch)
-
 
123
		cfb_pitch = fb->pitches[0];
-
 
124
 
97
	if (fb->pitches[0] < cfb_pitch)
125
	/* FBC_CTL wants 32B or 64B units */
98
		cfb_pitch = fb->pitches[0];
126
	if (IS_GEN2(dev))
Line 99... Line 127...
99
 
127
		cfb_pitch = (cfb_pitch / 32) - 1;
100
	/* FBC_CTL wants 64B units */
128
	else
101
	cfb_pitch = (cfb_pitch / 64) - 1;
129
	cfb_pitch = (cfb_pitch / 64) - 1;
Line -... Line 130...
-
 
130
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
-
 
131
 
-
 
132
	/* Clear old tags */
102
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
133
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
103
 
134
		I915_WRITE(FBC_TAG + (i * 4), 0);
104
	/* Clear old tags */
135
 
105
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
136
	if (IS_GEN4(dev)) {
106
		I915_WRITE(FBC_TAG + (i * 4), 0);
137
		u32 fbc_ctl2;
-
 
138
 
Line 107... Line 139...
107
 
139
	/* Set it up... */
-
 
140
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-
 
141
	fbc_ctl2 |= plane;
108
	/* Set it up... */
142
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
109
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
143
	I915_WRITE(FBC_FENCE_OFF, crtc->y);
110
	fbc_ctl2 |= plane;
144
	}
111
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
145
 
112
	I915_WRITE(FBC_FENCE_OFF, crtc->y);
-
 
113
 
146
	/* enable it... */
114
	/* enable it... */
147
	fbc_ctl = I915_READ(FBC_CONTROL);
Line 115... Line 148...
115
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
148
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
116
	if (IS_I945GM(dev))
149
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
Line 129... Line 162...
129
	struct drm_i915_private *dev_priv = dev->dev_private;
162
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 130... Line 163...
130
 
163
 
131
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
164
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
Line 132... Line 165...
132
}
165
}
133
 
166
 
134
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
167
static void g4x_enable_fbc(struct drm_crtc *crtc)
135
{
168
{
136
	struct drm_device *dev = crtc->dev;
169
	struct drm_device *dev = crtc->dev;
137
	struct drm_i915_private *dev_priv = dev->dev_private;
170
	struct drm_i915_private *dev_priv = dev->dev_private;
138
	struct drm_framebuffer *fb = crtc->fb;
171
	struct drm_framebuffer *fb = crtc->fb;
139
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
172
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
140
	struct drm_i915_gem_object *obj = intel_fb->obj;
173
	struct drm_i915_gem_object *obj = intel_fb->obj;
141
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
142
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
174
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Line 143... Line 175...
143
	unsigned long stall_watermark = 200;
175
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
144
	u32 dpfc_ctl;
176
	u32 dpfc_ctl;
145
 
177
 
Line 146... Line -...
146
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
-
 
147
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
-
 
148
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
-
 
149
 
178
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
Line 150... Line 179...
150
	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
179
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
151
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
180
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
Line 184... Line 213...
184
{
213
{
185
	struct drm_i915_private *dev_priv = dev->dev_private;
214
	struct drm_i915_private *dev_priv = dev->dev_private;
186
	u32 blt_ecoskpd;
215
	u32 blt_ecoskpd;
Line 187... Line 216...
187
 
216
 
-
 
217
	/* Make sure blitter notifies FBC of writes */
-
 
218
 
-
 
219
	/* Blitter is part of Media powerwell on VLV. No impact of
188
	/* Make sure blitter notifies FBC of writes */
220
	 * his param in other platforms for now */
-
 
221
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
189
	gen6_gt_force_wake_get(dev_priv);
222
 
190
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
223
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
191
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
224
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
192
		GEN6_BLITTER_LOCK_SHIFT;
225
		GEN6_BLITTER_LOCK_SHIFT;
193
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
226
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
194
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
227
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
195
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
228
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
196
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
229
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
197
			 GEN6_BLITTER_LOCK_SHIFT);
230
			 GEN6_BLITTER_LOCK_SHIFT);
198
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
231
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
-
 
232
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
199
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
233
 
200
	gen6_gt_force_wake_put(dev_priv);
234
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
Line 201... Line 235...
201
}
235
}
202
 
236
 
203
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
237
static void ironlake_enable_fbc(struct drm_crtc *crtc)
204
{
238
{
205
	struct drm_device *dev = crtc->dev;
239
	struct drm_device *dev = crtc->dev;
206
	struct drm_i915_private *dev_priv = dev->dev_private;
240
	struct drm_i915_private *dev_priv = dev->dev_private;
207
	struct drm_framebuffer *fb = crtc->fb;
241
	struct drm_framebuffer *fb = crtc->fb;
208
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
242
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
209
	struct drm_i915_gem_object *obj = intel_fb->obj;
243
	struct drm_i915_gem_object *obj = intel_fb->obj;
210
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
211
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
244
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Line 212... Line 245...
212
	unsigned long stall_watermark = 200;
245
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
213
	u32 dpfc_ctl;
246
	u32 dpfc_ctl;
214
 
247
 
215
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
248
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
216
	dpfc_ctl &= DPFC_RESERVED;
249
	dpfc_ctl &= DPFC_RESERVED;
217
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
250
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
-
 
251
	/* Set persistent mode for front-buffer rendering, ala X. */
-
 
252
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
218
	/* Set persistent mode for front-buffer rendering, ala X. */
253
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
Line 219... Line -...
219
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
-
 
220
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
-
 
221
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
-
 
222
 
254
	if (IS_GEN5(dev))
223
	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
255
		dpfc_ctl |= obj->fence_reg;
224
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
256
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
225
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
257
 
Line 247... Line 279...
247
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
279
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
248
	if (dpfc_ctl & DPFC_CTL_EN) {
280
	if (dpfc_ctl & DPFC_CTL_EN) {
249
		dpfc_ctl &= ~DPFC_CTL_EN;
281
		dpfc_ctl &= ~DPFC_CTL_EN;
250
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
282
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
Line 251... Line -...
251
 
-
 
252
		if (IS_IVYBRIDGE(dev))
-
 
253
			/* WaFbcDisableDpfcClockGating:ivb */
-
 
254
			I915_WRITE(ILK_DSPCLK_GATE_D,
-
 
255
				   I915_READ(ILK_DSPCLK_GATE_D) &
-
 
256
				   ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
-
 
257
 
-
 
258
		if (IS_HASWELL(dev))
-
 
259
			/* WaFbcDisableDpfcClockGating:hsw */
-
 
260
			I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-
 
261
				   I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
-
 
262
				   ~HSW_DPFC_GATING_DISABLE);
-
 
263
 
283
 
264
		DRM_DEBUG_KMS("disabled FBC\n");
284
		DRM_DEBUG_KMS("disabled FBC\n");
265
	}
285
	}
Line 266... Line 286...
266
}
286
}
Line 270... Line 290...
270
	struct drm_i915_private *dev_priv = dev->dev_private;
290
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 271... Line 291...
271
 
291
 
272
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
292
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
Line 273... Line 293...
273
}
293
}
274
 
294
 
275
static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
295
static void gen7_enable_fbc(struct drm_crtc *crtc)
276
{
296
{
277
	struct drm_device *dev = crtc->dev;
297
	struct drm_device *dev = crtc->dev;
278
	struct drm_i915_private *dev_priv = dev->dev_private;
298
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 288... Line 308...
288
		   intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
308
		   intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
Line 289... Line 309...
289
 
309
 
290
	if (IS_IVYBRIDGE(dev)) {
310
	if (IS_IVYBRIDGE(dev)) {
291
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
311
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
292
		I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
-
 
293
		/* WaFbcDisableDpfcClockGating:ivb */
-
 
294
		I915_WRITE(ILK_DSPCLK_GATE_D,
-
 
295
			   I915_READ(ILK_DSPCLK_GATE_D) |
-
 
296
			   ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
312
		I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
297
	} else {
313
	} else {
298
		/* WaFbcAsynchFlipDisableFbcQueue:hsw */
314
		/* WaFbcAsynchFlipDisableFbcQueue:hsw */
299
		I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
315
		I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
300
			   HSW_BYPASS_FBC_QUEUE);
-
 
301
		/* WaFbcDisableDpfcClockGating:hsw */
-
 
302
		I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-
 
303
			   I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
-
 
304
			   HSW_DPFC_GATING_DISABLE);
316
			   HSW_BYPASS_FBC_QUEUE);
Line 305... Line 317...
305
	}
317
	}
306
 
318
 
307
	I915_WRITE(SNB_DPFC_CTL_SA,
319
	I915_WRITE(SNB_DPFC_CTL_SA,
Line 308... Line 320...
308
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
320
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
Line 309... Line 321...
309
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
321
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
310
 
322
 
Line 311... Line 323...
311
	sandybridge_blit_fbc_update(dev);
323
	sandybridge_blit_fbc_update(dev);
312
 
324
 
313
	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
325
	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
Line 335... Line 347...
335
	if (work == dev_priv->fbc.fbc_work) {
347
	if (work == dev_priv->fbc.fbc_work) {
336
		/* Double check that we haven't switched fb without cancelling
348
		/* Double check that we haven't switched fb without cancelling
337
		 * the prior work.
349
		 * the prior work.
338
		 */
350
		 */
339
		if (work->crtc->fb == work->fb) {
351
		if (work->crtc->fb == work->fb) {
340
			dev_priv->display.enable_fbc(work->crtc,
352
			dev_priv->display.enable_fbc(work->crtc);
341
						     work->interval);
-
 
Line 342... Line 353...
342
 
353
 
343
			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
354
			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
344
			dev_priv->fbc.fb_id = work->crtc->fb->base.id;
355
			dev_priv->fbc.fb_id = work->crtc->fb->base.id;
345
			dev_priv->fbc.y = work->crtc->y;
356
			dev_priv->fbc.y = work->crtc->y;
Line 373... Line 384...
373
	 * necessary to run.
384
	 * necessary to run.
374
	 */
385
	 */
375
	dev_priv->fbc.fbc_work = NULL;
386
	dev_priv->fbc.fbc_work = NULL;
376
}
387
}
Line 377... Line 388...
377
 
388
 
378
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
389
static void intel_enable_fbc(struct drm_crtc *crtc)
379
{
390
{
380
	struct intel_fbc_work *work;
391
	struct intel_fbc_work *work;
381
	struct drm_device *dev = crtc->dev;
392
	struct drm_device *dev = crtc->dev;
Line 382... Line 393...
382
	struct drm_i915_private *dev_priv = dev->dev_private;
393
	struct drm_i915_private *dev_priv = dev->dev_private;
383
 
394
 
Line 384... Line 395...
384
	if (!dev_priv->display.enable_fbc)
395
	if (!dev_priv->display.enable_fbc)
Line 385... Line 396...
385
		return;
396
		return;
386
 
397
 
387
	intel_cancel_fbc_work(dev_priv);
398
	intel_cancel_fbc_work(dev_priv);
388
 
399
 
389
	work = kzalloc(sizeof *work, GFP_KERNEL);
400
	work = kzalloc(sizeof(*work), GFP_KERNEL);
390
	if (work == NULL) {
401
	if (work == NULL) {
Line 391... Line 402...
391
		DRM_ERROR("Failed to allocate FBC work structure\n");
402
		DRM_ERROR("Failed to allocate FBC work structure\n");
392
		dev_priv->display.enable_fbc(crtc, interval);
403
		dev_priv->display.enable_fbc(crtc);
393
		return;
-
 
394
	}
404
		return;
Line 395... Line 405...
395
 
405
	}
Line 396... Line 406...
396
	work->crtc = crtc;
406
 
Line 464... Line 474...
464
	struct drm_crtc *crtc = NULL, *tmp_crtc;
474
	struct drm_crtc *crtc = NULL, *tmp_crtc;
465
	struct intel_crtc *intel_crtc;
475
	struct intel_crtc *intel_crtc;
466
	struct drm_framebuffer *fb;
476
	struct drm_framebuffer *fb;
467
	struct intel_framebuffer *intel_fb;
477
	struct intel_framebuffer *intel_fb;
468
	struct drm_i915_gem_object *obj;
478
	struct drm_i915_gem_object *obj;
-
 
479
	const struct drm_display_mode *adjusted_mode;
469
	unsigned int max_hdisplay, max_vdisplay;
480
	unsigned int max_width, max_height;
Line 470... Line 481...
470
 
481
 
471
	if (!I915_HAS_FBC(dev)) {
482
	if (!HAS_FBC(dev)) {
472
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
483
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
473
		return;
484
		return;
Line 474... Line 485...
474
	}
485
	}
Line 488... Line 499...
488
	 *   - new fb is too large to fit in compressed buffer
499
	 *   - new fb is too large to fit in compressed buffer
489
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
500
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
490
	 */
501
	 */
491
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
502
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
492
		if (intel_crtc_active(tmp_crtc) &&
503
		if (intel_crtc_active(tmp_crtc) &&
493
		    !to_intel_crtc(tmp_crtc)->primary_disabled) {
504
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
494
			if (crtc) {
505
			if (crtc) {
495
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
506
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
496
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
507
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
497
				goto out_disable;
508
				goto out_disable;
498
			}
509
			}
Line 508... Line 519...
508
 
519
 
509
	intel_crtc = to_intel_crtc(crtc);
520
	intel_crtc = to_intel_crtc(crtc);
510
	fb = crtc->fb;
521
	fb = crtc->fb;
511
	intel_fb = to_intel_framebuffer(fb);
522
	intel_fb = to_intel_framebuffer(fb);
-
 
523
	obj = intel_fb->obj;
Line 512... Line 524...
512
	obj = intel_fb->obj;
524
	adjusted_mode = &intel_crtc->config.adjusted_mode;
513
 
525
 
514
	if (i915_enable_fbc < 0 &&
526
	if (i915_enable_fbc < 0 &&
515
	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
527
	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
Line 520... Line 532...
520
	if (!i915_enable_fbc) {
532
	if (!i915_enable_fbc) {
521
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
533
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
522
		DRM_DEBUG_KMS("fbc disabled per module param\n");
534
		DRM_DEBUG_KMS("fbc disabled per module param\n");
523
		goto out_disable;
535
		goto out_disable;
524
	}
536
	}
525
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
537
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
526
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
538
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
527
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
539
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
528
		DRM_DEBUG_KMS("mode incompatible with compression, "
540
		DRM_DEBUG_KMS("mode incompatible with compression, "
529
			      "disabling\n");
541
			      "disabling\n");
530
		goto out_disable;
542
		goto out_disable;
531
	}
543
	}
Line 532... Line 544...
532
 
544
 
533
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
545
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
534
		max_hdisplay = 4096;
546
		max_width = 4096;
535
		max_vdisplay = 2048;
547
		max_height = 2048;
536
	} else {
548
	} else {
537
		max_hdisplay = 2048;
549
		max_width = 2048;
538
		max_vdisplay = 1536;
550
		max_height = 1536;
539
	}
551
	}
540
	if ((crtc->mode.hdisplay > max_hdisplay) ||
552
	if (intel_crtc->config.pipe_src_w > max_width ||
541
	    (crtc->mode.vdisplay > max_vdisplay)) {
553
	    intel_crtc->config.pipe_src_h > max_height) {
542
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
554
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
543
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
555
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
544
		goto out_disable;
556
		goto out_disable;
545
	}
557
	}
546
	if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
558
	if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
547
	    intel_crtc->plane != 0) {
559
	    intel_crtc->plane != PLANE_A) {
548
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
560
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
549
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
561
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
550
		goto out_disable;
562
		goto out_disable;
Line 551... Line 563...
551
	}
563
	}
552
 
564
 
Line 606... Line 618...
606
		 */
618
		 */
607
		DRM_DEBUG_KMS("disabling active FBC for update\n");
619
		DRM_DEBUG_KMS("disabling active FBC for update\n");
608
		intel_disable_fbc(dev);
620
		intel_disable_fbc(dev);
609
	}
621
	}
Line 610... Line 622...
610
 
622
 
611
	intel_enable_fbc(crtc, 500);
623
	intel_enable_fbc(crtc);
612
	dev_priv->fbc.no_fbc_reason = FBC_OK;
624
	dev_priv->fbc.no_fbc_reason = FBC_OK;
Line 613... Line 625...
613
	return;
625
	return;
614
 
626
 
Line 828... Line 840...
828
		      plane ? "B" : "A", size);
840
		      plane ? "B" : "A", size);
Line 829... Line 841...
829
 
841
 
830
	return size;
842
	return size;
Line 831... Line 843...
831
}
843
}
832
 
844
 
833
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
845
static int i830_get_fifo_size(struct drm_device *dev, int plane)
834
{
846
{
835
	struct drm_i915_private *dev_priv = dev->dev_private;
847
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 861... Line 873...
861
		      size);
873
		      size);
Line 862... Line 874...
862
 
874
 
863
	return size;
875
	return size;
Line 864... Line -...
864
}
-
 
865
 
-
 
866
static int i830_get_fifo_size(struct drm_device *dev, int plane)
-
 
867
{
-
 
868
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
869
	uint32_t dsparb = I915_READ(DSPARB);
-
 
870
	int size;
-
 
871
 
-
 
872
	size = dsparb & 0x7f;
-
 
873
	size >>= 1; /* Convert to cachelines */
-
 
874
 
-
 
875
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-
 
876
		      plane ? "B" : "A", size);
-
 
877
 
-
 
878
	return size;
-
 
879
}
876
}
880
 
877
 
881
/* Pineview has different values for various configs */
878
/* Pineview has different values for various configs */
882
static const struct intel_watermark_params pineview_display_wm = {
879
static const struct intel_watermark_params pineview_display_wm = {
883
	PINEVIEW_DISPLAY_FIFO,
880
	PINEVIEW_DISPLAY_FIFO,
Line 954... Line 951...
954
	I915_MAX_WM,
951
	I915_MAX_WM,
955
	1,
952
	1,
956
	2,
953
	2,
957
	I915_FIFO_LINE_SIZE
954
	I915_FIFO_LINE_SIZE
958
};
955
};
959
static const struct intel_watermark_params i855_wm_info = {
956
static const struct intel_watermark_params i830_wm_info = {
960
	I855GM_FIFO_SIZE,
957
	I855GM_FIFO_SIZE,
961
	I915_MAX_WM,
958
	I915_MAX_WM,
962
	1,
959
	1,
963
	2,
960
	2,
964
	I830_FIFO_LINE_SIZE
961
	I830_FIFO_LINE_SIZE
965
};
962
};
966
static const struct intel_watermark_params i830_wm_info = {
963
static const struct intel_watermark_params i845_wm_info = {
967
	I830_FIFO_SIZE,
964
	I830_FIFO_SIZE,
968
	I915_MAX_WM,
965
	I915_MAX_WM,
969
	1,
966
	1,
970
	2,
967
	2,
971
	I830_FIFO_LINE_SIZE
968
	I830_FIFO_LINE_SIZE
972
};
969
};
Line 973... Line -...
973
 
-
 
974
static const struct intel_watermark_params ironlake_display_wm_info = {
-
 
975
	ILK_DISPLAY_FIFO,
-
 
976
	ILK_DISPLAY_MAXWM,
-
 
977
	ILK_DISPLAY_DFTWM,
-
 
978
	2,
-
 
979
	ILK_FIFO_LINE_SIZE
-
 
980
};
-
 
981
static const struct intel_watermark_params ironlake_cursor_wm_info = {
-
 
982
	ILK_CURSOR_FIFO,
-
 
983
	ILK_CURSOR_MAXWM,
-
 
984
	ILK_CURSOR_DFTWM,
-
 
985
	2,
-
 
986
	ILK_FIFO_LINE_SIZE
-
 
987
};
-
 
988
static const struct intel_watermark_params ironlake_display_srwm_info = {
-
 
989
	ILK_DISPLAY_SR_FIFO,
-
 
990
	ILK_DISPLAY_MAX_SRWM,
-
 
991
	ILK_DISPLAY_DFT_SRWM,
-
 
992
	2,
-
 
993
	ILK_FIFO_LINE_SIZE
-
 
994
};
-
 
995
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
-
 
996
	ILK_CURSOR_SR_FIFO,
-
 
997
	ILK_CURSOR_MAX_SRWM,
-
 
998
	ILK_CURSOR_DFT_SRWM,
-
 
999
	2,
-
 
1000
	ILK_FIFO_LINE_SIZE
-
 
1001
};
-
 
1002
 
-
 
1003
static const struct intel_watermark_params sandybridge_display_wm_info = {
-
 
1004
	SNB_DISPLAY_FIFO,
-
 
1005
	SNB_DISPLAY_MAXWM,
-
 
1006
	SNB_DISPLAY_DFTWM,
-
 
1007
	2,
-
 
1008
	SNB_FIFO_LINE_SIZE
-
 
1009
};
-
 
1010
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
-
 
1011
	SNB_CURSOR_FIFO,
-
 
1012
	SNB_CURSOR_MAXWM,
-
 
1013
	SNB_CURSOR_DFTWM,
-
 
1014
	2,
-
 
1015
	SNB_FIFO_LINE_SIZE
-
 
1016
};
-
 
1017
static const struct intel_watermark_params sandybridge_display_srwm_info = {
-
 
1018
	SNB_DISPLAY_SR_FIFO,
-
 
1019
	SNB_DISPLAY_MAX_SRWM,
-
 
1020
	SNB_DISPLAY_DFT_SRWM,
-
 
1021
	2,
-
 
1022
	SNB_FIFO_LINE_SIZE
-
 
1023
};
-
 
1024
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
-
 
1025
	SNB_CURSOR_SR_FIFO,
-
 
1026
	SNB_CURSOR_MAX_SRWM,
-
 
1027
	SNB_CURSOR_DFT_SRWM,
-
 
1028
	2,
-
 
1029
	SNB_FIFO_LINE_SIZE
-
 
1030
};
-
 
1031
 
-
 
1032
 
970
 
1033
/**
971
/**
1034
 * intel_calculate_wm - calculate watermark level
972
 * intel_calculate_wm - calculate watermark level
1035
 * @clock_in_khz: pixel clock
973
 * @clock_in_khz: pixel clock
1036
 * @wm: chip FIFO params
974
 * @wm: chip FIFO params
Line 1093... Line 1031...
1093
	}
1031
	}
Line 1094... Line 1032...
1094
 
1032
 
1095
	return enabled;
1033
	return enabled;
Line 1096... Line 1034...
1096
}
1034
}
1097
 
1035
 
-
 
1036
static void pineview_update_wm(struct drm_crtc *unused_crtc)
1098
static void pineview_update_wm(struct drm_device *dev)
1037
{
1099
{
1038
	struct drm_device *dev = unused_crtc->dev;
1100
	struct drm_i915_private *dev_priv = dev->dev_private;
1039
	struct drm_i915_private *dev_priv = dev->dev_private;
1101
	struct drm_crtc *crtc;
1040
	struct drm_crtc *crtc;
1102
	const struct cxsr_latency *latency;
1041
	const struct cxsr_latency *latency;
Line 1111... Line 1050...
1111
		return;
1050
		return;
1112
	}
1051
	}
Line 1113... Line 1052...
1113
 
1052
 
1114
	crtc = single_enabled_crtc(dev);
1053
	crtc = single_enabled_crtc(dev);
1115
	if (crtc) {
1054
	if (crtc) {
1116
		int clock = crtc->mode.clock;
1055
		const struct drm_display_mode *adjusted_mode;
-
 
1056
		int pixel_size = crtc->fb->bits_per_pixel / 8;
-
 
1057
		int clock;
-
 
1058
 
-
 
1059
		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
Line 1117... Line 1060...
1117
		int pixel_size = crtc->fb->bits_per_pixel / 8;
1060
		clock = adjusted_mode->crtc_clock;
1118
 
1061
 
1119
		/* Display SR */
1062
		/* Display SR */
1120
		wm = intel_calculate_wm(clock, &pineview_display_wm,
1063
		wm = intel_calculate_wm(clock, &pineview_display_wm,
Line 1172... Line 1115...
1172
			    int cursor_latency_ns,
1115
			    int cursor_latency_ns,
1173
			    int *plane_wm,
1116
			    int *plane_wm,
1174
			    int *cursor_wm)
1117
			    int *cursor_wm)
1175
{
1118
{
1176
	struct drm_crtc *crtc;
1119
	struct drm_crtc *crtc;
-
 
1120
	const struct drm_display_mode *adjusted_mode;
1177
	int htotal, hdisplay, clock, pixel_size;
1121
	int htotal, hdisplay, clock, pixel_size;
1178
	int line_time_us, line_count;
1122
	int line_time_us, line_count;
1179
	int entries, tlb_miss;
1123
	int entries, tlb_miss;
Line 1180... Line 1124...
1180
 
1124
 
Line 1183... Line 1127...
1183
		*cursor_wm = cursor->guard_size;
1127
		*cursor_wm = cursor->guard_size;
1184
		*plane_wm = display->guard_size;
1128
		*plane_wm = display->guard_size;
1185
        return false;
1129
        return false;
1186
	}
1130
	}
Line -... Line 1131...
-
 
1131
 
1187
 
1132
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1188
	htotal = crtc->mode.htotal;
1133
	clock = adjusted_mode->crtc_clock;
1189
	hdisplay = crtc->mode.hdisplay;
1134
	htotal = adjusted_mode->crtc_htotal;
1190
	clock = crtc->mode.clock;
1135
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
Line 1191... Line 1136...
1191
	pixel_size = crtc->fb->bits_per_pixel / 8;
1136
	pixel_size = crtc->fb->bits_per_pixel / 8;
1192
 
1137
 
1193
	/* Use the small buffer method to calculate plane watermark */
1138
	/* Use the small buffer method to calculate plane watermark */
Line 1256... Line 1201...
1256
			     const struct intel_watermark_params *display,
1201
			     const struct intel_watermark_params *display,
1257
			     const struct intel_watermark_params *cursor,
1202
			     const struct intel_watermark_params *cursor,
1258
			     int *display_wm, int *cursor_wm)
1203
			     int *display_wm, int *cursor_wm)
1259
{
1204
{
1260
	struct drm_crtc *crtc;
1205
	struct drm_crtc *crtc;
-
 
1206
	const struct drm_display_mode *adjusted_mode;
1261
	int hdisplay, htotal, pixel_size, clock;
1207
	int hdisplay, htotal, pixel_size, clock;
1262
	unsigned long line_time_us;
1208
	unsigned long line_time_us;
1263
	int line_count, line_size;
1209
	int line_count, line_size;
1264
	int small, large;
1210
	int small, large;
1265
	int entries;
1211
	int entries;
Line 1268... Line 1214...
1268
		*display_wm = *cursor_wm = 0;
1214
		*display_wm = *cursor_wm = 0;
1269
		return false;
1215
		return false;
1270
	}
1216
	}
Line 1271... Line 1217...
1271
 
1217
 
-
 
1218
	crtc = intel_get_crtc_for_plane(dev, plane);
1272
	crtc = intel_get_crtc_for_plane(dev, plane);
1219
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1273
	hdisplay = crtc->mode.hdisplay;
1220
	clock = adjusted_mode->crtc_clock;
1274
	htotal = crtc->mode.htotal;
1221
	htotal = adjusted_mode->crtc_htotal;
1275
	clock = crtc->mode.clock;
1222
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
Line 1276... Line 1223...
1276
	pixel_size = crtc->fb->bits_per_pixel / 8;
1223
	pixel_size = crtc->fb->bits_per_pixel / 8;
1277
 
1224
 
1278
	line_time_us = (htotal * 1000) / clock;
1225
	line_time_us = (htotal * 1000) / clock;
Line 1309... Line 1256...
1309
 
1256
 
1310
	crtc = intel_get_crtc_for_plane(dev, plane);
1257
	crtc = intel_get_crtc_for_plane(dev, plane);
1311
	if (!intel_crtc_active(crtc))
1258
	if (!intel_crtc_active(crtc))
Line 1312... Line 1259...
1312
		return false;
1259
		return false;
1313
 
1260
 
Line 1314... Line 1261...
1314
	clock = crtc->mode.clock;	/* VESA DOT Clock */
1261
	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1315
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
1262
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
1316
 
1263
 
Line 1371... Line 1318...
1371
	}
1318
	}
1372
}
1319
}
Line 1373... Line 1320...
1373
 
1320
 
Line 1374... Line 1321...
1374
#define single_plane_enabled(mask) is_power_of_2(mask)
1321
#define single_plane_enabled(mask) is_power_of_2(mask)
1375
 
1322
 
-
 
1323
static void valleyview_update_wm(struct drm_crtc *crtc)
1376
static void valleyview_update_wm(struct drm_device *dev)
1324
{
1377
{
1325
	struct drm_device *dev = crtc->dev;
1378
	static const int sr_latency_ns = 12000;
1326
	static const int sr_latency_ns = 12000;
1379
	struct drm_i915_private *dev_priv = dev->dev_private;
1327
	struct drm_i915_private *dev_priv = dev->dev_private;
1380
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1328
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
Line 1430... Line 1378...
1430
	I915_WRITE(DSPFW3,
1378
	I915_WRITE(DSPFW3,
1431
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1379
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1432
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1380
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1433
}
1381
}
Line 1434... Line 1382...
1434
 
1382
 
1435
static void g4x_update_wm(struct drm_device *dev)
1383
static void g4x_update_wm(struct drm_crtc *crtc)
-
 
1384
{
1436
{
1385
	struct drm_device *dev = crtc->dev;
1437
	static const int sr_latency_ns = 12000;
1386
	static const int sr_latency_ns = 12000;
1438
	struct drm_i915_private *dev_priv = dev->dev_private;
1387
	struct drm_i915_private *dev_priv = dev->dev_private;
1439
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1388
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1440
	int plane_sr, cursor_sr;
1389
	int plane_sr, cursor_sr;
Line 1482... Line 1431...
1482
	I915_WRITE(DSPFW3,
1431
	I915_WRITE(DSPFW3,
1483
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1432
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1484
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1433
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1485
}
1434
}
Line 1486... Line 1435...
1486
 
1435
 
1487
static void i965_update_wm(struct drm_device *dev)
1436
static void i965_update_wm(struct drm_crtc *unused_crtc)
-
 
1437
{
1488
{
1438
	struct drm_device *dev = unused_crtc->dev;
1489
	struct drm_i915_private *dev_priv = dev->dev_private;
1439
	struct drm_i915_private *dev_priv = dev->dev_private;
1490
	struct drm_crtc *crtc;
1440
	struct drm_crtc *crtc;
1491
	int srwm = 1;
1441
	int srwm = 1;
Line 1492... Line 1442...
1492
	int cursor_sr = 16;
1442
	int cursor_sr = 16;
1493
 
1443
 
1494
	/* Calc sr entries for one plane configs */
1444
	/* Calc sr entries for one plane configs */
1495
	crtc = single_enabled_crtc(dev);
1445
	crtc = single_enabled_crtc(dev);
1496
	if (crtc) {
1446
	if (crtc) {
-
 
1447
		/* self-refresh has much higher latency */
-
 
1448
		static const int sr_latency_ns = 12000;
1497
		/* self-refresh has much higher latency */
1449
		const struct drm_display_mode *adjusted_mode =
1498
		static const int sr_latency_ns = 12000;
1450
			&to_intel_crtc(crtc)->config.adjusted_mode;
1499
		int clock = crtc->mode.clock;
1451
		int clock = adjusted_mode->crtc_clock;
1500
		int htotal = crtc->mode.htotal;
1452
		int htotal = adjusted_mode->crtc_htotal;
1501
		int hdisplay = crtc->mode.hdisplay;
1453
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1502
		int pixel_size = crtc->fb->bits_per_pixel / 8;
1454
		int pixel_size = crtc->fb->bits_per_pixel / 8;
Line 1503... Line 1455...
1503
		unsigned long line_time_us;
1455
		unsigned long line_time_us;
Line 1547... Line 1499...
1547
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1499
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1548
	/* update cursor SR watermark */
1500
	/* update cursor SR watermark */
1549
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1501
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1550
}
1502
}
Line 1551... Line 1503...
1551
 
1503
 
1552
static void i9xx_update_wm(struct drm_device *dev)
1504
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
-
 
1505
{
1553
{
1506
	struct drm_device *dev = unused_crtc->dev;
1554
	struct drm_i915_private *dev_priv = dev->dev_private;
1507
	struct drm_i915_private *dev_priv = dev->dev_private;
1555
	const struct intel_watermark_params *wm_info;
1508
	const struct intel_watermark_params *wm_info;
1556
	uint32_t fwater_lo;
1509
	uint32_t fwater_lo;
1557
	uint32_t fwater_hi;
1510
	uint32_t fwater_hi;
Line 1563... Line 1516...
1563
	if (IS_I945GM(dev))
1516
	if (IS_I945GM(dev))
1564
		wm_info = &i945_wm_info;
1517
		wm_info = &i945_wm_info;
1565
	else if (!IS_GEN2(dev))
1518
	else if (!IS_GEN2(dev))
1566
		wm_info = &i915_wm_info;
1519
		wm_info = &i915_wm_info;
1567
	else
1520
	else
1568
		wm_info = &i855_wm_info;
1521
		wm_info = &i830_wm_info;
Line 1569... Line 1522...
1569
 
1522
 
1570
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1523
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1571
	crtc = intel_get_crtc_for_plane(dev, 0);
1524
	crtc = intel_get_crtc_for_plane(dev, 0);
-
 
1525
	if (intel_crtc_active(crtc)) {
1572
	if (intel_crtc_active(crtc)) {
1526
		const struct drm_display_mode *adjusted_mode;
1573
		int cpp = crtc->fb->bits_per_pixel / 8;
1527
		int cpp = crtc->fb->bits_per_pixel / 8;
1574
		if (IS_GEN2(dev))
1528
		if (IS_GEN2(dev))
Line -... Line 1529...
-
 
1529
			cpp = 4;
1575
			cpp = 4;
1530
 
1576
 
1531
		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1577
		planea_wm = intel_calculate_wm(crtc->mode.clock,
1532
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1578
					       wm_info, fifo_size, cpp,
1533
					       wm_info, fifo_size, cpp,
1579
					       latency_ns);
1534
					       latency_ns);
1580
		enabled = crtc;
1535
		enabled = crtc;
Line 1581... Line 1536...
1581
	} else
1536
	} else
1582
		planea_wm = fifo_size - wm_info->guard_size;
1537
		planea_wm = fifo_size - wm_info->guard_size;
1583
 
1538
 
-
 
1539
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1584
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1540
	crtc = intel_get_crtc_for_plane(dev, 1);
1585
	crtc = intel_get_crtc_for_plane(dev, 1);
1541
	if (intel_crtc_active(crtc)) {
1586
	if (intel_crtc_active(crtc)) {
1542
		const struct drm_display_mode *adjusted_mode;
Line -... Line 1543...
-
 
1543
		int cpp = crtc->fb->bits_per_pixel / 8;
1587
		int cpp = crtc->fb->bits_per_pixel / 8;
1544
		if (IS_GEN2(dev))
1588
		if (IS_GEN2(dev))
1545
			cpp = 4;
1589
			cpp = 4;
1546
 
1590
 
1547
		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1591
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
1548
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1592
					       wm_info, fifo_size, cpp,
1549
					       wm_info, fifo_size, cpp,
Line 1607... Line 1564...
1607
 
1564
 
1608
	/* Play safe and disable self-refresh before adjusting watermarks. */
1565
	/* Play safe and disable self-refresh before adjusting watermarks. */
1609
	if (IS_I945G(dev) || IS_I945GM(dev))
1566
	if (IS_I945G(dev) || IS_I945GM(dev))
1610
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1567
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1611
	else if (IS_I915GM(dev))
1568
	else if (IS_I915GM(dev))
Line 1612... Line 1569...
1612
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1569
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
1613
 
1570
 
1614
	/* Calc sr entries for one plane configs */
1571
	/* Calc sr entries for one plane configs */
1615
	if (HAS_FW_BLC(dev) && enabled) {
1572
	if (HAS_FW_BLC(dev) && enabled) {
-
 
1573
		/* self-refresh has much higher latency */
-
 
1574
		static const int sr_latency_ns = 6000;
1616
		/* self-refresh has much higher latency */
1575
		const struct drm_display_mode *adjusted_mode =
1617
		static const int sr_latency_ns = 6000;
1576
			&to_intel_crtc(enabled)->config.adjusted_mode;
1618
		int clock = enabled->mode.clock;
1577
		int clock = adjusted_mode->crtc_clock;
1619
		int htotal = enabled->mode.htotal;
1578
		int htotal = adjusted_mode->crtc_htotal;
1620
		int hdisplay = enabled->mode.hdisplay;
1579
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
1621
		int pixel_size = enabled->fb->bits_per_pixel / 8;
1580
		int pixel_size = enabled->fb->bits_per_pixel / 8;
Line 1622... Line 1581...
1622
		unsigned long line_time_us;
1581
		unsigned long line_time_us;
Line 1657... Line 1616...
1657
		if (enabled) {
1616
		if (enabled) {
1658
			if (IS_I945G(dev) || IS_I945GM(dev))
1617
			if (IS_I945G(dev) || IS_I945GM(dev))
1659
				I915_WRITE(FW_BLC_SELF,
1618
				I915_WRITE(FW_BLC_SELF,
1660
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1619
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1661
			else if (IS_I915GM(dev))
1620
			else if (IS_I915GM(dev))
1662
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1621
				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
1663
			DRM_DEBUG_KMS("memory self refresh enabled\n");
1622
			DRM_DEBUG_KMS("memory self refresh enabled\n");
1664
		} else
1623
		} else
1665
			DRM_DEBUG_KMS("memory self refresh disabled\n");
1624
			DRM_DEBUG_KMS("memory self refresh disabled\n");
1666
	}
1625
	}
1667
}
1626
}
Line 1668... Line 1627...
1668
 
1627
 
1669
static void i830_update_wm(struct drm_device *dev)
1628
static void i845_update_wm(struct drm_crtc *unused_crtc)
-
 
1629
{
1670
{
1630
	struct drm_device *dev = unused_crtc->dev;
1671
	struct drm_i915_private *dev_priv = dev->dev_private;
1631
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1632
	struct drm_crtc *crtc;
1672
	struct drm_crtc *crtc;
1633
	const struct drm_display_mode *adjusted_mode;
1673
	uint32_t fwater_lo;
1634
	uint32_t fwater_lo;
Line 1674... Line 1635...
1674
	int planea_wm;
1635
	int planea_wm;
1675
 
1636
 
1676
	crtc = single_enabled_crtc(dev);
1637
	crtc = single_enabled_crtc(dev);
Line -... Line 1638...
-
 
1638
	if (crtc == NULL)
1677
	if (crtc == NULL)
1639
		return;
-
 
1640
 
1678
		return;
1641
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1679
 
1642
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1680
	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1643
				       &i845_wm_info,
1681
				       dev_priv->display.get_fifo_size(dev, 0),
1644
				       dev_priv->display.get_fifo_size(dev, 0),
Line 1682... Line 1645...
1682
				       4, latency_ns);
1645
				       4, latency_ns);
Line 1683... Line 1646...
1683
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1646
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1684
	fwater_lo |= (3<<8) | planea_wm;
1647
	fwater_lo |= (3<<8) | planea_wm;
Line 1685... Line -...
1685
 
-
 
1686
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
-
 
1687
 
-
 
1688
	I915_WRITE(FW_BLC, fwater_lo);
-
 
1689
}
-
 
1690
 
-
 
1691
/*
-
 
1692
 * Check the wm result.
-
 
1693
 *
-
 
1694
 * If any calculated watermark values is larger than the maximum value that
-
 
1695
 * can be programmed into the associated watermark register, that watermark
-
 
1696
 * must be disabled.
-
 
1697
 */
-
 
1698
static bool ironlake_check_srwm(struct drm_device *dev, int level,
-
 
1699
				int fbc_wm, int display_wm, int cursor_wm,
-
 
1700
				const struct intel_watermark_params *display,
-
 
1701
				const struct intel_watermark_params *cursor)
-
 
1702
{
-
 
1703
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1704
 
-
 
1705
	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
-
 
1706
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
 
1707
 
-
 
1708
	if (fbc_wm > SNB_FBC_MAX_SRWM) {
-
 
1709
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
-
 
1710
			      fbc_wm, SNB_FBC_MAX_SRWM, level);
-
 
1711
 
-
 
1712
		/* fbc has it's own way to disable FBC WM */
-
 
1713
		I915_WRITE(DISP_ARB_CTL,
-
 
1714
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
-
 
1715
		return false;
-
 
1716
	} else if (INTEL_INFO(dev)->gen >= 6) {
-
 
1717
		/* enable FBC WM (except on ILK, where it must remain off) */
-
 
1718
		I915_WRITE(DISP_ARB_CTL,
-
 
1719
			   I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
-
 
1720
	}
-
 
1721
 
-
 
1722
	if (display_wm > display->max_wm) {
-
 
1723
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
-
 
1724
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
-
 
1725
		return false;
-
 
1726
	}
-
 
1727
 
-
 
1728
	if (cursor_wm > cursor->max_wm) {
-
 
1729
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
-
 
1730
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
-
 
1731
		return false;
-
 
1732
	}
-
 
1733
 
-
 
1734
	if (!(fbc_wm || display_wm || cursor_wm)) {
-
 
1735
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
-
 
1736
		return false;
-
 
1737
	}
-
 
1738
 
-
 
1739
	return true;
-
 
1740
}
-
 
1741
 
-
 
1742
/*
-
 
1743
 * Compute watermark values of WM[1-3],
-
 
1744
 */
-
 
1745
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
-
 
1746
				  int latency_ns,
-
 
1747
				  const struct intel_watermark_params *display,
-
 
1748
				  const struct intel_watermark_params *cursor,
-
 
1749
				  int *fbc_wm, int *display_wm, int *cursor_wm)
-
 
1750
{
-
 
1751
	struct drm_crtc *crtc;
-
 
1752
	unsigned long line_time_us;
-
 
1753
	int hdisplay, htotal, pixel_size, clock;
-
 
1754
	int line_count, line_size;
-
 
1755
	int small, large;
-
 
1756
	int entries;
-
 
1757
 
-
 
1758
	if (!latency_ns) {
-
 
1759
		*fbc_wm = *display_wm = *cursor_wm = 0;
-
 
1760
		return false;
-
 
1761
	}
-
 
1762
 
-
 
1763
	crtc = intel_get_crtc_for_plane(dev, plane);
-
 
1764
	hdisplay = crtc->mode.hdisplay;
-
 
1765
	htotal = crtc->mode.htotal;
-
 
1766
	clock = crtc->mode.clock;
-
 
1767
	pixel_size = crtc->fb->bits_per_pixel / 8;
-
 
1768
 
-
 
1769
	line_time_us = (htotal * 1000) / clock;
-
 
1770
	line_count = (latency_ns / line_time_us + 1000) / 1000;
-
 
1771
	line_size = hdisplay * pixel_size;
-
 
1772
 
-
 
1773
	/* Use the minimum of the small and large buffer method for primary */
-
 
1774
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-
 
1775
	large = line_count * line_size;
-
 
1776
 
-
 
1777
	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
-
 
1778
	*display_wm = entries + display->guard_size;
-
 
1779
 
-
 
1780
	/*
-
 
1781
	 * Spec says:
-
 
1782
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
-
 
1783
	 */
-
 
1784
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
 
1785
 
-
 
1786
	/* calculate the self-refresh watermark for display cursor */
-
 
1787
	entries = line_count * pixel_size * 64;
-
 
1788
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
-
 
1789
	*cursor_wm = entries + cursor->guard_size;
-
 
1790
 
-
 
1791
	return ironlake_check_srwm(dev, level,
-
 
1792
				   *fbc_wm, *display_wm, *cursor_wm,
-
 
1793
				   display, cursor);
-
 
1794
}
-
 
1795
 
-
 
1796
static void ironlake_update_wm(struct drm_device *dev)
-
 
1797
{
-
 
1798
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1799
	int fbc_wm, plane_wm, cursor_wm;
-
 
1800
	unsigned int enabled;
-
 
1801
 
-
 
1802
	enabled = 0;
-
 
1803
	if (g4x_compute_wm0(dev, PIPE_A,
-
 
1804
			    &ironlake_display_wm_info,
-
 
1805
			    dev_priv->wm.pri_latency[0] * 100,
-
 
1806
			    &ironlake_cursor_wm_info,
-
 
1807
			    dev_priv->wm.cur_latency[0] * 100,
-
 
1808
			    &plane_wm, &cursor_wm)) {
-
 
1809
		I915_WRITE(WM0_PIPEA_ILK,
-
 
1810
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-
 
1811
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-
 
1812
			      " plane %d, " "cursor: %d\n",
-
 
1813
			      plane_wm, cursor_wm);
-
 
1814
		enabled |= 1 << PIPE_A;
-
 
1815
	}
-
 
1816
 
-
 
1817
	if (g4x_compute_wm0(dev, PIPE_B,
-
 
1818
			    &ironlake_display_wm_info,
-
 
1819
			    dev_priv->wm.pri_latency[0] * 100,
-
 
1820
			    &ironlake_cursor_wm_info,
-
 
1821
			    dev_priv->wm.cur_latency[0] * 100,
-
 
1822
			    &plane_wm, &cursor_wm)) {
-
 
1823
		I915_WRITE(WM0_PIPEB_ILK,
-
 
1824
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-
 
1825
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-
 
1826
			      " plane %d, cursor: %d\n",
-
 
1827
			      plane_wm, cursor_wm);
-
 
1828
		enabled |= 1 << PIPE_B;
-
 
1829
	}
-
 
1830
 
-
 
1831
	/*
-
 
1832
	 * Calculate and update the self-refresh watermark only when one
-
 
1833
	 * display plane is used.
-
 
1834
	 */
-
 
1835
	I915_WRITE(WM3_LP_ILK, 0);
-
 
1836
	I915_WRITE(WM2_LP_ILK, 0);
-
 
1837
	I915_WRITE(WM1_LP_ILK, 0);
-
 
1838
 
-
 
1839
	if (!single_plane_enabled(enabled))
-
 
1840
		return;
-
 
1841
	enabled = ffs(enabled) - 1;
-
 
1842
 
-
 
1843
	/* WM1 */
-
 
1844
	if (!ironlake_compute_srwm(dev, 1, enabled,
-
 
1845
				   dev_priv->wm.pri_latency[1] * 500,
-
 
1846
				   &ironlake_display_srwm_info,
-
 
1847
				   &ironlake_cursor_srwm_info,
-
 
1848
				   &fbc_wm, &plane_wm, &cursor_wm))
-
 
1849
		return;
-
 
1850
 
-
 
1851
	I915_WRITE(WM1_LP_ILK,
-
 
1852
		   WM1_LP_SR_EN |
-
 
1853
		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-
 
1854
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-
 
1855
		   (plane_wm << WM1_LP_SR_SHIFT) |
-
 
1856
		   cursor_wm);
-
 
1857
 
-
 
1858
	/* WM2 */
-
 
1859
	if (!ironlake_compute_srwm(dev, 2, enabled,
-
 
1860
				   dev_priv->wm.pri_latency[2] * 500,
-
 
1861
				   &ironlake_display_srwm_info,
-
 
1862
				   &ironlake_cursor_srwm_info,
-
 
1863
				   &fbc_wm, &plane_wm, &cursor_wm))
-
 
1864
		return;
-
 
1865
 
-
 
1866
	I915_WRITE(WM2_LP_ILK,
-
 
1867
		   WM2_LP_EN |
-
 
1868
		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-
 
1869
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-
 
1870
		   (plane_wm << WM1_LP_SR_SHIFT) |
-
 
1871
		   cursor_wm);
-
 
1872
 
-
 
1873
	/*
-
 
1874
	 * WM3 is unsupported on ILK, probably because we don't have latency
-
 
1875
	 * data for that power state
-
 
1876
	 */
-
 
1877
}
-
 
1878
 
-
 
1879
static void sandybridge_update_wm(struct drm_device *dev)
-
 
1880
{
-
 
1881
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1882
	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
-
 
1883
	u32 val;
-
 
1884
	int fbc_wm, plane_wm, cursor_wm;
-
 
1885
	unsigned int enabled;
-
 
1886
 
-
 
1887
	enabled = 0;
-
 
1888
	if (g4x_compute_wm0(dev, PIPE_A,
-
 
1889
			    &sandybridge_display_wm_info, latency,
-
 
1890
			    &sandybridge_cursor_wm_info, latency,
-
 
1891
			    &plane_wm, &cursor_wm)) {
-
 
1892
		val = I915_READ(WM0_PIPEA_ILK);
-
 
1893
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-
 
1894
		I915_WRITE(WM0_PIPEA_ILK, val |
-
 
1895
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-
 
1896
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-
 
1897
			      " plane %d, " "cursor: %d\n",
-
 
1898
			      plane_wm, cursor_wm);
-
 
1899
		enabled |= 1 << PIPE_A;
-
 
1900
	}
-
 
1901
 
-
 
1902
	if (g4x_compute_wm0(dev, PIPE_B,
-
 
1903
			    &sandybridge_display_wm_info, latency,
-
 
1904
			    &sandybridge_cursor_wm_info, latency,
-
 
1905
			    &plane_wm, &cursor_wm)) {
-
 
1906
		val = I915_READ(WM0_PIPEB_ILK);
-
 
1907
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-
 
1908
		I915_WRITE(WM0_PIPEB_ILK, val |
-
 
1909
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-
 
1910
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-
 
1911
			      " plane %d, cursor: %d\n",
-
 
1912
			      plane_wm, cursor_wm);
-
 
1913
		enabled |= 1 << PIPE_B;
-
 
1914
	}
-
 
1915
 
-
 
1916
	/*
-
 
1917
	 * Calculate and update the self-refresh watermark only when one
-
 
1918
	 * display plane is used.
-
 
1919
	 *
-
 
1920
	 * SNB support 3 levels of watermark.
-
 
1921
	 *
-
 
1922
	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
-
 
1923
	 * and disabled in the descending order
-
 
1924
	 *
-
 
1925
	 */
-
 
1926
	I915_WRITE(WM3_LP_ILK, 0);
-
 
1927
	I915_WRITE(WM2_LP_ILK, 0);
-
 
1928
	I915_WRITE(WM1_LP_ILK, 0);
-
 
1929
 
-
 
1930
	if (!single_plane_enabled(enabled) ||
-
 
1931
	    dev_priv->sprite_scaling_enabled)
-
 
1932
		return;
-
 
1933
	enabled = ffs(enabled) - 1;
-
 
1934
 
-
 
1935
	/* WM1 */
-
 
1936
	if (!ironlake_compute_srwm(dev, 1, enabled,
-
 
1937
				   dev_priv->wm.pri_latency[1] * 500,
-
 
1938
				   &sandybridge_display_srwm_info,
-
 
1939
				   &sandybridge_cursor_srwm_info,
-
 
1940
				   &fbc_wm, &plane_wm, &cursor_wm))
-
 
1941
		return;
-
 
1942
 
-
 
1943
	I915_WRITE(WM1_LP_ILK,
-
 
1944
		   WM1_LP_SR_EN |
-
 
1945
		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-
 
1946
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-
 
1947
		   (plane_wm << WM1_LP_SR_SHIFT) |
-
 
1948
		   cursor_wm);
-
 
1949
 
-
 
1950
	/* WM2 */
-
 
1951
	if (!ironlake_compute_srwm(dev, 2, enabled,
-
 
1952
				   dev_priv->wm.pri_latency[2] * 500,
-
 
1953
				   &sandybridge_display_srwm_info,
-
 
1954
				   &sandybridge_cursor_srwm_info,
-
 
1955
				   &fbc_wm, &plane_wm, &cursor_wm))
-
 
1956
		return;
-
 
1957
 
-
 
1958
	I915_WRITE(WM2_LP_ILK,
-
 
1959
		   WM2_LP_EN |
-
 
1960
		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-
 
1961
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-
 
1962
		   (plane_wm << WM1_LP_SR_SHIFT) |
-
 
1963

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
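
/*
 * A rough map of the ILK+ watermark code that follows, inferred from
 * ilk_update_wm() further down rather than stated anywhere in the file:
 * ilk_compute_wm_parameters() snapshots plane state for one pipe,
 * intel_compute_pipe_wm() computes per-level results from it,
 * ilk_wm_merge() merges and validates the levels across active pipes,
 * and ilk_compute_wm_results()/ilk_write_wm_values() pack the values
 * into register layout and program only what actually changed.
 */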

static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
Line 2166... Line 1717...
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}

struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
Line 2231... Line 1773...
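
/*
 * A note on the two methods used by these WM helpers (a sketch from the
 * names and units visible here; the ilk_wm_method1/method2 bodies are
 * elided in this hunk): method 1 sizes the buffer from the raw pixel
 * rate times the memory latency (mem_value, in 0.1us units), method 2
 * works in whole display lines using pipe_htotal and the plane width.
 * Where both are computed, the smaller result appears to be used, since
 * either one is enough to cover the latency.
 */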

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;
Line 2254... Line 1796...
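
/*
 * The cursor watermark below only uses the line-based method: the
 * parameter setup later in this file treats the cursor as a fixed
 * 64 pixel wide plane (p->cur.horiz_pixels = 64), so the pixel-rate
 * based method would add nothing. (Inferred from the surrounding code.)
 */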

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
Line 2268... Line 1810...
			      params->cur.bytes_per_pixel,
			      mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
Line 2281... Line 1823...
			  params->pri.bytes_per_pixel);
}

static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
Line 2326... Line 1870...
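			/*
			 * The branch above (elided in this hunk) splits the
			 * display FIFO between primary and sprite, either 1:1
			 * or 5:6 depending on the INTEL_DDB_PART_* mode; the
			 * division below is the tail end of that split. (A
			 * reading of the surrounding code, not of the
			 * hardware documentation.)
			 */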
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	if (INTEL_INFO(dev)->gen >= 8)
		max = level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		max = level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
Line 2356... Line 1902...
	else
		return level == 0 ? 31 : 63;
}

/* Calculate the maximum FBC watermark */
static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
{
	/* max that registers can hold */
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}

static void ilk_compute_wm_maximums(struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_max(dev);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
Line 2412... Line 1961...
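		/*
		 * If LP0 exceeds its maximums, the code below clamps the
		 * values into range and keeps the level enabled so that at
		 * least some watermark is always programmed; higher levels
		 * are simply left disabled instead. (Inferred from this
		 * hunk; the comparison against the maximums is elided.)
		 */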
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
Line 2440... Line 1987...
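	/*
	 * Each level pulls its own primary/sprite/cursor latency out of
	 * dev_priv->wm (the spr/cur lookups mirror the pri_latency one
	 * above and are elided here), so one parameter snapshot yields a
	 * full set of per-level results.
	 */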
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}

static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 2503... Line 2001...
		return 0;

	/* The WM are computed based on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 intel_ddi_get_cdclk_freq(dev_priv));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}

static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Line 2560... Line 2059...
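	/*
	 * What remains of this hunk is the tail of the latency fixups: the
	 * raw latencies read out earlier (elided above) are patched for
	 * known issues, e.g. the WaDoubleCursorLP3Latency:ivb workaround
	 * below doubles the LP3 cursor latency on Ivybridge.
	 */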
	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}

static int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}

static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[5])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
Line 2612... Line 2114...
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
				      struct ilk_pipe_wm_parameters *p,
				      struct intel_wm_config *config)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;

	p->active = intel_crtc_active(crtc);
	if (p->active) {
		p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
		p->cur.bytes_per_pixel = 4;
		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
		p->cur.horiz_pixels = 64;
		/* TODO: for now, assume primary and cursor planes are always enabled. */
		p->pri.enabled = true;
		p->cur.enabled = true;
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		config->num_pipes_active += intel_crtc_active(crtc);

	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe)
			p->spr = intel_plane->wm;

		config->sprites_enabled |= intel_plane->wm.enabled;
		config->sprites_scaled |= intel_plane->wm.scaled;
	}
}

/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
				  const struct ilk_pipe_wm_parameters *params,
				  struct intel_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	/* LP0 watermark maximums depend on this pipe alone */
	struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = params->spr.enabled,
		.sprites_scaled = params->spr.scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
		max_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (params->spr.scaled)
		max_level = 0;

	for (level = 0; level <= max_level; level++)
		ilk_compute_wm_level(dev_priv, level, params,
				     &pipe_wm->wm[level]);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);

	/* At least LP0 must be valid */
	return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
		const struct intel_wm_level *wm =
			&intel_crtc->wm.active.wm[level];

		if (!wm->enable)
			return;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}

	ret_wm->enable = true;
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev);

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		return;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (!ilk_validate_wm_level(level, max, wm))
			break;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];
		if (!r->enable)
			break;

		results->wm_lp[wm_lp - 1] = WM3_LP_EN |
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}

/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need to, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

static bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

static void ilk_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_maximums max;
	struct ilk_pipe_wm_parameters params = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;
	struct intel_pipe_wm pipe_wm = {};
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct intel_wm_config config = {};

	ilk_compute_wm_parameters(crtc, &params, &config);

	intel_compute_pipe_wm(crtc, &params, &pipe_wm);

	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
		return;

	intel_crtc->wm.active = pipe_wm;

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}

static void ilk_update_sprite_wm(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enabled, bool scaled)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum pipe pipe = intel_crtc->pipe;
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	if (intel_crtc_active(crtc)) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
Line 3082... Line 2720...
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}

void intel_update_sprite_watermarks(struct drm_plane *plane,
Line 3269... Line 2907...
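
/*
 * The helpers below implement RPS, the gen6+ render frequency scaling:
 * the GPU clock is moved between rps.min_delay and rps.max_delay in
 * response to up/down-threshold interrupts. A note on units, inferred
 * from the code rather than stated by it: the u8 delay values are
 * hardware frequency steps, and the busy/idle evaluation intervals
 * programmed by gen6_set_rps_thresholds() are in ~1280ns units, as its
 * comment says.
 */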
3269
/* There's a funny hw issue where the hw returns all 0 when reading from
2907
/* There's a funny hw issue where the hw returns all 0 when reading from
3270
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
2908
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3271
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
2909
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
3272
 * all limits and the gpu stuck at whatever frequency it is at atm).
2910
 * all limits and the gpu stuck at whatever frequency it is at atm).
3273
 */
2911
 */
3274
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
2912
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3275
{
2913
{
3276
	u32 limits;
2914
	u32 limits;
Line 3277... Line -...
3277
 
-
 
3278
	limits = 0;
-
 
3279
 
-
 
3280
	if (*val >= dev_priv->rps.max_delay)
-
 
3281
		*val = dev_priv->rps.max_delay;
-
 
3282
	limits |= dev_priv->rps.max_delay << 24;
-
 
3283
 
2915
 
3284
	/* Only set the down limit when we've reached the lowest level to avoid
2916
	/* Only set the down limit when we've reached the lowest level to avoid
3285
	 * getting more interrupts, otherwise leave this clear. This prevents a
2917
	 * getting more interrupts, otherwise leave this clear. This prevents a
3286
	 * race in the hw when coming out of rc6: There's a tiny window where
2918
	 * race in the hw when coming out of rc6: There's a tiny window where
3287
	 * the hw runs at the minimal clock before selecting the desired
2919
	 * the hw runs at the minimal clock before selecting the desired
3288
	 * frequency, if the down threshold expires in that window we will not
2920
	 * frequency, if the down threshold expires in that window we will not
3289
	 * receive a down interrupt. */
2921
	 * receive a down interrupt. */
3290
	if (*val <= dev_priv->rps.min_delay) {
2922
	limits = dev_priv->rps.max_delay << 24;
3291
		*val = dev_priv->rps.min_delay;
2923
	if (val <= dev_priv->rps.min_delay)
3292
		limits |= dev_priv->rps.min_delay << 16;
-
 
Line 3293... Line 2924...
3293
	}
2924
		limits |= dev_priv->rps.min_delay << 16;
3294
 
2925
 
Line -... Line 2926...
-
 
2926
	return limits;
-
 
2927
}
 
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+	int new_power;
+
+	new_power = dev_priv->rps.power;
+	switch (dev_priv->rps.power) {
+	case LOW_POWER:
+		if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+			new_power = BETWEEN;
+		break;
+
+	case BETWEEN:
+		if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+			new_power = LOW_POWER;
+		else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+			new_power = HIGH_POWER;
+		break;
+
+	case HIGH_POWER:
+		if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+			new_power = BETWEEN;
+		break;
+	}
+	/* Max/min bins are special */
+	if (val == dev_priv->rps.min_delay)
+		new_power = LOW_POWER;
+	if (val == dev_priv->rps.max_delay)
+		new_power = HIGH_POWER;
+	if (new_power == dev_priv->rps.power)
+		return;
+
+	/* Note the units here are not exactly 1us, but 1280ns. */
+	switch (new_power) {
+	case LOW_POWER:
+		/* Upclock if more than 95% busy over 16ms */
+		I915_WRITE(GEN6_RP_UP_EI, 12500);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+
+		/* Downclock if less than 85% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+
+	case BETWEEN:
+		/* Upclock if more than 90% busy over 13ms */
+		I915_WRITE(GEN6_RP_UP_EI, 10250);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+
+		/* Downclock if less than 75% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+
+	case HIGH_POWER:
+		/* Upclock if more than 85% busy over 10ms */
+		I915_WRITE(GEN6_RP_UP_EI, 8000);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+
+		/* Downclock if less than 60% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+	}
+
+	dev_priv->rps.power = new_power;
+	dev_priv->rps.last_adj = 0;
+}
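
The three GEN6_RP_CONTROL writes above are identical; only the evaluation intervals and thresholds differ per power state. Since these counters tick in 1280 ns units, the window lengths and busy percentages in the comments can be checked with a few lines of arithmetic. A standalone sketch, not driver code:

#include <stdio.h>

int main(void)
{
	/* { up_ei, up_threshold } per state, in 1280 ns ticks */
	static const struct { const char *state; unsigned ei, thresh; } t[] = {
		{ "LOW_POWER",  12500, 11800 },
		{ "BETWEEN",    10250,  9225 },
		{ "HIGH_POWER",  8000,  6800 },
	};

	for (int i = 0; i < 3; i++)
		printf("%-10s window %5.2f ms, upclock above %4.1f%% busy\n",
		       t[i].state,
		       t[i].ei * 1280.0 / 1e6,		/* ticks -> ms */
		       100.0 * t[i].thresh / t[i].ei);	/* threshold as %% */
	return 0;
}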
 
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits = gen6_rps_limits(dev_priv, &val);
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_delay);
 	WARN_ON(val < dev_priv->rps.min_delay);
 
 	if (val == dev_priv->rps.cur_delay)
 		return;
 
+	gen6_set_rps_thresholds(dev_priv, val);
Line 3317... Line 3041...
 		   GEN6_AGGRESSIVE_TURBO);
 
 	/* Make sure we continue to get interrupts
 	 * until we hit the minimum or maximum frequencies.
 	 */
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   gen6_rps_limits(dev_priv, val));
 
 	POSTING_READ(GEN6_RPNSWREQ);
 
 	dev_priv->rps.cur_delay = val;
 
 	trace_intel_gpu_freq_change(val * 50);
 }
 
-/*
- * Wait until the previous freq change has completed,
- * or the timeout elapsed, and then update our notion
- * of the current GPU frequency.
- */
-static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
-{
-	u32 pval;
-
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-	if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
-		DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-
-	pval >>= 8;
-
-	if (pval != dev_priv->rps.cur_delay)
-		DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
-				 vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
-				 dev_priv->rps.cur_delay,
-				 vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
-
-	dev_priv->rps.cur_delay = pval;
-}
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->rps.enabled) {
+		if (IS_VALLEYVIEW(dev))
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+		else
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+		dev_priv->rps.last_adj = 0;
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void gen6_rps_boost(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->rps.enabled) {
+		if (IS_VALLEYVIEW(dev))
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+		else
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+		dev_priv->rps.last_adj = 0;
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
 
 void valleyview_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	gen6_rps_limits(dev_priv, &val);
-
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_delay);
 	WARN_ON(val < dev_priv->rps.min_delay);
 
-	vlv_update_rps_cur_delay(dev_priv);
-
 	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.cur_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
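
With the pointer-based clamping removed from gen6_rps_limits(), both set_rps paths now only WARN on an out-of-range value; callers are expected to clamp before requesting. A sketch of that caller-side contract, not driver code, with plain unsigned char standing in for the driver's u8:

#include <stdio.h>

static unsigned char clamp_rps_delay(unsigned char val,
				     unsigned char min_delay,
				     unsigned char max_delay)
{
	if (val < min_delay)
		return min_delay;
	if (val > max_delay)
		return max_delay;
	return val;
}

int main(void)
{
	printf("%u %u %u\n",
	       clamp_rps_delay(3, 7, 22),	/* -> 7  */
	       clamp_rps_delay(14, 7, 22),	/* -> 14 */
	       clamp_rps_delay(40, 7, 22));	/* -> 22 */
	return 0;
}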
Line 3421... Line 3146...
 		drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
 		dev_priv->vlv_pctx = NULL;
 	}
 }
 
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+	if (IS_GEN6(dev))
+		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+	if (IS_HASWELL(dev))
+		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+			(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+			(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+			(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+}
+
 int intel_enable_rc6(const struct drm_device *dev)
 {
 	/* No RC6 before Ironlake */
 	if (INTEL_INFO(dev)->gen < 5)
Line 3435... Line 3174...
 	/* Disable RC6 on Ironlake */
 	if (INTEL_INFO(dev)->gen == 5)
 		return 0;
 
-	if (IS_HASWELL(dev)) {
-		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+	if (IS_HASWELL(dev))
 		return INTEL_RC6_ENABLE;
-	}
 
 	/* snb/ivb have more than one rc6 state. */
-	if (INTEL_INFO(dev)->gen == 6) {
-		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+	if (INTEL_INFO(dev)->gen == 6)
 		return INTEL_RC6_ENABLE;
-	}
 
 	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
Line 3473... Line 3207...
 		enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
 
 	I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
 }
 
+static void gen8_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	uint32_t rc6_mask = 0, rp_state_cap;
+	int unused;
+
+	/* 1a: Software RC state - RC0 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+
+	/* 1c & 1d: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* 2a: Disable RC states. */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+
+	/* 2b: Program RC6 thresholds.*/
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+	for_each_ring(ring, dev_priv, unused)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+	/* 3: Enable RC6 */
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+			GEN6_RC_CTL_EI_MODE(1) |
+			rc6_mask);
+
+	/* 4 Program defaults and thresholds for RPS*/
+	I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
+	I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
+	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
+
+	/* Docs recommend 900MHz, and 300 MHz respectively */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   dev_priv->rps.max_delay << 24 |
+		   dev_priv->rps.min_delay << 16);
+
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
+	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	/* 5: Enable RPS */
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
+
+	/* 6: Ring frequency + overclocking (our driver does this later) */
+
+	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
+
+	gen6_enable_rps_interrupts(dev);
+
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
 
 static void gen6_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
Line 3500... Line 3306...
 	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
 		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
 	/* In units of 50MHz */
 	dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
-	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+	dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
+	dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
+	dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
Line 3524... Line 3333...
 	for_each_ring(ring, dev_priv, i)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-	if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+	if (IS_IVYBRIDGE(dev))
 		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
 	else
 		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
 	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
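
Further down, the policy value returned by intel_enable_rc6() is folded into a GEN6_RC_CTL_* mask, which intel_print_rc6_info() then reports. A standalone sketch of that decode; the bit positions below are placeholders, not the real register definitions:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the GEN6_RC_CTL_* enable bits, values invented */
#define TOY_RC6_ENABLE   (1u << 18)
#define TOY_RC6p_ENABLE  (1u << 17)
#define TOY_RC6pp_ENABLE (1u << 16)

static void toy_print_rc6_info(uint32_t mode)
{
	printf("RC6 %s, RC6p %s, RC6pp %s\n",
	       (mode & TOY_RC6_ENABLE)   ? "on" : "off",
	       (mode & TOY_RC6p_ENABLE)  ? "on" : "off",
	       (mode & TOY_RC6pp_ENABLE) ? "on" : "off");
}

int main(void)
{
	toy_print_rc6_info(TOY_RC6_ENABLE);			/* plain RC6 only */
	toy_print_rc6_info(TOY_RC6_ENABLE | TOY_RC6p_ENABLE);	/* deep RC6 too */
	return 0;
}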
Line 3545... Line 3354...
 		if (rc6_mode & INTEL_RC6pp_ENABLE)
 			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
 	}
 
-	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-			(rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-			(rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-			(rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+	intel_print_rc6_info(dev, rc6_mask);
 
 	I915_WRITE(GEN6_RC_CONTROL,
 		   rc6_mask |
 		   GEN6_RC_CTL_EI_MODE(1) |
 		   GEN6_RC_CTL_HW_ENABLE);
 
-	if (IS_HASWELL(dev)) {
-		I915_WRITE(GEN6_RPNSWREQ,
-			   HSW_FREQUENCY(10));
-		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			   HSW_FREQUENCY(12));
-	} else {
-		I915_WRITE(GEN6_RPNSWREQ,
-			   GEN6_FREQUENCY(10) |
-			   GEN6_OFFSET(0) |
-			   GEN6_AGGRESSIVE_TURBO);
-		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			   GEN6_FREQUENCY(12));
-	}
-
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->rps.max_delay << 24 |
-		   dev_priv->rps.min_delay << 16);
-
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-	I915_WRITE(GEN6_RP_UP_EI, 66000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
-	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-	I915_WRITE(GEN6_RP_CONTROL,
-		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
-		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_AVG |
-		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
+	/* Power down if completely idle for over 50ms */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
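
Both the gen8 path above and the superseded gen6 line in the next hunk seed RPS from the current-frequency byte of GEN6_GT_PERF_STATUS via (status & 0xff00) >> 8. A standalone sketch of that field extraction; the sample register value is invented:

#include <stdint.h>
#include <stdio.h>

/* bits 15:8 of the perf status word hold the current ratio in 50 MHz units */
static unsigned int perf_status_to_mhz(uint32_t status)
{
	return ((status & 0xff00) >> 8) * 50;
}

int main(void)
{
	printf("%u MHz\n", perf_status_to_mhz(0x00000e00));	/* 14 -> 700 MHz */
	return 0;
}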
Line 3602... Line 3379...
 	}
 	} else {
 		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 	}
 
-	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+	dev_priv->rps.power = HIGH_POWER; /* force a reset */
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
 
 	gen6_enable_rps_interrupts(dev);
3620
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3398
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3621
		if (ret)
3399
		if (ret)
3622
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3400
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3623
	}
3401
	}
Line 3624... Line 3402...
3624
 
3402
 
3625
	gen6_gt_force_wake_put(dev_priv);
3403
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
Line 3626... Line 3404...
3626
}
3404
}
3627
 
3405
 
3628
void gen6_update_ring_freq(struct drm_device *dev)
3406
void gen6_update_ring_freq(struct drm_device *dev)
3629
{
3407
{
3630
	struct drm_i915_private *dev_priv = dev->dev_private;
3408
	struct drm_i915_private *dev_priv = dev->dev_private;
3631
	int min_freq = 15;
3409
	int min_freq = 15;
3632
	unsigned int gpu_freq;
3410
	unsigned int gpu_freq;
-
 
3411
	unsigned int max_ia_freq, min_ring_freq;
Line 3633... Line 3412...
3633
	unsigned int max_ia_freq, min_ring_freq;
3412
	int scaling_factor = 180;
Line 3634... Line 3413...
3634
	int scaling_factor = 180;
3413
	struct cpufreq_policy *policy;
3635
 
3414
 
3636
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3415
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3637
 
3416
 
3638
	max_ia_freq = cpufreq_quick_get_max(0);
3417
	max_ia_freq = cpufreq_quick_get_max(0);
3639
	/*
-
 
3640
	 * Default to measured freq if none found, PCU will ensure we don't go
3418
	/*
Line 3641... Line 3419...
3641
	 * over
3419
	 * Default to measured freq if none found, PCU will ensure we don't go
3642
	 */
3420
	 * over
Line 3643... Line 3421...
3643
	if (!max_ia_freq)
3421
	 */
3644
		max_ia_freq = tsc_khz;
3422
		max_ia_freq = tsc_khz;
3645
 
3423
 
Line 3646... Line 3424...
3646
	/* Convert from kHz to MHz */
3424
	/* Convert from kHz to MHz */
3647
	max_ia_freq /= 1000;
3425
	max_ia_freq /= 1000;
3648
 
3426
 
3649
	min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
3427
	min_ring_freq = I915_READ(DCLK) & 0xf;
Line 3658... Line 3436...
3658
	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
3436
	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
3659
	     gpu_freq--) {
3437
	     gpu_freq--) {
3660
		int diff = dev_priv->rps.max_delay - gpu_freq;
3438
		int diff = dev_priv->rps.max_delay - gpu_freq;
3661
		unsigned int ia_freq = 0, ring_freq = 0;
3439
		unsigned int ia_freq = 0, ring_freq = 0;
Line -... Line 3440...
-
 
3440
 
-
 
3441
		if (INTEL_INFO(dev)->gen >= 8) {
-
 
3442
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
3662
 
3443
			ring_freq = max(min_ring_freq, gpu_freq);
3663
		if (IS_HASWELL(dev)) {
3444
		} else if (IS_HASWELL(dev)) {
3664
			ring_freq = (gpu_freq * 5 + 3) / 4;
3445
			ring_freq = mult_frac(gpu_freq, 5, 4);
3665
			ring_freq = max(min_ring_freq, ring_freq);
3446
			ring_freq = max(min_ring_freq, ring_freq);
3666
			/* leave ia_freq as the default, chosen by cpufreq */
3447
			/* leave ia_freq as the default, chosen by cpufreq */
3667
		} else {
3448
		} else {
3668
			/* On older processors, there is no separate ring
3449
			/* On older processors, there is no separate ring
Line 3715... Line 3496...
3715
int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3496
int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3716
{
3497
{
3717
	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3498
	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3718
}
3499
}
Line 3719... Line -...
3719
 
-
 
3720
static void vlv_rps_timer_work(struct work_struct *work)
-
 
3721
{
-
 
3722
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-
 
3723
						    rps.vlv_work.work);
-
 
3724
 
-
 
3725
	/*
-
 
3726
	 * Timer fired, we must be idle.  Drop to min voltage state.
-
 
3727
	 * Note: we use RPe here since it should match the
-
 
3728
	 * Vmin we were shooting for.  That should give us better
-
 
3729
	 * perf when we come back out of RC6 than if we used the
-
 
3730
	 * min freq available.
-
 
3731
	 */
-
 
3732
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
3733
	if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-
 
3734
		valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
-
 
3735
	mutex_unlock(&dev_priv->rps.hw_lock);
-
 
3736
}
-
 
3737
 
3500
 
3738
static void valleyview_setup_pctx(struct drm_device *dev)
3501
static void valleyview_setup_pctx(struct drm_device *dev)
3739
{
3502
{
3740
	struct drm_i915_private *dev_priv = dev->dev_private;
3503
	struct drm_i915_private *dev_priv = dev->dev_private;
3741
	struct drm_i915_gem_object *pctx;
3504
	struct drm_i915_gem_object *pctx;
Line 3779... Line 3542...
3779
 
3542
 
3780
static void valleyview_enable_rps(struct drm_device *dev)
3543
static void valleyview_enable_rps(struct drm_device *dev)
3781
{
3544
{
3782
	struct drm_i915_private *dev_priv = dev->dev_private;
3545
	struct drm_i915_private *dev_priv = dev->dev_private;
3783
	struct intel_ring_buffer *ring;
3546
	struct intel_ring_buffer *ring;
3784
	u32 gtfifodbg, val;
3547
	u32 gtfifodbg, val, rc6_mode = 0;
Line 3785... Line 3548...
3785
	int i;
3548
	int i;
Line 3786... Line 3549...
3786
 
3549
 
3787
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3550
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
 
3551
 
3788
 
3552
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3789
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3553
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
Line 3790... Line 3554...
3790
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3554
				 gtfifodbg);
Line -... Line 3555...
-
 
3555
		I915_WRITE(GTFIFODBG, gtfifodbg);
3791
		I915_WRITE(GTFIFODBG, gtfifodbg);
3556
	}
Line 3792... Line 3557...
3792
	}
3557
 
3793
 
3558
	valleyview_setup_pctx(dev);
3794
	valleyview_setup_pctx(dev);
3559
 
3795
 
3560
	/* If VLV, Forcewake all wells, else re-direct to regular path */
Line 3815... Line 3580...
3815
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3580
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
Line 3816... Line 3581...
3816
 
3581
 
3817
	for_each_ring(ring, dev_priv, i)
3582
	for_each_ring(ring, dev_priv, i)
Line 3818... Line 3583...
3818
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3583
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
Line 3819... Line 3584...
3819
 
3584
 
3820
	I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3585
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
-
 
3586
 
3821
 
3587
	/* allows RC6 residency counter to work */
3822
	/* allows RC6 residency counter to work */
3588
	I915_WRITE(VLV_COUNTER_CONTROL,
-
 
3589
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
-
 
3590
				      VLV_MEDIA_RC6_COUNT_EN |
-
 
3591
				      VLV_RENDER_RC6_COUNT_EN));
-
 
3592
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
-
 
3593
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
-
 
3594
 
Line 3823... Line 3595...
3823
	I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
3595
	intel_print_rc6_info(dev, rc6_mode);
3824
	I915_WRITE(GEN6_RC_CONTROL,
-
 
3825
		   GEN7_RC_CTL_TO_MODE);
-
 
3826
 
-
 
3827
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
 
3828
	switch ((val >> 6) & 3) {
-
 
3829
	case 0:
-
 
3830
	case 1:
-
 
3831
		dev_priv->mem_freq = 800;
-
 
3832
		break;
-
 
3833
	case 2:
-
 
3834
		dev_priv->mem_freq = 1066;
-
 
3835
		break;
-
 
3836
	case 3:
-
 
Line 3837... Line 3596...
3837
		dev_priv->mem_freq = 1333;
3596
 
3838
		break;
3597
	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
Line 3839... Line 3598...
3839
	}
3598
 
3840
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
3599
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3841
 
3600
 
3842
	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
-
 
3843
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3601
	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
Line 3844... Line 3602...
3844
 
3602
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3845
	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
3603
 
3846
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3604
	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
3847
			 vlv_gpu_freq(dev_priv->mem_freq,
3605
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3848
				      dev_priv->rps.cur_delay),
-
 
3849
			 dev_priv->rps.cur_delay);
3606
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
Line 3850... Line 3607...
3850
 
3607
			 dev_priv->rps.cur_delay);
3851
	dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
3608
 
3852
	dev_priv->rps.hw_max = dev_priv->rps.max_delay;
3609
	dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
3853
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-
 
3854
			 vlv_gpu_freq(dev_priv->mem_freq,
3610
	dev_priv->rps.hw_max = dev_priv->rps.max_delay;
Line 3855... Line 3611...
3855
				      dev_priv->rps.max_delay),
3611
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3856
			 dev_priv->rps.max_delay);
3612
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
3857
 
3613
			 dev_priv->rps.max_delay);
3858
	dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
-
 
3859
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3614
 
Line 3860... Line 3615...
3860
			 vlv_gpu_freq(dev_priv->mem_freq,
3615
	dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
3861
				      dev_priv->rps.rpe_delay),
3616
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3862
			 dev_priv->rps.rpe_delay);
-
 
3863
 
3617
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
Line 3864... Line 3618...
3864
	dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
3618
			 dev_priv->rps.rpe_delay);
Line 3865... Line 3619...
3865
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3619
 
Line 3866... Line 3620...
3866
			 vlv_gpu_freq(dev_priv->mem_freq,
3620
	dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
3867
				      dev_priv->rps.min_delay),
3621
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
Line 3868... Line 3622...
3868
			 dev_priv->rps.min_delay);
3622
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
3869
 
3623
			 dev_priv->rps.min_delay);
3870
	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3624
 
Line 3991... Line 3745...
 		return;
 	}
 
 	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+	intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
Line 4609... Line 4365...
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
 	} else if (INTEL_INFO(dev)->gen >= 6) {
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 		cancel_work_sync(&dev_priv->rps.work);
-		if (IS_VALLEYVIEW(dev))
-			cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (IS_VALLEYVIEW(dev))
 			valleyview_disable_rps(dev);
 		else
 			gen6_disable_rps(dev);
+		dev_priv->rps.enabled = false;
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	}
 }
4624
 
4379
 
Line 4631... Line 4386...
4631
 
4386
 
Line 4632... Line 4387...
4632
	mutex_lock(&dev_priv->rps.hw_lock);
4387
	mutex_lock(&dev_priv->rps.hw_lock);
4633
 
4388
 
-
 
4389
	if (IS_VALLEYVIEW(dev)) {
-
 
4390
		valleyview_enable_rps(dev);
-
 
4391
	} else if (IS_BROADWELL(dev)) {
4634
	if (IS_VALLEYVIEW(dev)) {
4392
		gen8_enable_rps(dev);
4635
		valleyview_enable_rps(dev);
4393
		gen6_update_ring_freq(dev);
4636
	} else {
4394
	} else {
4637
	gen6_enable_rps(dev);
4395
	gen6_enable_rps(dev);
-
 
4396
	gen6_update_ring_freq(dev);
4638
	gen6_update_ring_freq(dev);
4397
	}
4639
	}
4398
	dev_priv->rps.enabled = true;
Line 4640... Line 4399...
4640
	mutex_unlock(&dev_priv->rps.hw_lock);
4399
	mutex_unlock(&dev_priv->rps.hw_lock);
4641
}
4400
}
Line 4678... Line 4437...
 	for_each_pipe(pipe) {
 		I915_WRITE(DSPCNTR(pipe),
 			   I915_READ(DSPCNTR(pipe)) |
 			   DISPPLANE_TRICKLE_FEED_DISABLE);
-		intel_flush_display_plane(dev_priv, pipe);
+		intel_flush_primary_plane(dev_priv, pipe);
 	}
 }
 
+static void ilk_init_lp_watermarks(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+
+	/*
+	 * Don't touch WM1S_LP_EN here.
+	 * Doing so could cause underruns.
+	 */
+}
 
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
Line 4715... Line 4488...
 		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
 	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
 	I915_WRITE(DISP_ARB_CTL,
 		   (I915_READ(DISP_ARB_CTL) |
 		    DISP_FBC_WM_DIS));
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
+
+	ilk_init_lp_watermarks(dev);
 
 	/*
 	 * Based on the document from hardware guys the following bits
 	 * should be set unconditionally in order to enable FBC.
Line 4824... Line 4596...
 	/* WaSetupGtModeTdRowDispatch:snb */
 	if (IS_SNB_GT1(dev))
 		I915_WRITE(GEN6_GT_MODE,
 			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
+	ilk_init_lp_watermarks(dev);
 
Line 4833... Line 4603...
 	I915_WRITE(CACHE_MODE_0,
4938
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4708
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4939
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4709
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4940
	}
4710
	}
4941
}
4711
}
Line 4942... Line 4712...
4942
 
4712
 
4943
static void haswell_init_clock_gating(struct drm_device *dev)
4713
static void gen8_init_clock_gating(struct drm_device *dev)
4944
{
4714
{
-
 
4715
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 4945... Line 4716...
4945
	struct drm_i915_private *dev_priv = dev->dev_private;
4716
	enum pipe i;
4946
 
4717
 
4947
	I915_WRITE(WM3_LP_ILK, 0);
4718
	I915_WRITE(WM3_LP_ILK, 0);
Line -... Line 4719...
-
 
4719
	I915_WRITE(WM2_LP_ILK, 0);
-
 
4720
	I915_WRITE(WM1_LP_ILK, 0);
-
 
4721
 
-
 
4722
	/* FIXME(BDW): Check all the w/a, some might only apply to
-
 
4723
	 * pre-production hw. */
-
 
4724
 
-
 
4725
	WARN(!i915_preliminary_hw_support,
-
 
4726
	     "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
-
 
4727
	I915_WRITE(HALF_SLICE_CHICKEN3,
-
 
4728
		   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
-
 
4729
	I915_WRITE(HALF_SLICE_CHICKEN3,
-
 
4730
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
-
 
4731
	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
-
 
4732
 
-
 
4733
	I915_WRITE(_3D_CHICKEN3,
-
 
4734
		   _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
-
 
4735
 
-
 
4736
	I915_WRITE(COMMON_SLICE_CHICKEN2,
-
 
4737
		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
-
 
4738
 
-
 
4739
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
-
 
4740
		   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
-
 
4741
 
-
 
4742
	/* WaSwitchSolVfFArbitrationPriority:bdw */
-
 
4743
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
-
 
4744
 
-
 
4745
	/* WaPsrDPAMaskVBlankInSRD:bdw */
-
 
4746
	I915_WRITE(CHICKEN_PAR1_1,
-
 
4747
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
-
 
4748
 
-
 
4749
	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
-
 
4750
	for_each_pipe(i) {
-
 
4751
		I915_WRITE(CHICKEN_PIPESL_1(i),
-
 
4752
			   I915_READ(CHICKEN_PIPESL_1(i) |
-
 
4753
				     DPRS_MASK_VBLANK_SRD));
-
 
4754
	}
-
 
4755
 
-
 
4756
	/* Use Force Non-Coherent whenever executing a 3D context. This is a
-
 
4757
	 * workaround for for a possible hang in the unlikely event a TLB
-
 
4758
	 * invalidation occurs during a PSD flush.
-
 
4759
	 */
-
 
4760
	I915_WRITE(HDC_CHICKEN0,
-
 
4761
		   I915_READ(HDC_CHICKEN0) |
-
 
4762
		   _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
-
 
4763
 
-
 
4764
	/* WaVSRefCountFullforceMissDisable:bdw */
-
 
4765
	/* WaDSRefCountFullforceMissDisable:bdw */
-
 
4766
	I915_WRITE(GEN7_FF_THREAD_MODE,
-
 
4767
		   I915_READ(GEN7_FF_THREAD_MODE) &
-
 
4768
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
-
 
4769
}
-
 
4770
 
-
 
4771
static void haswell_init_clock_gating(struct drm_device *dev)
-
 
4772
{
-
 
4773
	struct drm_i915_private *dev_priv = dev->dev_private;
4948
	I915_WRITE(WM2_LP_ILK, 0);
4774
 
4949
	I915_WRITE(WM1_LP_ILK, 0);
4775
	ilk_init_lp_watermarks(dev);
4950
 
4776
 
4951
	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4777
	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
Line 4993... Line 4819...
 static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t snpcr;
 
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
+	ilk_init_lp_watermarks(dev);
 
 	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
Line 5082... Line 4906...
 }
 
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	switch ((val >> 6) & 3) {
+	case 0:
+		dev_priv->mem_freq = 800;
+		break;
+	case 1:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 2:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 3:
+		dev_priv->mem_freq = 1333;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
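
The DDR-speed decode just moved here from valleyview_enable_rps(), and the mapping changed with it: the old code folded fields 0 and 1 into 800 MHz, while the new switch maps 0 to 800, 1 to 1066 and both 2 and 3 to 1333. A standalone sketch of the new two-bit field decode; only the switch's mapping is taken from the driver:

#include <stdio.h>

static int vlv_mem_freq_mhz(unsigned int gpu_freq_sts)
{
	switch ((gpu_freq_sts >> 6) & 3) {	/* two-bit DDR speed field */
	case 0:  return 800;
	case 1:  return 1066;
	default: return 1333;			/* fields 2 and 3 */
	}
}

int main(void)
{
	for (unsigned int f = 0; f < 4; f++)
		printf("field %u -> %d MHz\n", f, vlv_mem_freq_mhz(f << 6));
	return 0;
}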
Line 5261... Line 5105...
 {
 	if (HAS_PCH_LPT(dev))
 		lpt_suspend_hw(dev);
 }
 
+#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
+	for (i = 0;							\
+	     i < (power_domains)->power_well_count &&			\
+		 ((power_well) = &(power_domains)->power_wells[i]);	\
+	     i++)							\
+		if ((power_well)->domains & (domain_mask))
+
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+	for (i = (power_domains)->power_well_count - 1;			 \
+	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+	     i--)							 \
+		if ((power_well)->domains & (domain_mask))
 
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
  * be enabled.
  */
+static bool hsw_power_well_enabled(struct drm_device *dev,
+				   struct i915_power_well *power_well)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(HSW_PWR_WELL_DRIVER) ==
+		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+				    enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+
+	power_domains = &dev_priv->power_domains;
+
+	return power_domains->domain_use_count[domain];
+}
+
 bool intel_display_power_enabled(struct drm_device *dev,
 				 enum intel_display_power_domain domain)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+	struct i915_power_well *power_well;
+	bool is_enabled;
+	int i;
 
-	if (!HAS_POWER_WELL(dev))
-		return true;
-
-	switch (domain) {
-	case POWER_DOMAIN_PIPE_A:
-	case POWER_DOMAIN_TRANSCODER_EDP:
-		return true;
-	case POWER_DOMAIN_PIPE_B:
-	case POWER_DOMAIN_PIPE_C:
-	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-	case POWER_DOMAIN_TRANSCODER_A:
-	case POWER_DOMAIN_TRANSCODER_B:
-	case POWER_DOMAIN_TRANSCODER_C:
-		return I915_READ(HSW_PWR_WELL_DRIVER) ==
-		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-	default:
-		BUG();
-	}
-}
+	power_domains = &dev_priv->power_domains;
+
+	is_enabled = true;
+
+	mutex_lock(&power_domains->lock);
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+		if (power_well->always_on)
+			continue;
+
+		if (!power_well->is_enabled(dev, power_well)) {
+			is_enabled = false;
+			break;
+		}
+	}
+	mutex_unlock(&power_domains->lock);
+
+	return is_enabled;
+}
+
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	unsigned long irqflags;
+
+	/*
+	 * After we re-enable the power well, if we touch VGA register 0x3d5
+	 * we'll get unclaimed register interrupts. This stops after we write
+	 * anything to the VGA MSR register. The vgacon module uses this
+	 * register all the time, so if we unbind our driver and, as a
+	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
+	 * console_unlock(). So here we make sure to touch the VGA MSR
+	 * register, making sure vgacon can keep working normally without
+	 * triggering interrupts and error messages.
+	 */
+//   vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+    outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+	if (IS_BROADWELL(dev)) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+			   dev_priv->de_irq_mask[PIPE_B]);
+		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+			   ~dev_priv->de_irq_mask[PIPE_B] |
+			   GEN8_PIPE_VBLANK);
+		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+			   dev_priv->de_irq_mask[PIPE_C]);
+		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+			   ~dev_priv->de_irq_mask[PIPE_C] |
+			   GEN8_PIPE_VBLANK);
+		POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
+}
+
+static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	enum pipe p;
+	unsigned long irqflags;
+
+	/*
+	 * After this, the registers on the pipes that are part of the power
+	 * well will become zero, so we have to adjust our counters according to
+	 * that.
+	 *
+	 * FIXME: Should we do this in general in drm_vblank_post_modeset?
+	 */
+//   spin_lock_irqsave(&dev->vbl_lock, irqflags);
+//   for_each_pipe(p)
+//       if (p != PIPE_A)
+//           dev->vblank[p].last = 0;
+//   spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
 
-static void __intel_set_power_well(struct drm_device *dev, bool enable)
+static void hsw_set_power_well(struct drm_device *dev,
+			       struct i915_power_well *power_well, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool is_enabled, enable_requested;
 	uint32_t tmp;
+
+	WARN_ON(dev_priv->pc8.enabled);
Line 5314... Line 5251...
 			DRM_DEBUG_KMS("Enabling power well\n");
 			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
 				      HSW_PWR_WELL_STATE_ENABLED), 20))
 				DRM_ERROR("Timeout enabling power well\n");
 		}
+
+		hsw_power_well_post_enable(dev_priv);
 	} else {
 		if (enable_requested) {
-			unsigned long irqflags;
-			enum pipe p;
-
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
 			POSTING_READ(HSW_PWR_WELL_DRIVER);
 			DRM_DEBUG_KMS("Requesting to disable the power well\n");
 
-			/*
-			 * After this, the registers on the pipes that are part
-			 * of the power well will become zero, so we have to
-			 * adjust our counters according to that.
-			 *
-			 * FIXME: Should we do this in general in
-			 * drm_vblank_post_modeset?
-			 */
-			spin_lock_irqsave(&dev->vbl_lock, irqflags);
-			for_each_pipe(p)
-				if (p != PIPE_A)
-					dev->last_vblank[p] = 0;
-			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+			hsw_power_well_post_disable(dev_priv);
 		}
 	}
 }
 
+static void __intel_power_well_get(struct drm_device *dev,
+				   struct i915_power_well *power_well)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!power_well->count++ && power_well->set) {
+		hsw_disable_package_c8(dev_priv);
+		power_well->set(dev, power_well, true);
+	}
+}
+
+static void __intel_power_well_put(struct drm_device *dev,
+				   struct i915_power_well *power_well)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!power_well->count);
+
+	if (!--power_well->count && power_well->set &&
+	    i915_disable_power_well) {
+		power_well->set(dev, power_well, false);
+		hsw_enable_package_c8(dev_priv);
+	}
+}
+
+void intel_display_power_get(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	power_domains = &dev_priv->power_domains;
+
+	mutex_lock(&power_domains->lock);
+
+	for_each_power_well(i, power_well, BIT(domain), power_domains)
+		__intel_power_well_get(dev, power_well);
+
+	power_domains->domain_use_count[domain]++;
+
+	mutex_unlock(&power_domains->lock);
+}
+
+void intel_display_power_put(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	power_domains = &dev_priv->power_domains;
+
+	mutex_lock(&power_domains->lock);
+
+	WARN_ON(!power_domains->domain_use_count[domain]);
+	power_domains->domain_use_count[domain]--;
+
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+		__intel_power_well_put(dev, power_well);
+
+	mutex_unlock(&power_domains->lock);
+}
5344
 
5334
 
5345
static struct i915_power_well *hsw_pwr;
5335
static struct i915_power_domains *hsw_pwr;
5346
 
5336
 
-
 
5337
/* Display audio driver power well request */
-
 
5338
void i915_request_power_well(void)
5347
/* Display audio driver power well request */
5339
{
5348
void i915_request_power_well(void)
5340
	struct drm_i915_private *dev_priv;
Line 5349... Line 5341...
5349
{
5341
 
5350
	if (WARN_ON(!hsw_pwr))
-
 
5351
		return;
5342
	if (WARN_ON(!hsw_pwr))
5352
 
5343
		return;
5353
	spin_lock_irq(&hsw_pwr->lock);
-
 
5354
	if (!hsw_pwr->count++ &&
5344
 
5355
			!hsw_pwr->i915_request)
5345
	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
Line 5356... Line 5346...
5356
		__intel_set_power_well(hsw_pwr->device, true);
5346
				power_domains);
5357
	spin_unlock_irq(&hsw_pwr->lock);
5347
	intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
5358
}
5348
}
-
 
5349
EXPORT_SYMBOL_GPL(i915_request_power_well);
-
 
5350
 
5359
EXPORT_SYMBOL_GPL(i915_request_power_well);
5351
/* Display audio driver power well release */
5360
 
5352
void i915_release_power_well(void)
Line 5361... Line 5353...
5361
/* Display audio driver power well release */
5353
{
5362
void i915_release_power_well(void)
5354
	struct drm_i915_private *dev_priv;
5363
{
-
 
5364
	if (WARN_ON(!hsw_pwr))
-
 
5365
		return;
5355
 
5366
 
-
 
5367
	spin_lock_irq(&hsw_pwr->lock);
5356
	if (WARN_ON(!hsw_pwr))
5368
	WARN_ON(!hsw_pwr->count);
5357
		return;
Line -... Line 5358...
-
 
5358
 
-
 
5359
	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
-
 
5360
				power_domains);
-
 
5361
	intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
-
 
5362
}
-
 
5363
EXPORT_SYMBOL_GPL(i915_release_power_well);
-
 
5364
 
-
 
5365
static struct i915_power_well i9xx_always_on_power_well[] = {
-
 
5366
	{
-
 
5367
		.name = "always-on",
-
 
5368
		.always_on = 1,
-
 
5369
		.domains = POWER_DOMAIN_MASK,
-
 
5370
	},
-
 
5371
};
-
 
5372
 
-
 
5373
static struct i915_power_well hsw_power_wells[] = {
-
 
5374
	{
-
 
5375
		.name = "always-on",
-
 
5376
		.always_on = 1,
-
 
5377
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
-
 
5378
	},
-
 
5379
	{
-
 
5380
		.name = "display",
-
 
5381
		.domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
-
 
5382
		.is_enabled = hsw_power_well_enabled,
-
 
5383
		.set = hsw_set_power_well,
-
 
5384
	},
-
 
5385
};
-
 
5386
 
-
 
5387
static struct i915_power_well bdw_power_wells[] = {
-
 
5388
	{
-
 
5389
		.name = "always-on",
-
 
5390
		.always_on = 1,
-
 
5391
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
-
 
5392
	},
-
 
5393
	{
-
 
5394
		.name = "display",
-
 
5395
		.domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
-
 
5396
		.is_enabled = hsw_power_well_enabled,
-
 
5397
		.set = hsw_set_power_well,
-
 
5398
	},
5369
	if (!--hsw_pwr->count &&
5399
};
5370
		       !hsw_pwr->i915_request)
5400
 
5371
		__intel_set_power_well(hsw_pwr->device, false);
5401
#define set_power_wells(power_domains, __power_wells) ({		\
-
 
5402
	(power_domains)->power_wells = (__power_wells);			\
Line 5372... Line 5403...
5372
	spin_unlock_irq(&hsw_pwr->lock);
5403
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
Line -... Line 5404...
-
 
5404
})
-
 
5405
 
-
 
5406
int intel_power_domains_init(struct drm_device *dev)
-
 
5407
{
-
 
5408
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
5409
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5373
}
5410
 
-
 
5411
	mutex_init(&power_domains->lock);
5374
EXPORT_SYMBOL_GPL(i915_release_power_well);
5412
 
5375
 
5413
	/*
-
 
5414
	 * The enabling order will be from lower to higher indexed wells,
-
 
5415
	 * the disabling order is reversed.
-
 
5416
	 */
Line 5376... Line 5417...
5376
int i915_init_power_well(struct drm_device *dev)
5417
	if (IS_HASWELL(dev)) {
5377
{
5418
		set_power_wells(power_domains, hsw_power_wells);
Line 5378... Line 5419...
5378
	struct drm_i915_private *dev_priv = dev->dev_private;
5419
		hsw_pwr = power_domains;
5379
 
5420
	} else if (IS_BROADWELL(dev)) {
5380
	hsw_pwr = &dev_priv->power_well;
5421
		set_power_wells(power_domains, bdw_power_wells);
5381
 
5422
		hsw_pwr = power_domains;
Line 5382... Line 5423...
5382
	hsw_pwr->device = dev;
5423
	} else {
5383
	spin_lock_init(&hsw_pwr->lock);
5424
		set_power_wells(power_domains, i9xx_always_on_power_well);
5384
	hsw_pwr->count = 0;
5425
	}
5385
 
5426
 
5386
	return 0;
-
 
5387
}
-
 
5388
 
-
 
5389
void i915_remove_power_well(struct drm_device *dev)
-
 
5390
{
5427
	return 0;
5391
	hsw_pwr = NULL;
5428
}
5392
}
-
 
5393
 
-
 
5394
void intel_set_power_well(struct drm_device *dev, bool enable)
-
 
Line 5395... Line 5429...
5395
{
5429
 
-
 
5430
void intel_power_domains_remove(struct drm_device *dev)
5396
	struct drm_i915_private *dev_priv = dev->dev_private;
5431
{
5397
	struct i915_power_well *power_well = &dev_priv->power_well;
5432
	hsw_pwr = NULL;
5398
 
-
 
5399
	if (!HAS_POWER_WELL(dev))
5433
}
5400
		return;
-
 
5401
 
-
 
5402
	if (!i915_disable_power_well && !enable)
5434
 
5403
		return;
5435
static void intel_power_domains_resume(struct drm_device *dev)
Line 5404... Line 5436...
5404
 
5436
{
5405
	spin_lock_irq(&power_well->lock);
5437
	struct drm_i915_private *dev_priv = dev->dev_private;
5406
	power_well->i915_request = enable;
5438
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5407
 
5439
	struct i915_power_well *power_well;
5408
	/* only reject "disable" power well request */
5440
	int i;
5409
	if (power_well->count && !enable) {
5441
 
5410
		spin_unlock_irq(&power_well->lock);
5442
	mutex_lock(&power_domains->lock);
5411
		return;
5443
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
5412
	}
5444
		if (power_well->set)
Line 5413... Line -...
5413
 
-
 
5414
	__intel_set_power_well(dev, enable);
-
 
5415
	spin_unlock_irq(&power_well->lock);
-
 
5416
}
5445
			power_well->set(dev, power_well, power_well->count > 0);
5417
 
5446
	}
-
 
5447
	mutex_unlock(&power_domains->lock);
-
 
5448
}
-
 
5449
 
-
 
5450
/*
Line 5418... Line 5451...
5418
/*
5451
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5419
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5452
 * when not needed anymore. We have 4 registers that can request the power well
5420
 * when not needed anymore. We have 4 registers that can request the power well
5453
 * to be enabled, and it will only be disabled if none of the registers is
5421
 * to be enabled, and it will only be disabled if none of the registers is
5454
 * requesting it to be enabled.
Line 5446... Line 5479...
5446
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5479
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5447
{
5480
{
5448
	hsw_enable_package_c8(dev_priv);
5481
	hsw_enable_package_c8(dev_priv);
5449
}
5482
}
Line -... Line 5483...
+
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	return;
+}
+
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	return;
+}
+
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	dev_priv->pm.suspended = false;
+
+	return;
+}
+
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct device *device = &dev->pdev->dev;
+
+	return;
+}
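On mainline Linux these four helpers take and drop a runtime-PM reference on the PCI device (pm_runtime_get_sync() and friends). In this KolibriOS port there is no runtime-PM core, so they are deliberate no-op stubs: the unused dev/device locals keep the bodies shaped like the originals while letting callers bracket hardware access unconditionally, e.g.:

	intel_runtime_pm_get(dev_priv);	/* no-op here; holds a PM ref on mainline */
	/* ... touch registers ... */
	intel_runtime_pm_put(dev_priv);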
Line 5450... Line 5520...
 
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
Line 5454... Line 5524...
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (I915_HAS_FBC(dev)) {
-		if (HAS_PCH_SPLIT(dev)) {
-			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-			if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-				dev_priv->display.enable_fbc =
-					gen7_enable_fbc;
-			else
-				dev_priv->display.enable_fbc =
-					ironlake_enable_fbc;
-			dev_priv->display.disable_fbc = ironlake_disable_fbc;
+	if (HAS_FBC(dev)) {
+		if (INTEL_INFO(dev)->gen >= 7) {
+			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+			dev_priv->display.enable_fbc = gen7_enable_fbc;
+			dev_priv->display.disable_fbc = ironlake_disable_fbc;
+		} else if (INTEL_INFO(dev)->gen >= 5) {
+			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+			dev_priv->display.enable_fbc = ironlake_enable_fbc;
+			dev_priv->display.disable_fbc = ironlake_disable_fbc;
 		} else if (IS_GM45(dev)) {
 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 			dev_priv->display.enable_fbc = g4x_enable_fbc;
 			dev_priv->display.disable_fbc = g4x_disable_fbc;
-		} else if (IS_CRESTLINE(dev)) {
-			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-			dev_priv->display.enable_fbc = i8xx_enable_fbc;
-			dev_priv->display.disable_fbc = i8xx_disable_fbc;
-		}
-		/* 855GM needs testing */
+		} else {
+			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+			dev_priv->display.enable_fbc = i8xx_enable_fbc;
+			dev_priv->display.disable_fbc = i8xx_disable_fbc;
+
+			/* This value was pulled out of someone's hat */
+			I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+		}
 	}
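intel_init_pm() only fills the per-platform vtable; generic code elsewhere in this file calls through it without repeating the platform checks. The consuming side, sketched with a hypothetical wrapper name (real callers also handle locking and deferred activation):

/* Sketch: generic FBC code dispatches through the vtable set up above. */
static void example_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;	/* platform has no FBC hook */

	dev_priv->display.enable_fbc(crtc);
}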
 
Line 5483... Line 5554...
 
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
 		intel_setup_wm_latency(dev);
 
-		if (IS_GEN5(dev)) {
-			if (dev_priv->wm.pri_latency[1] &&
-			    dev_priv->wm.spr_latency[1] &&
-			    dev_priv->wm.cur_latency[1])
-				dev_priv->display.update_wm = ironlake_update_wm;
-			else {
-				DRM_DEBUG_KMS("Failed to get proper latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
-			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
-		} else if (IS_GEN6(dev)) {
-			if (dev_priv->wm.pri_latency[0] &&
-			    dev_priv->wm.spr_latency[0] &&
-			    dev_priv->wm.cur_latency[0]) {
-				dev_priv->display.update_wm = sandybridge_update_wm;
-				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
-			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-		} else if (IS_IVYBRIDGE(dev)) {
-			if (dev_priv->wm.pri_latency[0] &&
-			    dev_priv->wm.spr_latency[0] &&
-			    dev_priv->wm.cur_latency[0]) {
-				dev_priv->display.update_wm = ivybridge_update_wm;
-				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
-			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
-		} else if (IS_HASWELL(dev)) {
-			if (dev_priv->wm.pri_latency[0] &&
-			    dev_priv->wm.spr_latency[0] &&
-			    dev_priv->wm.cur_latency[0]) {
-				dev_priv->display.update_wm = haswell_update_wm;
-				dev_priv->display.update_sprite_wm =
-					haswell_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
-			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
-		} else
-			dev_priv->display.update_wm = NULL;
+		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
+		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
+			dev_priv->display.update_wm = ilk_update_wm;
+			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
+		} else {
+			DRM_DEBUG_KMS("Failed to read display plane latency. "
+				      "Disable CxSR\n");
+		}
+
+		if (IS_GEN5(dev))
+			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+		else if (IS_GEN6(dev))
+			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+		else if (IS_IVYBRIDGE(dev))
+			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+		else if (IS_HASWELL(dev))
+			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+		else if (INTEL_INFO(dev)->gen == 8)
+			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->display.update_wm = valleyview_update_wm;
 		dev_priv->display.init_clock_gating =
Line 5566... Line 5608...
 			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
 	} else if (IS_GEN3(dev)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
-	} else if (IS_I865G(dev)) {
-		dev_priv->display.update_wm = i830_update_wm;
-		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-		dev_priv->display.get_fifo_size = i830_get_fifo_size;
-	} else if (IS_I85X(dev)) {
-		dev_priv->display.update_wm = i9xx_update_wm;
-		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
-		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-	} else {
-		dev_priv->display.update_wm = i830_update_wm;
-		dev_priv->display.init_clock_gating = i830_init_clock_gating;
-		if (IS_845G(dev))
-			dev_priv->display.get_fifo_size = i845_get_fifo_size;
-		else
-			dev_priv->display.get_fifo_size = i830_get_fifo_size;
+	} else if (IS_GEN2(dev)) {
+		if (INTEL_INFO(dev)->num_pipes == 1) {
+			dev_priv->display.update_wm = i845_update_wm;
+			dev_priv->display.get_fifo_size = i845_get_fifo_size;
+		} else {
+			dev_priv->display.update_wm = i9xx_update_wm;
+			dev_priv->display.get_fifo_size = i830_get_fifo_size;
+		}
+
+		if (IS_I85X(dev) || IS_I865G(dev))
+			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+		else
+			dev_priv->display.init_clock_gating = i830_init_clock_gating;
+	} else {
+		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
 	}
 }
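The merged latency check in the PCH branch above preserves the old per-platform behaviour: GEN5 tests the level-1 latencies where GEN6+ tested level 0, exactly as the removed branches did. Written out as a predicate (hypothetical helper, equivalent to the inline condition):

/* Hypothetical helper equivalent to the merged condition above. */
static bool ilk_has_wm_latencies(struct drm_i915_private *dev_priv)
{
	int level = IS_GEN5(dev_priv->dev) ? 1 : 0;

	return dev_priv->wm.pri_latency[level] &&
	       dev_priv->wm.spr_latency[level] &&
	       dev_priv->wm.cur_latency[level];
}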
Line 5588... Line 5630...
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
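Only the tail of sandybridge_pcode_read() appears in the next hunk; the collapsed body is the usual GEN6+ pcode mailbox handshake. A sketch of that flow under those assumptions (error reporting trimmed; verify against the full file):

	/* bail if firmware still owns the mailbox */
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* pcode clears the ready bit once it has consumed the request */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		return -ETIMEDOUT;

	*val = I915_READ(GEN6_PCODE_DATA);
	/* then the visible tail below: clear GEN6_PCODE_DATA and return 0 */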
Line 5631... Line 5673...
 	I915_WRITE(GEN6_PCODE_DATA, 0);
 
 	return 0;
 }
 
-int vlv_gpu_freq(int ddr_freq, int val)
-{
-	int mult, base;
-
-	switch (ddr_freq) {
-	case 800:
-		mult = 20;
-		base = 120;
-		break;
-	case 1066:
-		mult = 22;
-		base = 133;
-		break;
-	case 1333:
-		mult = 21;
-		base = 125;
-		break;
-	default:
-		return -1;
-	}
-
-	return ((val - 0xbd) * mult) + base;
-}
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+	int div;
+
+	/* 4 x czclk */
+	switch (dev_priv->mem_freq) {
+	case 800:
+		div = 10;
+		break;
+	case 1066:
+		div = 12;
+		break;
+	case 1333:
+		div = 16;
+		break;
+	default:
+		return -1;
+	}
+
+	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
+}
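The rev 4560 conversion works in quarters of the czclk, so one opcode step is mem_freq / (4 * div) MHz, about 22.2 MHz at mem_freq = 1066. A quick round-trip check of the two new formulas, assuming mem_freq = 1066 (this is a standalone sanity check, not driver code; div_round_closest() mimics the kernel's DIV_ROUND_CLOSEST for positive operands):

#include <stdio.h>

/* Round-to-nearest integer division for positive operands. */
static int div_round_closest(int n, int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	int mem_freq = 1066, div = 12, mul = 12;
	int val = 0xc0;	/* an opcode a little above the 0xbd base */

	/* vlv_gpu_freq(): opcode -> MHz */
	int mhz = div_round_closest(mem_freq * (val + 6 - 0xbd), 4 * div);

	/* vlv_freq_opcode(): MHz -> opcode */
	int back = div_round_closest(4 * mul * mhz, mem_freq) + 0xbd - 6;

	/* prints: opcode 0xc0 -> 200 MHz -> opcode 0xc0 */
	printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n", val, mhz, back);
	return 0;
}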
Line 5659... Line 5699...
 
-int vlv_freq_opcode(int ddr_freq, int val)
-{
-	int mult, base;
-
-	switch (ddr_freq) {
-	case 800:
-		mult = 20;
-		base = 120;
-		break;
-	case 1066:
-		mult = 22;
-		base = 133;
-		break;
-	case 1333:
-		mult = 21;
-		base = 125;
-		break;
-	default:
-		return -1;
-	}
-
-	val /= mult;
-	val -= base / mult;
-	val += 0xbd;
-
-	if (val > 0xea)
-		val = 0xea;
-
-	return val;
-}
-
-void intel_pm_init(struct drm_device *dev)
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+	int mul;
+
+	/* 4 x czclk */
+	switch (dev_priv->mem_freq) {
+	case 800:
+		mul = 10;
+		break;
+	case 1066:
+		mul = 12;
+		break;
+	case 1333:
+		mul = 16;
+		break;
+	default:
+		return -1;
+	}
+
+	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
+}
+
+void intel_pm_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_init(&dev_priv->rps.hw_lock);
+
+	mutex_init(&dev_priv->pc8.lock);