Subversion Repositories Kolibri OS

Diff between Rev 4560 and Rev 5060
@@ -49 +49 @@
     u8 v;
     asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
     return v;
 }
 
+union ktime {
+    s64 tv64;
+};
+
+typedef union ktime ktime_t;        /* Kill this */
+
+#define ktime_to_ns(kt)         ((kt).tv64)
+
+static inline u64 ktime_get_raw_ns(void)
+{
+    return 0; //ktime_to_ns(ktime_get_raw());
+}
-
 /**
  * RC6 is a special power stage which allows the GPU to enter an very
  * low-voltage mode when idle, using down to 0V while at this stage.  This
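The hunk above stubs the kernel's ktime interface for this port: ktime_get_raw_ns() returns 0 until a real clock source is wired up. A usage sketch (illustrative only, not part of the diff):

	/* Exercises only the shim introduced above. */
	ktime_t kt = { .tv64 = 1000000 };   /* 1 ms, expressed in ns */
	u64 ns  = ktime_to_ns(kt);          /* expands to kt.tv64 -> 1000000 */
	u64 now = ktime_get_raw_ns();       /* stubbed: always 0 here */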
@@ -108 +119 @@
 
 static void i8xx_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
-	int plane, i;
+	int i;
 	u32 fbc_ctl;
 
 	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
@@ -125 +135 @@
 	/* FBC_CTL wants 32B or 64B units */
 	if (IS_GEN2(dev))
 		cfb_pitch = (cfb_pitch / 32) - 1;
 	else
 	cfb_pitch = (cfb_pitch / 64) - 1;
-	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
 	/* Clear old tags */
 	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
 		I915_WRITE(FBC_TAG + (i * 4), 0);
 
 	if (IS_GEN4(dev)) {
 		u32 fbc_ctl2;
 
 	/* Set it up... */
 	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-	fbc_ctl2 |= plane;
+		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
 	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
@@ -151 +160 @@
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
-	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
+	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
 		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
 }
Line 166... Line 175...
166
 
175
 
167
static void g4x_enable_fbc(struct drm_crtc *crtc)
176
static void g4x_enable_fbc(struct drm_crtc *crtc)
168
{
177
{
169
	struct drm_device *dev = crtc->dev;
178
	struct drm_device *dev = crtc->dev;
170
	struct drm_i915_private *dev_priv = dev->dev_private;
179
	struct drm_i915_private *dev_priv = dev->dev_private;
171
	struct drm_framebuffer *fb = crtc->fb;
-
 
172
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
180
	struct drm_framebuffer *fb = crtc->primary->fb;
173
	struct drm_i915_gem_object *obj = intel_fb->obj;
181
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
174
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
175
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
182
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Line -... Line 183...
-
 
183
	u32 dpfc_ctl;
-
 
184
 
-
 
185
	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
-
 
186
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
176
	u32 dpfc_ctl;
187
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
177
 
188
	else
178
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
-
 
Line 179... Line 189...
179
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
189
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
Line 180... Line 190...
180
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
190
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
181
 
191
 
Line 182... Line 192...
182
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
192
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
183
 
193
 
Line 184... Line 194...
184
	/* enable it... */
194
	/* enable it... */
Line 236... Line 246...
236
 
246
 
237
static void ironlake_enable_fbc(struct drm_crtc *crtc)
247
static void ironlake_enable_fbc(struct drm_crtc *crtc)
238
{
248
{
239
	struct drm_device *dev = crtc->dev;
249
	struct drm_device *dev = crtc->dev;
240
	struct drm_i915_private *dev_priv = dev->dev_private;
250
	struct drm_i915_private *dev_priv = dev->dev_private;
241
	struct drm_framebuffer *fb = crtc->fb;
-
 
242
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
251
	struct drm_framebuffer *fb = crtc->primary->fb;
243
	struct drm_i915_gem_object *obj = intel_fb->obj;
252
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
244
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
245
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
253
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Line 246... Line 254...
246
	u32 dpfc_ctl;
254
	u32 dpfc_ctl;
-
 
255
 
-
 
256
	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
-
 
257
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-
 
258
		dev_priv->fbc.threshold++;
-
 
259
 
-
 
260
	switch (dev_priv->fbc.threshold) {
247
 
261
	case 4:
-
 
262
	case 3:
-
 
263
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
248
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
264
		break;
249
	dpfc_ctl &= DPFC_RESERVED;
265
	case 2:
-
 
266
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
250
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
267
		break;
-
 
268
	case 1:
-
 
269
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
251
	/* Set persistent mode for front-buffer rendering, ala X. */
270
		break;
252
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
271
	}
253
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
272
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
254
	if (IS_GEN5(dev))
-
 
Line 255... Line 273...
255
		dpfc_ctl |= obj->fence_reg;
273
	if (IS_GEN5(dev))
256
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
274
		dpfc_ctl |= obj->fence_reg;
257
 
275
 
258
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
276
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
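The new switch reads dev_priv->fbc.threshold back into a hardware compression-limit bit; 16bpp framebuffers (cpp == 2) bump the threshold one step first. Worked through, as the code above is written:

	/* threshold 1       -> DPFC_CTL_LIMIT_1X (compress at most 1:1)
	 * threshold 2       -> DPFC_CTL_LIMIT_2X (up to 2:1)
	 * threshold 3 or 4  -> DPFC_CTL_LIMIT_4X (up to 4:1)
	 * e.g. RGB565 with a starting threshold of 1 ends up at LIMIT_2X. */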
@@ -294 +312 @@
 
 static void gen7_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	u32 dpfc_ctl;
 
-	I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
+	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		dev_priv->fbc.threshold++;
+
+	switch (dev_priv->fbc.threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
+		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+		break;
+	case 1:
+		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
+
+	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
-	I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
-		   IVB_DPFC_CTL_FENCE_EN |
-		   intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
+	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
 	if (IS_IVYBRIDGE(dev)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
-		I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
+		I915_WRITE(ILK_DISPLAY_CHICKEN1,
+			   I915_READ(ILK_DISPLAY_CHICKEN1) |
+			   ILK_FBCQ_DIS);
 	} else {
-		/* WaFbcAsynchFlipDisableFbcQueue:hsw */
-		I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
-			   HSW_BYPASS_FBC_QUEUE);
+		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
+			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+			   HSW_FBCQ_DIS);
346
	mutex_lock(&dev->struct_mutex);
382
	mutex_lock(&dev->struct_mutex);
347
	if (work == dev_priv->fbc.fbc_work) {
383
	if (work == dev_priv->fbc.fbc_work) {
348
		/* Double check that we haven't switched fb without cancelling
384
		/* Double check that we haven't switched fb without cancelling
349
		 * the prior work.
385
		 * the prior work.
350
		 */
386
		 */
351
		if (work->crtc->fb == work->fb) {
387
		if (work->crtc->primary->fb == work->fb) {
352
			dev_priv->display.enable_fbc(work->crtc);
388
			dev_priv->display.enable_fbc(work->crtc);
Line 353... Line 389...
353
 
389
 
354
			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
390
			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
355
			dev_priv->fbc.fb_id = work->crtc->fb->base.id;
391
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
356
			dev_priv->fbc.y = work->crtc->y;
392
			dev_priv->fbc.y = work->crtc->y;
Line 357... Line 393...
357
		}
393
		}
358
 
394
 
Line 403... Line 439...
403
		dev_priv->display.enable_fbc(crtc);
439
		dev_priv->display.enable_fbc(crtc);
404
		return;
440
		return;
405
	}
441
	}
Line 406... Line 442...
406
 
442
 
407
	work->crtc = crtc;
443
	work->crtc = crtc;
408
	work->fb = crtc->fb;
444
	work->fb = crtc->primary->fb;
Line 409... Line 445...
409
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
445
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
Line 410... Line 446...
410
 
446
 
Line 472... Line 508...
472
{
508
{
473
	struct drm_i915_private *dev_priv = dev->dev_private;
509
	struct drm_i915_private *dev_priv = dev->dev_private;
474
	struct drm_crtc *crtc = NULL, *tmp_crtc;
510
	struct drm_crtc *crtc = NULL, *tmp_crtc;
475
	struct intel_crtc *intel_crtc;
511
	struct intel_crtc *intel_crtc;
476
	struct drm_framebuffer *fb;
512
	struct drm_framebuffer *fb;
477
	struct intel_framebuffer *intel_fb;
-
 
478
	struct drm_i915_gem_object *obj;
513
	struct drm_i915_gem_object *obj;
479
	const struct drm_display_mode *adjusted_mode;
514
	const struct drm_display_mode *adjusted_mode;
480
	unsigned int max_width, max_height;
515
	unsigned int max_width, max_height;
Line 481... Line 516...
481
 
516
 
482
	if (!HAS_FBC(dev)) {
517
	if (!HAS_FBC(dev)) {
483
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
518
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
484
		return;
519
		return;
Line 485... Line 520...
485
	}
520
	}
486
 
521
 
487
	if (!i915_powersave) {
522
	if (!i915.powersave) {
488
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
523
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
489
			DRM_DEBUG_KMS("fbc disabled per module param\n");
524
			DRM_DEBUG_KMS("fbc disabled per module param\n");
Line 497... Line 532...
497
	 *   - more than one pipe is active
532
	 *   - more than one pipe is active
498
	 *   - changing FBC params (stride, fence, mode)
533
	 *   - changing FBC params (stride, fence, mode)
499
	 *   - new fb is too large to fit in compressed buffer
534
	 *   - new fb is too large to fit in compressed buffer
500
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
535
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
501
	 */
536
	 */
502
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
537
	for_each_crtc(dev, tmp_crtc) {
503
		if (intel_crtc_active(tmp_crtc) &&
538
		if (intel_crtc_active(tmp_crtc) &&
504
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
539
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
505
			if (crtc) {
540
			if (crtc) {
506
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
541
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
507
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
542
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
Line 509... Line 544...
509
			}
544
			}
510
			crtc = tmp_crtc;
545
			crtc = tmp_crtc;
511
		}
546
		}
512
	}
547
	}
Line 513... Line 548...
513
 
548
 
514
	if (!crtc || crtc->fb == NULL) {
549
	if (!crtc || crtc->primary->fb == NULL) {
515
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
550
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
516
		DRM_DEBUG_KMS("no output, disabling\n");
551
		DRM_DEBUG_KMS("no output, disabling\n");
517
		goto out_disable;
552
		goto out_disable;
Line 518... Line 553...
518
	}
553
	}
519
 
554
 
520
	intel_crtc = to_intel_crtc(crtc);
-
 
521
	fb = crtc->fb;
555
	intel_crtc = to_intel_crtc(crtc);
522
	intel_fb = to_intel_framebuffer(fb);
556
	fb = crtc->primary->fb;
Line 523... Line 557...
523
	obj = intel_fb->obj;
557
	obj = intel_fb_obj(fb);
524
	adjusted_mode = &intel_crtc->config.adjusted_mode;
-
 
525
 
558
	adjusted_mode = &intel_crtc->config.adjusted_mode;
526
	if (i915_enable_fbc < 0 &&
559
 
527
	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
560
	if (i915.enable_fbc < 0) {
528
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
561
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
529
			DRM_DEBUG_KMS("disabled per chip default\n");
562
			DRM_DEBUG_KMS("disabled per chip default\n");
530
		goto out_disable;
563
		goto out_disable;
531
	}
564
	}
532
	if (!i915_enable_fbc) {
565
	if (!i915.enable_fbc) {
533
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
566
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
534
		DRM_DEBUG_KMS("fbc disabled per module param\n");
567
		DRM_DEBUG_KMS("fbc disabled per module param\n");
Line 540... Line 573...
540
		DRM_DEBUG_KMS("mode incompatible with compression, "
573
		DRM_DEBUG_KMS("mode incompatible with compression, "
541
			      "disabling\n");
574
			      "disabling\n");
542
		goto out_disable;
575
		goto out_disable;
543
	}
576
	}
Line -... Line 577...
-
 
577
 
-
 
578
	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
-
 
579
		max_width = 4096;
544
 
580
		max_height = 4096;
545
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
581
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
546
		max_width = 4096;
582
		max_width = 4096;
547
		max_height = 2048;
583
		max_height = 2048;
548
	} else {
584
	} else {
549
		max_width = 2048;
585
		max_width = 2048;
Line 553... Line 589...
553
	    intel_crtc->config.pipe_src_h > max_height) {
589
	    intel_crtc->config.pipe_src_h > max_height) {
554
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
590
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
555
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
591
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
556
		goto out_disable;
592
		goto out_disable;
557
	}
593
	}
558
	if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
594
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
559
	    intel_crtc->plane != PLANE_A) {
595
	    intel_crtc->plane != PLANE_A) {
560
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
596
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
561
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
597
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
562
		goto out_disable;
598
		goto out_disable;
563
	}
599
	}
Line 574... Line 610...
574
 
610
 
575
	/* If the kernel debugger is active, always disable compression */
611
	/* If the kernel debugger is active, always disable compression */
576
	if (in_dbg_master())
612
	if (in_dbg_master())
Line 577... Line 613...
577
		goto out_disable;
613
		goto out_disable;
-
 
614
 
578
 
615
	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
579
	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
616
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
580
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
617
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
581
		DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
618
		DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
Line 633... Line 670...
633
	i915_gem_stolen_cleanup_compression(dev);
670
	i915_gem_stolen_cleanup_compression(dev);
634
}
671
}
Line 635... Line 672...
635
 
672
 
636
static void i915_pineview_get_mem_freq(struct drm_device *dev)
673
static void i915_pineview_get_mem_freq(struct drm_device *dev)
637
{
674
{
638
	drm_i915_private_t *dev_priv = dev->dev_private;
675
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 639... Line 676...
639
	u32 tmp;
676
	u32 tmp;
Line 640... Line 677...
640
 
677
 
Line 672... Line 709...
672
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
709
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
673
}
710
}
Line 674... Line 711...
674
 
711
 
675
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
712
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
676
{
713
{
677
	drm_i915_private_t *dev_priv = dev->dev_private;
714
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 678... Line 715...
678
	u16 ddrpll, csipll;
715
	u16 ddrpll, csipll;
679
 
716
 
Line 800... Line 837...
800
	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
837
	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
Line 801... Line 838...
801
 
838
 
802
	return NULL;
839
	return NULL;
Line 803... Line 840...
803
}
840
}
804
 
841
 
805
static void pineview_disable_cxsr(struct drm_device *dev)
842
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-
 
843
{
-
 
844
	struct drm_device *dev = dev_priv->dev;
-
 
845
	u32 val;
-
 
846
 
-
 
847
	if (IS_VALLEYVIEW(dev)) {
-
 
848
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
-
 
849
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
-
 
850
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
-
 
851
	} else if (IS_PINEVIEW(dev)) {
-
 
852
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
-
 
853
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
-
 
854
		I915_WRITE(DSPFW3, val);
-
 
855
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
-
 
856
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
-
 
857
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
-
 
858
		I915_WRITE(FW_BLC_SELF, val);
-
 
859
	} else if (IS_I915GM(dev)) {
-
 
860
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
-
 
861
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
-
 
862
		I915_WRITE(INSTPM, val);
-
 
863
	} else {
Line 806... Line 864...
806
{
864
		return;
807
	struct drm_i915_private *dev_priv = dev->dev_private;
865
	}
808
 
866
 
Line 809... Line 867...
809
	/* deactivate cxsr */
867
	DRM_DEBUG_KMS("memory self-refresh is %s\n",
810
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
868
		      enable ? "enabled" : "disabled");
811
}
869
}
Line 875... Line 933...
875
	return size;
933
	return size;
876
}
934
}
Line 877... Line 935...
877
 
935
 
878
/* Pineview has different values for various configs */
936
/* Pineview has different values for various configs */
879
static const struct intel_watermark_params pineview_display_wm = {
937
static const struct intel_watermark_params pineview_display_wm = {
880
	PINEVIEW_DISPLAY_FIFO,
938
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
881
	PINEVIEW_MAX_WM,
939
	.max_wm = PINEVIEW_MAX_WM,
882
	PINEVIEW_DFT_WM,
940
	.default_wm = PINEVIEW_DFT_WM,
883
	PINEVIEW_GUARD_WM,
941
	.guard_size = PINEVIEW_GUARD_WM,
884
	PINEVIEW_FIFO_LINE_SIZE
942
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
885
};
943
};
886
static const struct intel_watermark_params pineview_display_hplloff_wm = {
944
static const struct intel_watermark_params pineview_display_hplloff_wm = {
887
	PINEVIEW_DISPLAY_FIFO,
945
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
888
	PINEVIEW_MAX_WM,
946
	.max_wm = PINEVIEW_MAX_WM,
889
	PINEVIEW_DFT_HPLLOFF_WM,
947
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
890
	PINEVIEW_GUARD_WM,
948
	.guard_size = PINEVIEW_GUARD_WM,
891
	PINEVIEW_FIFO_LINE_SIZE
949
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
892
};
950
};
893
static const struct intel_watermark_params pineview_cursor_wm = {
951
static const struct intel_watermark_params pineview_cursor_wm = {
894
	PINEVIEW_CURSOR_FIFO,
952
	.fifo_size = PINEVIEW_CURSOR_FIFO,
895
	PINEVIEW_CURSOR_MAX_WM,
953
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
896
	PINEVIEW_CURSOR_DFT_WM,
954
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
897
	PINEVIEW_CURSOR_GUARD_WM,
955
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
898
	PINEVIEW_FIFO_LINE_SIZE,
956
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
899
};
957
};
900
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
958
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
901
	PINEVIEW_CURSOR_FIFO,
959
	.fifo_size = PINEVIEW_CURSOR_FIFO,
902
	PINEVIEW_CURSOR_MAX_WM,
960
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
903
	PINEVIEW_CURSOR_DFT_WM,
961
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
904
	PINEVIEW_CURSOR_GUARD_WM,
962
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
905
	PINEVIEW_FIFO_LINE_SIZE
963
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
906
};
964
};
907
static const struct intel_watermark_params g4x_wm_info = {
965
static const struct intel_watermark_params g4x_wm_info = {
908
	G4X_FIFO_SIZE,
966
	.fifo_size = G4X_FIFO_SIZE,
909
	G4X_MAX_WM,
967
	.max_wm = G4X_MAX_WM,
910
	G4X_MAX_WM,
968
	.default_wm = G4X_MAX_WM,
911
	2,
969
	.guard_size = 2,
912
	G4X_FIFO_LINE_SIZE,
970
	.cacheline_size = G4X_FIFO_LINE_SIZE,
913
};
971
};
914
static const struct intel_watermark_params g4x_cursor_wm_info = {
972
static const struct intel_watermark_params g4x_cursor_wm_info = {
915
	I965_CURSOR_FIFO,
973
	.fifo_size = I965_CURSOR_FIFO,
916
	I965_CURSOR_MAX_WM,
974
	.max_wm = I965_CURSOR_MAX_WM,
917
	I965_CURSOR_DFT_WM,
975
	.default_wm = I965_CURSOR_DFT_WM,
918
	2,
976
	.guard_size = 2,
919
	G4X_FIFO_LINE_SIZE,
977
	.cacheline_size = G4X_FIFO_LINE_SIZE,
920
};
978
};
921
static const struct intel_watermark_params valleyview_wm_info = {
979
static const struct intel_watermark_params valleyview_wm_info = {
922
	VALLEYVIEW_FIFO_SIZE,
980
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
923
	VALLEYVIEW_MAX_WM,
981
	.max_wm = VALLEYVIEW_MAX_WM,
924
	VALLEYVIEW_MAX_WM,
982
	.default_wm = VALLEYVIEW_MAX_WM,
925
	2,
983
	.guard_size = 2,
926
	G4X_FIFO_LINE_SIZE,
984
	.cacheline_size = G4X_FIFO_LINE_SIZE,
927
};
985
};
928
static const struct intel_watermark_params valleyview_cursor_wm_info = {
986
static const struct intel_watermark_params valleyview_cursor_wm_info = {
929
	I965_CURSOR_FIFO,
987
	.fifo_size = I965_CURSOR_FIFO,
930
	VALLEYVIEW_CURSOR_MAX_WM,
988
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
931
	I965_CURSOR_DFT_WM,
989
	.default_wm = I965_CURSOR_DFT_WM,
932
	2,
990
	.guard_size = 2,
933
	G4X_FIFO_LINE_SIZE,
991
	.cacheline_size = G4X_FIFO_LINE_SIZE,
934
};
992
};
935
static const struct intel_watermark_params i965_cursor_wm_info = {
993
static const struct intel_watermark_params i965_cursor_wm_info = {
936
	I965_CURSOR_FIFO,
994
	.fifo_size = I965_CURSOR_FIFO,
937
	I965_CURSOR_MAX_WM,
995
	.max_wm = I965_CURSOR_MAX_WM,
938
	I965_CURSOR_DFT_WM,
996
	.default_wm = I965_CURSOR_DFT_WM,
939
	2,
997
	.guard_size = 2,
940
	I915_FIFO_LINE_SIZE,
998
	.cacheline_size = I915_FIFO_LINE_SIZE,
941
};
999
};
942
static const struct intel_watermark_params i945_wm_info = {
1000
static const struct intel_watermark_params i945_wm_info = {
943
	I945_FIFO_SIZE,
1001
	.fifo_size = I945_FIFO_SIZE,
944
	I915_MAX_WM,
1002
	.max_wm = I915_MAX_WM,
945
	1,
1003
	.default_wm = 1,
946
	2,
1004
	.guard_size = 2,
947
	I915_FIFO_LINE_SIZE
1005
	.cacheline_size = I915_FIFO_LINE_SIZE,
948
};
1006
};
949
static const struct intel_watermark_params i915_wm_info = {
1007
static const struct intel_watermark_params i915_wm_info = {
950
	I915_FIFO_SIZE,
1008
	.fifo_size = I915_FIFO_SIZE,
951
	I915_MAX_WM,
1009
	.max_wm = I915_MAX_WM,
952
	1,
1010
	.default_wm = 1,
953
	2,
1011
	.guard_size = 2,
954
	I915_FIFO_LINE_SIZE
1012
	.cacheline_size = I915_FIFO_LINE_SIZE,
955
};
1013
};
956
static const struct intel_watermark_params i830_wm_info = {
1014
static const struct intel_watermark_params i830_wm_info = {
957
	I855GM_FIFO_SIZE,
1015
	.fifo_size = I855GM_FIFO_SIZE,
958
	I915_MAX_WM,
1016
	.max_wm = I915_MAX_WM,
959
	1,
1017
	.default_wm = 1,
960
	2,
1018
	.guard_size = 2,
961
	I830_FIFO_LINE_SIZE
1019
	.cacheline_size = I830_FIFO_LINE_SIZE,
962
};
1020
};
963
static const struct intel_watermark_params i845_wm_info = {
1021
static const struct intel_watermark_params i845_wm_info = {
964
	I830_FIFO_SIZE,
1022
	.fifo_size = I830_FIFO_SIZE,
965
	I915_MAX_WM,
1023
	.max_wm = I915_MAX_WM,
966
	1,
1024
	.default_wm = 1,
967
	2,
1025
	.guard_size = 2,
968
	I830_FIFO_LINE_SIZE
1026
	.cacheline_size = I830_FIFO_LINE_SIZE,
Line 969... Line 1027...
969
};
1027
};
970
 
1028
 
971
/**
1029
/**
Line 1020... Line 1078...
1020
 
1078
 
1021
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1079
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1022
{
1080
{
Line 1023... Line 1081...
1023
	struct drm_crtc *crtc, *enabled = NULL;
1081
	struct drm_crtc *crtc, *enabled = NULL;
1024
 
1082
 
1025
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1083
	for_each_crtc(dev, crtc) {
1026
		if (intel_crtc_active(crtc)) {
1084
		if (intel_crtc_active(crtc)) {
1027
			if (enabled)
1085
			if (enabled)
1028
				return NULL;
1086
				return NULL;
Line 1044... Line 1102...
1044
 
1102
 
1045
	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1103
	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1046
					 dev_priv->fsb_freq, dev_priv->mem_freq);
1104
					 dev_priv->fsb_freq, dev_priv->mem_freq);
1047
	if (!latency) {
1105
	if (!latency) {
1048
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1106
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1049
		pineview_disable_cxsr(dev);
1107
		intel_set_memory_cxsr(dev_priv, false);
1050
		return;
1108
		return;
Line 1051... Line 1109...
1051
	}
1109
	}
1052
 
1110
 
1053
	crtc = single_enabled_crtc(dev);
1111
	crtc = single_enabled_crtc(dev);
1054
	if (crtc) {
1112
	if (crtc) {
1055
		const struct drm_display_mode *adjusted_mode;
1113
		const struct drm_display_mode *adjusted_mode;
Line 1056... Line 1114...
1056
		int pixel_size = crtc->fb->bits_per_pixel / 8;
1114
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1057
		int clock;
1115
		int clock;
Line 1095... Line 1153...
1095
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
1153
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
1096
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1154
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1097
		I915_WRITE(DSPFW3, reg);
1155
		I915_WRITE(DSPFW3, reg);
1098
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1156
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
Line 1099... Line -...
1099
 
-
 
1100
		/* activate cxsr */
-
 
1101
		I915_WRITE(DSPFW3,
-
 
1102
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1157
 
1103
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
1158
		intel_set_memory_cxsr(dev_priv, true);
1104
	} else {
1159
	} else {
1105
		pineview_disable_cxsr(dev);
-
 
1106
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
1160
		intel_set_memory_cxsr(dev_priv, false);
1107
	}
1161
	}
Line 1108... Line 1162...
1108
}
1162
}
1109
 
1163
 
Line 1131... Line 1185...
1131
 
1185
 
1132
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1186
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1133
	clock = adjusted_mode->crtc_clock;
1187
	clock = adjusted_mode->crtc_clock;
1134
	htotal = adjusted_mode->crtc_htotal;
1188
	htotal = adjusted_mode->crtc_htotal;
1135
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1189
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
Line 1136... Line 1190...
1136
	pixel_size = crtc->fb->bits_per_pixel / 8;
1190
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1137
 
1191
 
1138
	/* Use the small buffer method to calculate plane watermark */
1192
	/* Use the small buffer method to calculate plane watermark */
1139
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1193
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
Line 1144... Line 1198...
1144
	*plane_wm = entries + display->guard_size;
1198
	*plane_wm = entries + display->guard_size;
1145
	if (*plane_wm > (int)display->max_wm)
1199
	if (*plane_wm > (int)display->max_wm)
1146
		*plane_wm = display->max_wm;
1200
		*plane_wm = display->max_wm;
Line 1147... Line 1201...
1147
 
1201
 
1148
	/* Use the large buffer method to calculate cursor watermark */
1202
	/* Use the large buffer method to calculate cursor watermark */
1149
	line_time_us = ((htotal * 1000) / clock);
1203
	line_time_us = max(htotal * 1000 / clock, 1);
1150
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1204
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1151
	entries = line_count * 64 * pixel_size;
1205
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
1152
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1206
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1153
	if (tlb_miss > 0)
1207
	if (tlb_miss > 0)
1154
		entries += tlb_miss;
1208
		entries += tlb_miss;
1155
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1209
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
Line 1218... Line 1272...
1218
	crtc = intel_get_crtc_for_plane(dev, plane);
1272
	crtc = intel_get_crtc_for_plane(dev, plane);
1219
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1273
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1220
	clock = adjusted_mode->crtc_clock;
1274
	clock = adjusted_mode->crtc_clock;
1221
	htotal = adjusted_mode->crtc_htotal;
1275
	htotal = adjusted_mode->crtc_htotal;
1222
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1276
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1223
	pixel_size = crtc->fb->bits_per_pixel / 8;
1277
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;
Line 1224... Line 1278...
1224
 
1278
 
1225
	line_time_us = (htotal * 1000) / clock;
1279
	line_time_us = max(htotal * 1000 / clock, 1);
1226
	line_count = (latency_ns / line_time_us + 1000) / 1000;
1280
	line_count = (latency_ns / line_time_us + 1000) / 1000;
Line 1227... Line 1281...
1227
	line_size = hdisplay * pixel_size;
1281
	line_size = hdisplay * pixel_size;
1228
 
1282
 
Line 1232... Line 1286...
1232
 
1286
 
1233
	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1287
	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
Line 1234... Line 1288...
1234
	*display_wm = entries + display->guard_size;
1288
	*display_wm = entries + display->guard_size;
1235
 
1289
 
1236
	/* calculate the self-refresh watermark for display cursor */
1290
	/* calculate the self-refresh watermark for display cursor */
1237
	entries = line_count * pixel_size * 64;
1291
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
Line 1238... Line 1292...
1238
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1292
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1239
	*cursor_wm = entries + cursor->guard_size;
1293
	*cursor_wm = entries + cursor->guard_size;
Line 1257... Line 1311...
1257
	crtc = intel_get_crtc_for_plane(dev, plane);
1311
	crtc = intel_get_crtc_for_plane(dev, plane);
1258
	if (!intel_crtc_active(crtc))
1312
	if (!intel_crtc_active(crtc))
1259
		return false;
1313
		return false;
Line 1260... Line 1314...
1260
 
1314
 
1261
	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1315
	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
Line 1262... Line 1316...
1262
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
1316
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
1263
 
1317
 
1264
	entries = (clock / 1000) * pixel_size;
1318
	entries = (clock / 1000) * pixel_size;
1265
	*plane_prec_mult = (entries > 256) ?
1319
	*plane_prec_mult = (entries > 128) ?
1266
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
-
 
Line 1267... Line 1320...
1267
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
1320
		DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
1268
						     pixel_size);
1321
	*plane_dl = (64 * (*plane_prec_mult) * 4) / entries;
1269
 
1322
 
1270
	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
1323
	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
Line 1271... Line 1324...
1271
	*cursor_prec_mult = (entries > 256) ?
1324
	*cursor_prec_mult = (entries > 128) ?
1272
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1325
		DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
Line 1273... Line 1326...
1273
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
1326
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;
Line 1293... Line 1346...
1293
 
1346
 
1294
	/* For plane A, Cursor A */
1347
	/* For plane A, Cursor A */
1295
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1348
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1296
				      &cursor_prec_mult, &cursora_dl)) {
1349
				      &cursor_prec_mult, &cursora_dl)) {
1297
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1350
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1298
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1351
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
1299
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1352
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
Line 1300... Line 1353...
1300
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1353
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;
1301
 
1354
 
1302
		I915_WRITE(VLV_DDL1, cursora_prec |
1355
		I915_WRITE(VLV_DDL1, cursora_prec |
1303
				(cursora_dl << DDL_CURSORA_SHIFT) |
1356
				(cursora_dl << DDL_CURSORA_SHIFT) |
Line 1304... Line 1357...
1304
				planea_prec | planea_dl);
1357
				planea_prec | planea_dl);
1305
	}
1358
	}
1306
 
1359
 
1307
	/* For plane B, Cursor B */
1360
	/* For plane B, Cursor B */
1308
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1361
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1309
				      &cursor_prec_mult, &cursorb_dl)) {
1362
				      &cursor_prec_mult, &cursorb_dl)) {
1310
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1363
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
Line 1311... Line 1364...
1311
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1364
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
1312
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1365
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1313
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1366
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;
1314
 
1367
 
Line 1327... Line 1380...
1327
	struct drm_i915_private *dev_priv = dev->dev_private;
1380
	struct drm_i915_private *dev_priv = dev->dev_private;
1328
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1381
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1329
	int plane_sr, cursor_sr;
1382
	int plane_sr, cursor_sr;
1330
	int ignore_plane_sr, ignore_cursor_sr;
1383
	int ignore_plane_sr, ignore_cursor_sr;
1331
	unsigned int enabled = 0;
1384
	unsigned int enabled = 0;
-
 
1385
	bool cxsr_enabled;
Line 1332... Line 1386...
1332
 
1386
 
Line 1333... Line 1387...
1333
	vlv_update_drain_latency(dev);
1387
	vlv_update_drain_latency(dev);
1334
 
1388
 
Line 1353... Line 1407...
1353
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
1407
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
1354
			     2*sr_latency_ns,
1408
			     2*sr_latency_ns,
1355
			     &valleyview_wm_info,
1409
			     &valleyview_wm_info,
1356
			     &valleyview_cursor_wm_info,
1410
			     &valleyview_cursor_wm_info,
1357
			     &ignore_plane_sr, &cursor_sr)) {
1411
			     &ignore_plane_sr, &cursor_sr)) {
1358
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1412
		cxsr_enabled = true;
1359
	} else {
1413
	} else {
1360
		I915_WRITE(FW_BLC_SELF_VLV,
1414
		cxsr_enabled = false;
1361
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1415
		intel_set_memory_cxsr(dev_priv, false);
1362
		plane_sr = cursor_sr = 0;
1416
		plane_sr = cursor_sr = 0;
1363
	}
1417
	}
Line 1364... Line 1418...
1364
 
1418
 
1365
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1419
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
Line 1376... Line 1430...
1376
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1430
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1377
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
1431
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
1378
	I915_WRITE(DSPFW3,
1432
	I915_WRITE(DSPFW3,
1379
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1433
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1380
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1434
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-
 
1435
 
-
 
1436
	if (cxsr_enabled)
-
 
1437
		intel_set_memory_cxsr(dev_priv, true);
1381
}
1438
}
Line 1382... Line 1439...
1382
 
1439
 
1383
static void g4x_update_wm(struct drm_crtc *crtc)
1440
static void g4x_update_wm(struct drm_crtc *crtc)
1384
{
1441
{
1385
	struct drm_device *dev = crtc->dev;
1442
	struct drm_device *dev = crtc->dev;
1386
	static const int sr_latency_ns = 12000;
1443
	static const int sr_latency_ns = 12000;
1387
	struct drm_i915_private *dev_priv = dev->dev_private;
1444
	struct drm_i915_private *dev_priv = dev->dev_private;
1388
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1445
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1389
	int plane_sr, cursor_sr;
1446
	int plane_sr, cursor_sr;
-
 
1447
	unsigned int enabled = 0;
Line 1390... Line 1448...
1390
	unsigned int enabled = 0;
1448
	bool cxsr_enabled;
1391
 
1449
 
1392
	if (g4x_compute_wm0(dev, PIPE_A,
1450
	if (g4x_compute_wm0(dev, PIPE_A,
1393
			    &g4x_wm_info, latency_ns,
1451
			    &g4x_wm_info, latency_ns,
Line 1405... Line 1463...
1405
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
1463
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
1406
			     sr_latency_ns,
1464
			     sr_latency_ns,
1407
			     &g4x_wm_info,
1465
			     &g4x_wm_info,
1408
			     &g4x_cursor_wm_info,
1466
			     &g4x_cursor_wm_info,
1409
			     &plane_sr, &cursor_sr)) {
1467
			     &plane_sr, &cursor_sr)) {
1410
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1468
		cxsr_enabled = true;
1411
	} else {
1469
	} else {
1412
		I915_WRITE(FW_BLC_SELF,
1470
		cxsr_enabled = false;
1413
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1471
		intel_set_memory_cxsr(dev_priv, false);
1414
		plane_sr = cursor_sr = 0;
1472
		plane_sr = cursor_sr = 0;
1415
	}
1473
	}
Line 1416... Line 1474...
1416
 
1474
 
1417
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1475
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
Line 1429... Line 1487...
1429
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
1487
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
1430
	/* HPLL off in SR has some issues on G4x... disable it */
1488
	/* HPLL off in SR has some issues on G4x... disable it */
1431
	I915_WRITE(DSPFW3,
1489
	I915_WRITE(DSPFW3,
1432
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1490
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1433
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1491
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-
 
1492
 
-
 
1493
	if (cxsr_enabled)
-
 
1494
		intel_set_memory_cxsr(dev_priv, true);
1434
}
1495
}
Line 1435... Line 1496...
1435
 
1496
 
1436
static void i965_update_wm(struct drm_crtc *unused_crtc)
1497
static void i965_update_wm(struct drm_crtc *unused_crtc)
1437
{
1498
{
1438
	struct drm_device *dev = unused_crtc->dev;
1499
	struct drm_device *dev = unused_crtc->dev;
1439
	struct drm_i915_private *dev_priv = dev->dev_private;
1500
	struct drm_i915_private *dev_priv = dev->dev_private;
1440
	struct drm_crtc *crtc;
1501
	struct drm_crtc *crtc;
1441
	int srwm = 1;
1502
	int srwm = 1;
-
 
1503
	int cursor_sr = 16;
Line 1442... Line 1504...
1442
	int cursor_sr = 16;
1504
	bool cxsr_enabled;
1443
 
1505
 
1444
	/* Calc sr entries for one plane configs */
1506
	/* Calc sr entries for one plane configs */
1445
	crtc = single_enabled_crtc(dev);
1507
	crtc = single_enabled_crtc(dev);
Line 1449... Line 1511...
1449
		const struct drm_display_mode *adjusted_mode =
1511
		const struct drm_display_mode *adjusted_mode =
1450
			&to_intel_crtc(crtc)->config.adjusted_mode;
1512
			&to_intel_crtc(crtc)->config.adjusted_mode;
1451
		int clock = adjusted_mode->crtc_clock;
1513
		int clock = adjusted_mode->crtc_clock;
1452
		int htotal = adjusted_mode->crtc_htotal;
1514
		int htotal = adjusted_mode->crtc_htotal;
1453
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1515
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1454
		int pixel_size = crtc->fb->bits_per_pixel / 8;
1516
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
1455
		unsigned long line_time_us;
1517
		unsigned long line_time_us;
1456
		int entries;
1518
		int entries;
Line 1457... Line 1519...
1457
 
1519
 
Line 1458... Line 1520...
1458
		line_time_us = ((htotal * 1000) / clock);
1520
		line_time_us = max(htotal * 1000 / clock, 1);
1459
 
1521
 
1460
		/* Use ns/us then divide to preserve precision */
1522
		/* Use ns/us then divide to preserve precision */
1461
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1523
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
Line 1467... Line 1529...
1467
		srwm &= 0x1ff;
1529
		srwm &= 0x1ff;
1468
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1530
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1469
			      entries, srwm);
1531
			      entries, srwm);
Line 1470... Line 1532...
1470
 
1532
 
1471
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1533
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1472
			pixel_size * 64;
1534
			pixel_size * to_intel_crtc(crtc)->cursor_width;
1473
		entries = DIV_ROUND_UP(entries,
1535
		entries = DIV_ROUND_UP(entries,
1474
					  i965_cursor_wm_info.cacheline_size);
1536
					  i965_cursor_wm_info.cacheline_size);
1475
		cursor_sr = i965_cursor_wm_info.fifo_size -
1537
		cursor_sr = i965_cursor_wm_info.fifo_size -
Line 1479... Line 1541...
1479
			cursor_sr = i965_cursor_wm_info.max_wm;
1541
			cursor_sr = i965_cursor_wm_info.max_wm;
Line 1480... Line 1542...
1480
 
1542
 
1481
		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1543
		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
Line 1482... Line 1544...
1482
			      "cursor %d\n", srwm, cursor_sr);
1544
			      "cursor %d\n", srwm, cursor_sr);
1483
 
-
 
1484
		if (IS_CRESTLINE(dev))
1545
 
-
 
1546
		cxsr_enabled = true;
1485
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1547
	} else {
1486
	} else {
1548
		cxsr_enabled = false;
1487
		/* Turn off self refresh if both pipes are enabled */
-
 
1488
		if (IS_CRESTLINE(dev))
-
 
1489
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1549
		/* Turn off self refresh if both pipes are enabled */
Line 1490... Line 1550...
1490
				   & ~FW_BLC_SELF_EN);
1550
		intel_set_memory_cxsr(dev_priv, false);
1491
	}
1551
	}
Line 1497... Line 1557...
1497
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1557
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1498
		   (8 << 16) | (8 << 8) | (8 << 0));
1558
		   (8 << 16) | (8 << 8) | (8 << 0));
1499
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1559
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1500
	/* update cursor SR watermark */
1560
	/* update cursor SR watermark */
1501
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1561
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-
 
1562
 
-
 
1563
	if (cxsr_enabled)
-
 
1564
		intel_set_memory_cxsr(dev_priv, true);
1502
}
1565
}
Line 1503... Line 1566...
1503
 
1566
 
1504
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1567
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1505
{
1568
{
Line 1522... Line 1585...
1522
 
1585
 
1523
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1586
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1524
	crtc = intel_get_crtc_for_plane(dev, 0);
1587
	crtc = intel_get_crtc_for_plane(dev, 0);
1525
	if (intel_crtc_active(crtc)) {
1588
	if (intel_crtc_active(crtc)) {
1526
		const struct drm_display_mode *adjusted_mode;
1589
		const struct drm_display_mode *adjusted_mode;
1527
		int cpp = crtc->fb->bits_per_pixel / 8;
1590
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
1528
		if (IS_GEN2(dev))
1591
		if (IS_GEN2(dev))
Line 1529... Line 1592...
1529
			cpp = 4;
1592
			cpp = 4;
1530
 
1593
 
Line 1538... Line 1601...
1538
 
1601
 
1539
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1602
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1540
	crtc = intel_get_crtc_for_plane(dev, 1);
1603
	crtc = intel_get_crtc_for_plane(dev, 1);
1541
	if (intel_crtc_active(crtc)) {
1604
	if (intel_crtc_active(crtc)) {
1542
		const struct drm_display_mode *adjusted_mode;
1605
		const struct drm_display_mode *adjusted_mode;
1543
		int cpp = crtc->fb->bits_per_pixel / 8;
1606
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
1544
		if (IS_GEN2(dev))
1607
		if (IS_GEN2(dev))
Line 1545... Line 1608...
1545
			cpp = 4;
1608
			cpp = 4;
1546
 
1609
 
Line 1555... Line 1618...
1555
	} else
1618
	} else
1556
		planeb_wm = fifo_size - wm_info->guard_size;
1619
		planeb_wm = fifo_size - wm_info->guard_size;
Line 1557... Line 1620...
1557
 
1620
 
Line -... Line 1621...
-
 
1621
	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
 
1622
 
-
 
1623
	if (IS_I915GM(dev) && enabled) {
-
 
1624
		struct drm_i915_gem_object *obj;
-
 
1625
 
-
 
1626
		obj = intel_fb_obj(enabled->primary->fb);
-
 
1627
 
-
 
1628
		/* self-refresh seems busted with untiled */
-
 
1629
		if (obj->tiling_mode == I915_TILING_NONE)
-
 
1630
			enabled = NULL;
1558
	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1631
	}
1559
 
1632
 
1560
	/*
1633
	/*
1561
	 * Overlay gets an aggressive default since video jitter is bad.
1634
	 * Overlay gets an aggressive default since video jitter is bad.
Line 1562... Line 1635...
1562
	 */
1635
	 */
1563
	cwm = 2;
1636
	cwm = 2;
1564
 
-
 
1565
	/* Play safe and disable self-refresh before adjusting watermarks. */
-
 
1566
	if (IS_I945G(dev) || IS_I945GM(dev))
-
 
Line 1567... Line 1637...
1567
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1637
 
1568
	else if (IS_I915GM(dev))
1638
	/* Play safe and disable self-refresh before adjusting watermarks. */
1569
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
1639
	intel_set_memory_cxsr(dev_priv, false);
1570
 
1640
 
1571
	/* Calc sr entries for one plane configs */
1641
	/* Calc sr entries for one plane configs */
1572
	if (HAS_FW_BLC(dev) && enabled) {
1642
	if (HAS_FW_BLC(dev) && enabled) {
1573
		/* self-refresh has much higher latency */
1643
		/* self-refresh has much higher latency */
1574
		static const int sr_latency_ns = 6000;
1644
		static const int sr_latency_ns = 6000;
1575
		const struct drm_display_mode *adjusted_mode =
1645
		const struct drm_display_mode *adjusted_mode =
1576
			&to_intel_crtc(enabled)->config.adjusted_mode;
1646
			&to_intel_crtc(enabled)->config.adjusted_mode;
1577
		int clock = adjusted_mode->crtc_clock;
1647
		int clock = adjusted_mode->crtc_clock;
1578
		int htotal = adjusted_mode->crtc_htotal;
1648
		int htotal = adjusted_mode->crtc_htotal;
Line 1579... Line 1649...
1579
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
1649
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
Line 1580... Line 1650...
1580
		int pixel_size = enabled->fb->bits_per_pixel / 8;
1650
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
1581
		unsigned long line_time_us;
1651
		unsigned long line_time_us;
1582
		int entries;
1652
		int entries;
1583
 
1653
 
Line 1610... Line 1680...
1610
	fwater_hi = fwater_hi | (1 << 8);
1680
	fwater_hi = fwater_hi | (1 << 8);
Line 1611... Line 1681...
1611
 
1681
 
1612
	I915_WRITE(FW_BLC, fwater_lo);
1682
	I915_WRITE(FW_BLC, fwater_lo);
Line 1613... Line -...
1613
	I915_WRITE(FW_BLC2, fwater_hi);
-
 
1614
 
1683
	I915_WRITE(FW_BLC2, fwater_hi);
1615
	if (HAS_FW_BLC(dev)) {
-
 
1616
		if (enabled) {
-
 
1617
			if (IS_I945G(dev) || IS_I945GM(dev))
-
 
1618
				I915_WRITE(FW_BLC_SELF,
-
 
1619
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-
 
1620
			else if (IS_I915GM(dev))
-
 
1621
				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
-
 
1622
			DRM_DEBUG_KMS("memory self refresh enabled\n");
1684
 
1623
		} else
-
 
1624
			DRM_DEBUG_KMS("memory self refresh disabled\n");
1685
	if (enabled)
Line 1625... Line 1686...
1625
	}
1686
		intel_set_memory_cxsr(dev_priv, true);
1626
}
1687
}
1627
 
1688
 
@@ -1831 +1892 @@
 		return 768;
 	else
 		return 512;
 }
 
+static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
+					 int level, bool is_sprite)
+{
+	if (INTEL_INFO(dev)->gen >= 8)
+		/* BDW primary/sprite plane watermarks */
+		return level == 0 ? 255 : 2047;
+	else if (INTEL_INFO(dev)->gen >= 7)
+		/* IVB/HSW primary/sprite plane watermarks */
+		return level == 0 ? 127 : 1023;
+	else if (!is_sprite)
+		/* ILK/SNB primary plane watermarks */
+		return level == 0 ? 127 : 511;
+	else
+		/* ILK/SNB sprite plane watermarks */
+		return level == 0 ? 63 : 255;
+}
+
+static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
+					  int level)
+{
+	if (INTEL_INFO(dev)->gen >= 7)
+		return level == 0 ? 63 : 255;
+	else
+		return level == 0 ? 31 : 63;
+}
+
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen >= 8)
+		return 31;
+	else
+		return 15;
+}
 
 /* Calculate the maximum primary/sprite plane watermark */
 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 				     int level,
 				     const struct intel_wm_config *config,
 				     enum intel_ddb_partitioning ddb_partitioning,
 				     bool is_sprite)
 {
 	unsigned int fifo_size = ilk_display_fifo_size(dev);
-	unsigned int max;
 
 	/* if sprites aren't enabled, sprites get nothing */
1870
			fifo_size /= 2;
1964
			fifo_size /= 2;
1871
		}
1965
		}
1872
	}
1966
	}
Line 1873... Line 1967...
1873
 
1967
 
1874
	/* clamp to max that the registers can hold */
-
 
1875
	if (INTEL_INFO(dev)->gen >= 8)
-
 
1876
		max = level == 0 ? 255 : 2047;
-
 
1877
	else if (INTEL_INFO(dev)->gen >= 7)
-
 
1878
		/* IVB/HSW primary/sprite plane watermarks */
-
 
1879
		max = level == 0 ? 127 : 1023;
-
 
1880
	else if (!is_sprite)
-
 
1881
		/* ILK/SNB primary plane watermarks */
-
 
1882
		max = level == 0 ? 127 : 511;
-
 
1883
	else
-
 
1884
		/* ILK/SNB sprite plane watermarks */
-
 
1885
		max = level == 0 ? 63 : 255;
-
 
1886
 
1968
	/* clamp to max that the registers can hold */
1887
	return min(fifo_size, max);
1969
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
Line 1888... Line 1970...
1888
}
1970
}
1889
 
1971
 
1890
/* Calculate the maximum cursor plane watermark */
1972
/* Calculate the maximum cursor plane watermark */
Line 1895... Line 1977...
1895
	/* HSW LP1+ watermarks w/ multiple pipes */
1977
	/* HSW LP1+ watermarks w/ multiple pipes */
1896
	if (level > 0 && config->num_pipes_active > 1)
1978
	if (level > 0 && config->num_pipes_active > 1)
1897
		return 64;
1979
		return 64;
Line 1898... Line 1980...
1898
 
1980
 
1899
	/* otherwise just report max that registers can hold */
-
 
1900
	if (INTEL_INFO(dev)->gen >= 7)
1981
	/* otherwise just report max that registers can hold */
1901
		return level == 0 ? 63 : 255;
-
 
1902
	else
-
 
1903
		return level == 0 ? 31 : 63;
-
 
1904
}
-
 
1905
 
-
 
1906
/* Calculate the maximum FBC watermark */
-
 
1907
static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
-
 
1908
{
-
 
1909
	/* max that registers can hold */
-
 
1910
	if (INTEL_INFO(dev)->gen >= 8)
-
 
1911
		return 31;
-
 
1912
	else
-
 
1913
	return 15;
1982
	return ilk_cursor_wm_reg_max(dev, level);
Line 1914... Line 1983...
1914
}
1983
}
1915
 
1984
 
1916
static void ilk_compute_wm_maximums(struct drm_device *dev,
1985
static void ilk_compute_wm_maximums(const struct drm_device *dev,
1917
		       int level,
1986
		       int level,
1918
		       const struct intel_wm_config *config,
1987
		       const struct intel_wm_config *config,
1919
		       enum intel_ddb_partitioning ddb_partitioning,
1988
		       enum intel_ddb_partitioning ddb_partitioning,
1920
				    struct ilk_wm_maximums *max)
1989
				    struct ilk_wm_maximums *max)
1921
{
1990
{
1922
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1991
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1923
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
1992
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
-
 
1993
	max->cur = ilk_cursor_wm_max(dev, level, config);
-
 
1994
	max->fbc = ilk_fbc_wm_reg_max(dev);
-
 
1995
}
-
 
1996
 
-
 
1997
static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
-
 
1998
					int level,
-
 
1999
					struct ilk_wm_maximums *max)
-
 
2000
{
-
 
2001
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
-
 
2002
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
1924
	max->cur = ilk_cursor_wm_max(dev, level, config);
2003
	max->cur = ilk_cursor_wm_reg_max(dev, level);
Line 1925... Line 2004...
1925
	max->fbc = ilk_fbc_wm_max(dev);
2004
	max->fbc = ilk_fbc_wm_reg_max(dev);
1926
}
2005
}
1927
 
2006
 
Line 1964... Line 2043...
1964
	}
2043
	}
Line 1965... Line 2044...
1965
 
2044
 
1966
	return ret;
2045
	return ret;
Line 1967... Line 2046...
1967
}
2046
}
1968
 
2047
 
1969
static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2048
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
1970
				 int level,
2049
				 int level,
1971
				 const struct ilk_pipe_wm_parameters *p,
2050
				 const struct ilk_pipe_wm_parameters *p,
1972
				 struct intel_wm_level *result)
2051
				 struct intel_wm_level *result)
Line 2059... Line 2138...
2059
	/* WaDoubleCursorLP3Latency:ivb */
2138
	/* WaDoubleCursorLP3Latency:ivb */
2060
	if (IS_IVYBRIDGE(dev))
2139
	if (IS_IVYBRIDGE(dev))
2061
		wm[3] *= 2;
2140
		wm[3] *= 2;
2062
}
2141
}
Line 2063... Line 2142...
2063
 
2142
 
2064
static int ilk_wm_max_level(const struct drm_device *dev)
2143
int ilk_wm_max_level(const struct drm_device *dev)
2065
{
2144
{
2066
	/* how many WM levels are we expecting */
2145
	/* how many WM levels are we expecting */
2067
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2146
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2068
		return 4;
2147
		return 4;
Line 2095... Line 2174...
2095
			      name, level, wm[level],
2174
			      name, level, wm[level],
2096
			      latency / 10, latency % 10);
2175
			      latency / 10, latency % 10);
2097
	}
2176
	}
2098
}
2177
}
Line -... Line 2178...
-
 
2178
 
-
 
2179
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
-
 
2180
				    uint16_t wm[5], uint16_t min)
-
 
2181
{
-
 
2182
	int level, max_level = ilk_wm_max_level(dev_priv->dev);
-
 
2183
 
-
 
2184
	if (wm[0] >= min)
-
 
2185
		return false;
-
 
2186
 
-
 
2187
	wm[0] = max(wm[0], min);
-
 
2188
	for (level = 1; level <= max_level; level++)
-
 
2189
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
-
 
2190
 
-
 
2191
	return true;
-
 
2192
}
-
 
2193
 
-
 
2194
static void snb_wm_latency_quirk(struct drm_device *dev)
-
 
2195
{
-
 
2196
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2197
	bool changed;
-
 
2198
 
-
 
2199
	/*
-
 
2200
	 * The BIOS provided WM memory latency values are often
-
 
2201
	 * inadequate for high resolution displays. Adjust them.
-
 
2202
	 */
-
 
2203
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
-
 
2204
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
-
 
2205
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
-
 
2206
 
-
 
2207
	if (!changed)
-
 
2208
		return;
-
 
2209
 
-
 
2210
	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
-
 
2211
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
-
 
2212
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
-
 
2213
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
-
 
2214
}
2099
 
2215
 
2100
static void intel_setup_wm_latency(struct drm_device *dev)
2216
static void ilk_setup_wm_latency(struct drm_device *dev)
2101
{
2217
{
Line 2102... Line 2218...
2102
	struct drm_i915_private *dev_priv = dev->dev_private;
2218
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 2112... Line 2228...
2112
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2228
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
Line 2113... Line 2229...
2113
 
2229
 
2114
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2230
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2115
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2231
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
-
 
2232
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
-
 
2233
 
-
 
2234
	if (IS_GEN6(dev))
2116
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2235
		snb_wm_latency_quirk(dev);
Line 2117... Line 2236...
2117
}
2236
}
2118
 
2237
 
2119
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
-
 
2120
				      struct ilk_pipe_wm_parameters *p,
2238
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2121
				      struct intel_wm_config *config)
2239
				      struct ilk_pipe_wm_parameters *p)
2122
{
2240
{
2123
	struct drm_device *dev = crtc->dev;
2241
	struct drm_device *dev = crtc->dev;
2124
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2242
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Line 2125... Line 2243...
2125
	enum pipe pipe = intel_crtc->pipe;
2243
	enum pipe pipe = intel_crtc->pipe;
-
 
2244
	struct drm_plane *plane;
-
 
2245
 
2126
	struct drm_plane *plane;
2246
	if (!intel_crtc_active(crtc))
2127
 
2247
		return;
2128
		p->active = intel_crtc_active(crtc);
2248
 
2129
	if (p->active) {
2249
	p->active = true;
2130
		p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2250
		p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2131
		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2251
		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2132
		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2252
		p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2133
		p->cur.bytes_per_pixel = 4;
2253
		p->cur.bytes_per_pixel = 4;
2134
		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2254
		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2135
		p->cur.horiz_pixels = 64;
2255
		p->cur.horiz_pixels = intel_crtc->cursor_width;
2136
		/* TODO: for now, assume primary and cursor planes are always enabled. */
-
 
Line 2137... Line -...
2137
		p->pri.enabled = true;
-
 
2138
		p->cur.enabled = true;
-
 
2139
	}
-
 
2140
 
2256
		/* TODO: for now, assume primary and cursor planes are always enabled. */
2141
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2257
		p->pri.enabled = true;
Line 2142... Line 2258...
2142
		config->num_pipes_active += intel_crtc_active(crtc);
2258
		p->cur.enabled = true;
2143
 
2259
 
-
 
2260
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
-
 
2261
		struct intel_plane *intel_plane = to_intel_plane(plane);
-
 
2262
 
-
 
2263
		if (intel_plane->pipe == pipe) {
Line -... Line 2264...
-
 
2264
		p->spr = intel_plane->wm;
-
 
2265
			break;
-
 
2266
		}
-
 
2267
	}
-
 
2268
}
-
 
2269
 
-
 
2270
static void ilk_compute_wm_config(struct drm_device *dev,
-
 
2271
				  struct intel_wm_config *config)
-
 
2272
{
-
 
2273
	struct intel_crtc *intel_crtc;
-
 
2274
 
-
 
2275
	/* Compute the currently _active_ config */
2144
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2276
	for_each_intel_crtc(dev, intel_crtc) {
2145
		struct intel_plane *intel_plane = to_intel_plane(plane);
2277
		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
-
 
2278
 
2146
 
2279
		if (!wm->pipe_enabled)
2147
		if (intel_plane->pipe == pipe)
2280
			continue;
Line 2148... Line 2281...
2148
		p->spr = intel_plane->wm;
2281
 
2149
 
2282
		config->sprites_enabled |= wm->sprites_enabled;
2150
		config->sprites_enabled |= intel_plane->wm.enabled;
2283
		config->sprites_scaled |= wm->sprites_scaled;
2151
		config->sprites_scaled |= intel_plane->wm.scaled;
2284
		config->num_pipes_active++;
2152
	}
2285
	}
2153
}
2286
}
2154
 
2287
 
2155
/* Compute new watermarks for the pipe */
2288
/* Compute new watermarks for the pipe */
2156
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2289
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2157
				  const struct ilk_pipe_wm_parameters *params,
2290
				  const struct ilk_pipe_wm_parameters *params,
2158
				  struct intel_pipe_wm *pipe_wm)
2291
				  struct intel_pipe_wm *pipe_wm)
2159
{
2292
{
2160
	struct drm_device *dev = crtc->dev;
2293
	struct drm_device *dev = crtc->dev;
2161
	struct drm_i915_private *dev_priv = dev->dev_private;
2294
	const struct drm_i915_private *dev_priv = dev->dev_private;
2162
	int level, max_level = ilk_wm_max_level(dev);
2295
	int level, max_level = ilk_wm_max_level(dev);
Line -... Line 2296...
-
 
2296
	/* LP0 watermark maximums depend on this pipe alone */
2163
	/* LP0 watermark maximums depend on this pipe alone */
2297
	struct intel_wm_config config = {
2164
	struct intel_wm_config config = {
2298
		.num_pipes_active = 1,
Line 2165... Line 2299...
2165
		.num_pipes_active = 1,
2299
		.sprites_enabled = params->spr.enabled,
2166
		.sprites_enabled = params->spr.enabled,
2300
		.sprites_scaled = params->spr.scaled,
2167
		.sprites_scaled = params->spr.scaled,
2301
	};
Line 2168... Line 2302...
2168
	};
2302
	struct ilk_wm_maximums max;
2169
	struct ilk_wm_maximums max;
2303
 
2170
 
2304
	pipe_wm->pipe_enabled = params->active;
Line 2171... Line -...
2171
	/* LP0 watermarks always use 1/2 DDB partitioning */
-
 
2172
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2305
	pipe_wm->sprites_enabled = params->spr.enabled;
2173
 
-
 
Line 2174... Line 2306...
2174
	/* ILK/SNB: LP2+ watermarks only w/o sprites */
2306
	pipe_wm->sprites_scaled = params->spr.scaled;
2175
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2307
 
Line -... Line 2308...
-
 
2308
	/* ILK/SNB: LP2+ watermarks only w/o sprites */
-
 
2309
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
-
 
2310
		max_level = 1;
2176
		max_level = 1;
2311
 
2177
 
2312
	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
-
 
2313
	if (params->spr.scaled)
-
 
2314
		max_level = 0;
-
 
2315
 
-
 
2316
	ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
-
 
2317
 
-
 
2318
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-
 
2319
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
-
 
2320
 
-
 
2321
	/* LP0 watermarks always use 1/2 DDB partitioning */
-
 
2322
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
-
 
2323
 
-
 
2324
	/* At least LP0 must be valid */
-
 
2325
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
-
 
2326
		return false;
-
 
2327
 
-
 
2328
	ilk_compute_wm_reg_maximums(dev, 1, &max);
-
 
2329
 
-
 
2330
	for (level = 1; level <= max_level; level++) {
-
 
2331
		struct intel_wm_level wm = {};
-
 
2332
 
-
 
2333
		ilk_compute_wm_level(dev_priv, level, params, &wm);
2178
	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2334
 
Line 2179... Line 2335...
2179
	if (params->spr.scaled)
2335
		/*
2180
		max_level = 0;
2336
		 * Disable any watermark level that exceeds the
2181
 
2337
		 * register maximums since such watermarks are
Line 2197... Line 2353...
2197
			       int level,
2353
			       int level,
2198
			       struct intel_wm_level *ret_wm)
2354
			       struct intel_wm_level *ret_wm)
2199
{
2355
{
2200
	const struct intel_crtc *intel_crtc;
2356
	const struct intel_crtc *intel_crtc;
Line -... Line 2357...
-
 
2357
 
-
 
2358
	ret_wm->enable = true;
2201
 
2359
 
2202
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2360
	for_each_intel_crtc(dev, intel_crtc) {
2203
		const struct intel_wm_level *wm =
2361
		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
Line -... Line 2362...
-
 
2362
		const struct intel_wm_level *wm = &active->wm[level];
-
 
2363
 
-
 
2364
		if (!active->pipe_enabled)
-
 
2365
			continue;
-
 
2366
 
-
 
2367
		/*
-
 
2368
		 * The watermark values may have been used in the past,
-
 
2369
		 * so we must maintain them in the registers for some
2204
			&intel_crtc->wm.active.wm[level];
2370
		 * time even if the level is now disabled.
2205
 
2371
		 */
Line 2206... Line 2372...
2206
		if (!wm->enable)
2372
		if (!wm->enable)
2207
			return;
2373
			ret_wm->enable = false;
2208
 
2374
 
2209
		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2375
		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2210
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2376
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2211
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
-
 
2212
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
-
 
2213
	}
2377
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
Line 2214... Line 2378...
2214
 
2378
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2215
	ret_wm->enable = true;
2379
	}
2216
}
2380
}
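
Note the behavioural change: Rev 4560 returned at the first pipe whose level was disabled and only set ret_wm->enable at the end, while Rev 5060 starts from enable = true, keeps merging the maxima, and merely clears the enable bit. A hypothetical two-pipe merge at one LP level shows why the values are still kept:

	pipe A wm[2]: { pri_val = 10, enable = true  }
	pipe B wm[2]: { pri_val = 14, enable = false }   /* recently disabled */
	merged wm[2]: { pri_val = 14, enable = false }   /* values maintained */
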
Line 2222... Line 2386...
2222
			 const struct intel_wm_config *config,
2386
			 const struct intel_wm_config *config,
2223
			 const struct ilk_wm_maximums *max,
2387
			 const struct ilk_wm_maximums *max,
2224
			 struct intel_pipe_wm *merged)
2388
			 struct intel_pipe_wm *merged)
2225
{
2389
{
2226
	int level, max_level = ilk_wm_max_level(dev);
2390
	int level, max_level = ilk_wm_max_level(dev);
-
 
2391
	int last_enabled_level = max_level;
Line 2227... Line 2392...
2227
 
2392
 
2228
	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2393
	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2229
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2394
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2230
	    config->num_pipes_active > 1)
2395
	    config->num_pipes_active > 1)
Line 2237... Line 2402...
2237
	for (level = 1; level <= max_level; level++) {
2402
	for (level = 1; level <= max_level; level++) {
2238
		struct intel_wm_level *wm = &merged->wm[level];
2403
		struct intel_wm_level *wm = &merged->wm[level];
Line 2239... Line 2404...
2239
 
2404
 
Line -... Line 2405...
-
 
2405
		ilk_merge_wm_level(dev, level, wm);
-
 
2406
 
2240
		ilk_merge_wm_level(dev, level, wm);
2407
		if (level > last_enabled_level)
-
 
2408
			wm->enable = false;
2241
 
2409
		else if (!ilk_validate_wm_level(level, max, wm))
Line 2242... Line 2410...
2242
		if (!ilk_validate_wm_level(level, max, wm))
2410
			/* make sure all following levels get disabled */
2243
			break;
2411
			last_enabled_level = level - 1;
2244
 
2412
 
2245
		/*
2413
		/*
2246
		 * The spec says it is preferred to disable
2414
		 * The spec says it is preferred to disable
-
 
2415
		 * FBC WMs instead of disabling a WM level.
2247
		 * FBC WMs instead of disabling a WM level.
2416
		 */
2248
		 */
2417
		if (wm->fbc_val > max->fbc) {
2249
		if (wm->fbc_val > max->fbc) {
2418
			if (wm->enable)
2250
			merged->fbc_wm_enabled = false;
2419
			merged->fbc_wm_enabled = false;
Line 2300... Line 2469...
2300
		const struct intel_wm_level *r;
2469
		const struct intel_wm_level *r;
Line 2301... Line 2470...
2301
 
2470
 
Line 2302... Line 2471...
2302
		level = ilk_wm_lp_to_level(wm_lp, merged);
2471
		level = ilk_wm_lp_to_level(wm_lp, merged);
2303
 
-
 
2304
		r = &merged->wm[level];
-
 
Line -... Line 2472...
-
 
2472
 
-
 
2473
		r = &merged->wm[level];
-
 
2474
 
-
 
2475
		/*
2305
		if (!r->enable)
2476
		 * Maintain the watermark values even if the level is
2306
			break;
2477
		 * disabled. Doing otherwise could cause underruns.
2307
 
2478
		 */
2308
		results->wm_lp[wm_lp - 1] = WM3_LP_EN |
2479
		results->wm_lp[wm_lp - 1] =
Line -... Line 2480...
-
 
2480
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
-
 
2481
			(r->pri_val << WM1_LP_SR_SHIFT) |
-
 
2482
			r->cur_val;
2309
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2483
 
2310
			(r->pri_val << WM1_LP_SR_SHIFT) |
2484
		if (r->enable)
2311
			r->cur_val;
2485
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2312
 
2486
 
2313
		if (INTEL_INFO(dev)->gen >= 8)
2487
		if (INTEL_INFO(dev)->gen >= 8)
2314
			results->wm_lp[wm_lp - 1] |=
2488
			results->wm_lp[wm_lp - 1] |=
Line -... Line 2489...
-
 
2489
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
-
 
2490
		else
-
 
2491
			results->wm_lp[wm_lp - 1] |=
-
 
2492
				r->fbc_val << WM1_LP_FBC_SHIFT;
2315
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2493
 
2316
		else
2494
		/*
2317
			results->wm_lp[wm_lp - 1] |=
2495
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
2318
				r->fbc_val << WM1_LP_FBC_SHIFT;
2496
		 * level is disabled. Doing otherwise could cause underruns.
2319
 
2497
		 */
2320
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2498
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
Line 2321... Line 2499...
2321
			WARN_ON(wm_lp != 1);
2499
			WARN_ON(wm_lp != 1);
2322
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2500
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2323
		} else
2501
		} else
2324
		results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2502
		results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2325
	}
2503
	}
Line 2326... Line 2504...
2326
 
2504
 
Line 2558... Line 2736...
2558
	enum intel_ddb_partitioning partitioning;
2736
	enum intel_ddb_partitioning partitioning;
2559
	struct intel_pipe_wm pipe_wm = {};
2737
	struct intel_pipe_wm pipe_wm = {};
2560
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2738
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2561
	struct intel_wm_config config = {};
2739
	struct intel_wm_config config = {};
Line 2562... Line 2740...
2562
 
2740
 
Line 2563... Line 2741...
2563
	ilk_compute_wm_parameters(crtc, &params, &config);
2741
	ilk_compute_wm_parameters(crtc, &params);
Line 2564... Line 2742...
2564
 
2742
 
2565
	intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2743
	intel_compute_pipe_wm(crtc, &params, &pipe_wm);
Line 2566... Line 2744...
2566
 
2744
 
Line -... Line 2745...
-
 
2745
	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
-
 
2746
		return;
2567
	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2747
 
2568
		return;
2748
	intel_crtc->wm.active = pipe_wm;
Line 2569... Line 2749...
2569
 
2749
 
2570
	intel_crtc->wm.active = pipe_wm;
2750
	ilk_compute_wm_config(dev, &config);
Line 2589... Line 2769...
2589
	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2769
	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
Line 2590... Line 2770...
2590
 
2770
 
2591
	ilk_write_wm_values(dev_priv, &results);
2771
	ilk_write_wm_values(dev_priv, &results);
Line -... Line 2772...
-
 
2772
}
2592
}
2773
 
2593
 
2774
static void
2594
static void ilk_update_sprite_wm(struct drm_plane *plane,
2775
ilk_update_sprite_wm(struct drm_plane *plane,
2595
				     struct drm_crtc *crtc,
2776
				     struct drm_crtc *crtc,
2596
				     uint32_t sprite_width, int pixel_size,
2777
		     uint32_t sprite_width, uint32_t sprite_height,
2597
				     bool enabled, bool scaled)
2778
		     int pixel_size, bool enabled, bool scaled)
2598
{
2779
{
Line 2599... Line 2780...
2599
	struct drm_device *dev = plane->dev;
2780
	struct drm_device *dev = plane->dev;
2600
		struct intel_plane *intel_plane = to_intel_plane(plane);
2781
		struct intel_plane *intel_plane = to_intel_plane(plane);
2601
 
2782
 
-
 
2783
	intel_plane->wm.enabled = enabled;
2602
	intel_plane->wm.enabled = enabled;
2784
	intel_plane->wm.scaled = scaled;
Line 2603... Line 2785...
2603
	intel_plane->wm.scaled = scaled;
2785
	intel_plane->wm.horiz_pixels = sprite_width;
2604
	intel_plane->wm.horiz_pixels = sprite_width;
2786
	intel_plane->wm.vert_pixels = sprite_width;
2605
			intel_plane->wm.bytes_per_pixel = pixel_size;
2787
			intel_plane->wm.bytes_per_pixel = pixel_size;
Line 2633... Line 2815...
2633
 
2815
 
2634
	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2816
	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2635
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2817
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Line 2636... Line 2818...
2636
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2818
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
-
 
2819
 
-
 
2820
	active->pipe_enabled = intel_crtc_active(crtc);
2637
 
2821
 
Line 2638... Line 2822...
2638
	if (intel_crtc_active(crtc)) {
2822
	if (active->pipe_enabled) {
2639
		u32 tmp = hw->wm_pipe[pipe];
2823
		u32 tmp = hw->wm_pipe[pipe];
2640
 
2824
 
Line 2666... Line 2850...
2666
{
2850
{
2667
	struct drm_i915_private *dev_priv = dev->dev_private;
2851
	struct drm_i915_private *dev_priv = dev->dev_private;
2668
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
2852
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
2669
	struct drm_crtc *crtc;
2853
	struct drm_crtc *crtc;
Line 2670... Line 2854...
2670
 
2854
 
2671
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2855
	for_each_crtc(dev, crtc)
Line 2672... Line 2856...
2672
		ilk_pipe_wm_get_hw_state(crtc);
2856
		ilk_pipe_wm_get_hw_state(crtc);
2673
 
2857
 
2674
	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2858
	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
Line 2675... Line 2859...
2675
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2859
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
-
 
2860
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2676
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2861
 
2677
 
2862
	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-
 
2863
	if (INTEL_INFO(dev)->gen >= 7) {
Line 2678... Line 2864...
2678
	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2864
	hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2679
	hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2865
	hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2680
	hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2866
	}
2681
 
2867
 
Line 2730... Line 2916...
2730
		dev_priv->display.update_wm(crtc);
2916
		dev_priv->display.update_wm(crtc);
2731
}
2917
}
Line 2732... Line 2918...
2732
 
2918
 
2733
void intel_update_sprite_watermarks(struct drm_plane *plane,
2919
void intel_update_sprite_watermarks(struct drm_plane *plane,
2734
				    struct drm_crtc *crtc,
2920
				    struct drm_crtc *crtc,
-
 
2921
				    uint32_t sprite_width,
-
 
2922
				    uint32_t sprite_height,
2735
				    uint32_t sprite_width, int pixel_size,
2923
				    int pixel_size,
2736
				    bool enabled, bool scaled)
2924
				    bool enabled, bool scaled)
2737
{
2925
{
Line 2738... Line 2926...
2738
	struct drm_i915_private *dev_priv = plane->dev->dev_private;
2926
	struct drm_i915_private *dev_priv = plane->dev->dev_private;
2739
 
2927
 
-
 
2928
	if (dev_priv->display.update_sprite_wm)
2740
	if (dev_priv->display.update_sprite_wm)
2929
		dev_priv->display.update_sprite_wm(plane, crtc,
2741
		dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
2930
						   sprite_width, sprite_height,
Line 2742... Line 2931...
2742
						   pixel_size, enabled, scaled);
2931
						   pixel_size, enabled, scaled);
2743
}
2932
}
Line 2754... Line 2943...
2754
	if (!ctx) {
2943
	if (!ctx) {
2755
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2944
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2756
		return NULL;
2945
		return NULL;
2757
	}
2946
	}
Line 2758... Line 2947...
2758
 
2947
 
2759
	ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
2948
	ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
2760
	if (ret) {
2949
	if (ret) {
2761
		DRM_ERROR("failed to pin power context: %d\n", ret);
2950
		DRM_ERROR("failed to pin power context: %d\n", ret);
2762
		goto err_unref;
2951
		goto err_unref;
Line 2769... Line 2958...
2769
	}
2958
	}
Line 2770... Line 2959...
2770
 
2959
 
Line 2771... Line 2960...
2771
	return ctx;
2960
	return ctx;
2772
 
2961
 
2773
err_unpin:
2962
err_unpin:
2774
	i915_gem_object_unpin(ctx);
2963
	i915_gem_object_ggtt_unpin(ctx);
2775
err_unref:
2964
err_unref:
2776
	drm_gem_object_unreference(&ctx->base);
2965
	drm_gem_object_unreference(&ctx->base);
Line 2869... Line 3058...
2869
 
3058
 
Line 2870... Line 3059...
2870
	ironlake_set_drps(dev, fstart);
3059
	ironlake_set_drps(dev, fstart);
2871
 
3060
 
2872
	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3061
	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2873
		I915_READ(0x112e0);
3062
		I915_READ(0x112e0);
2874
    dev_priv->ips.last_time1 = jiffies_to_msecs(GetTimerTicks());
3063
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
Line 2875... Line 3064...
2875
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
3064
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
2876
	getrawmonotonic(&dev_priv->ips.last_time2);
3065
	dev_priv->ips.last_time2 = ktime_get_raw_ns();
Line 2877... Line 3066...
2877
 
3066
 
Line 2917... Line 3106...
2917
	 * getting more interrupts, otherwise leave this clear. This prevents a
3106
	 * getting more interrupts, otherwise leave this clear. This prevents a
2918
	 * race in the hw when coming out of rc6: There's a tiny window where
3107
	 * race in the hw when coming out of rc6: There's a tiny window where
2919
	 * the hw runs at the minimal clock before selecting the desired
3108
	 * the hw runs at the minimal clock before selecting the desired
2920
	 * frequency, if the down threshold expires in that window we will not
3109
	 * frequency, if the down threshold expires in that window we will not
2921
	 * receive a down interrupt. */
3110
	 * receive a down interrupt. */
2922
	limits = dev_priv->rps.max_delay << 24;
3111
	limits = dev_priv->rps.max_freq_softlimit << 24;
2923
	if (val <= dev_priv->rps.min_delay)
3112
	if (val <= dev_priv->rps.min_freq_softlimit)
2924
		limits |= dev_priv->rps.min_delay << 16;
3113
		limits |= dev_priv->rps.min_freq_softlimit << 16;
Line 2925... Line 3114...
2925
 
3114
 
2926
	return limits;
3115
	return limits;
Line 2927... Line 3116...
2927
}
3116
}
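
The limits word packs the softlimits into fixed byte lanes: the maximum in bits 31:24, and the minimum in bits 23:16 when clamping downward. A worked example with hypothetical softlimits:

	max_freq_softlimit = 0x10, min_freq_softlimit = 0x03, val = 0x03
	limits  = 0x10 << 24   -> 0x10000000
	limits |= 0x03 << 16   -> 0x10030000   /* val <= min, so min is forced */
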
Line 2931... Line 3120...
2931
	int new_power;
3120
	int new_power;
Line 2932... Line 3121...
2932
 
3121
 
2933
	new_power = dev_priv->rps.power;
3122
	new_power = dev_priv->rps.power;
2934
	switch (dev_priv->rps.power) {
3123
	switch (dev_priv->rps.power) {
2935
	case LOW_POWER:
3124
	case LOW_POWER:
2936
		if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
3125
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
2937
			new_power = BETWEEN;
3126
			new_power = BETWEEN;
Line 2938... Line 3127...
2938
		break;
3127
		break;
2939
 
3128
 
2940
	case BETWEEN:
3129
	case BETWEEN:
2941
		if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
3130
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
2942
			new_power = LOW_POWER;
3131
			new_power = LOW_POWER;
2943
		else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
3132
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
Line 2944... Line 3133...
2944
			new_power = HIGH_POWER;
3133
			new_power = HIGH_POWER;
2945
		break;
3134
		break;
2946
 
3135
 
2947
	case HIGH_POWER:
3136
	case HIGH_POWER:
2948
		if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
3137
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
2949
			new_power = BETWEEN;
3138
			new_power = BETWEEN;
2950
		break;
3139
		break;
2951
	}
3140
	}
2952
	/* Max/min bins are special */
3141
	/* Max/min bins are special */
2953
	if (val == dev_priv->rps.min_delay)
3142
	if (val == dev_priv->rps.min_freq_softlimit)
2954
		new_power = LOW_POWER;
3143
		new_power = LOW_POWER;
2955
	if (val == dev_priv->rps.max_delay)
3144
	if (val == dev_priv->rps.max_freq_softlimit)
Line 2956... Line 3145...
2956
		new_power = HIGH_POWER;
3145
		new_power = HIGH_POWER;
Line 3016... Line 3205...
3016
 
3205
 
3017
	dev_priv->rps.power = new_power;
3206
	dev_priv->rps.power = new_power;
3018
	dev_priv->rps.last_adj = 0;
3207
	dev_priv->rps.last_adj = 0;
Line -... Line 3208...
-
 
3208
}
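
To illustrate the hysteresis above with hypothetical break points (efficient_freq/RPe = 8, rp1 = 12, rp0 = 16, all in 50 MHz units):

	LOW_POWER,  val = 10  (> RPe + 1 and > cur)           -> BETWEEN
	BETWEEN,    val = 16  (>= rp0 and > cur)              -> HIGH_POWER
	HIGH_POWER, val = 13  (< (rp1 + rp0) / 2 = 14, < cur) -> BETWEEN
	any state,  val == min_freq_softlimit                 -> LOW_POWER
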
-
 
3209
 
-
 
3210
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
-
 
3211
{
-
 
3212
	u32 mask = 0;
-
 
3213
 
-
 
3214
	if (val > dev_priv->rps.min_freq_softlimit)
-
 
3215
		mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
-
 
3216
	if (val < dev_priv->rps.max_freq_softlimit)
-
 
3217
		mask |= GEN6_PM_RP_UP_THRESHOLD;
-
 
3218
 
-
 
3219
	mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
-
 
3220
	mask &= dev_priv->pm_rps_events;
-
 
3221
 
-
 
3222
	/* IVB and SNB hard hangs on looping batchbuffer
-
 
3223
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
-
 
3224
	 */
-
 
3225
	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
-
 
3226
		mask |= GEN6_PM_RP_UP_EI_EXPIRED;
-
 
3227
 
-
 
3228
	if (IS_GEN8(dev_priv->dev))
-
 
3229
		mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
 
3230
 
-
 
3231
	return ~mask;
-
 
3232
}
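
GEN6_PMINTRMSK is a mask register (set bit = event masked), so the function collects the events to keep unmasked and returns the complement. At the softlimit endpoints this works out to:

	val == min_freq_softlimit  -> only up-threshold events stay unmasked
	val == max_freq_softlimit  -> only down-threshold/timeout events stay unmasked
	min < val < max            -> both directions stay unmasked
	(GEN6_PM_RP_UP_EI_EXPIRED additionally stays unmasked on SNB/IVB,
	 per the hang workaround in the comment above)
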
-
 
3233
 
-
 
3234
/* gen6_set_rps is called to update the frequency request, but should also be
3019
}
3235
 * called when the range (min_delay and max_delay) is modified so that we can
3020
 
3236
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3021
void gen6_set_rps(struct drm_device *dev, u8 val)
3237
void gen6_set_rps(struct drm_device *dev, u8 val)
Line 3022... Line 3238...
3022
{
3238
{
3023
	struct drm_i915_private *dev_priv = dev->dev_private;
3239
	struct drm_i915_private *dev_priv = dev->dev_private;
3024
 
3240
 
3025
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
 
3026
	WARN_ON(val > dev_priv->rps.max_delay);
-
 
3027
	WARN_ON(val < dev_priv->rps.min_delay);
-
 
Line -... Line 3241...
-
 
3241
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
 
3242
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
-
 
3243
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
-
 
3244
 
3028
 
3245
	/* min/max delay may still have been modified so be sure to
Line 3029... Line 3246...
3029
	if (val == dev_priv->rps.cur_delay)
3246
	 * write the limits value.
3030
		return;
3247
	 */
3031
 
3248
	if (val != dev_priv->rps.cur_freq) {
3032
	gen6_set_rps_thresholds(dev_priv, val);
3249
	gen6_set_rps_thresholds(dev_priv, val);
3033
 
3250
 
3034
	if (IS_HASWELL(dev))
3251
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3035
		I915_WRITE(GEN6_RPNSWREQ,
3252
		I915_WRITE(GEN6_RPNSWREQ,
3036
			   HSW_FREQUENCY(val));
3253
			   HSW_FREQUENCY(val));
-
 
3254
	else
Line 3037... Line 3255...
3037
	else
3255
	I915_WRITE(GEN6_RPNSWREQ,
3038
	I915_WRITE(GEN6_RPNSWREQ,
3256
		   GEN6_FREQUENCY(val) |
3039
		   GEN6_FREQUENCY(val) |
3257
		   GEN6_OFFSET(0) |
3040
		   GEN6_OFFSET(0) |
3258
		   GEN6_AGGRESSIVE_TURBO);
3041
		   GEN6_AGGRESSIVE_TURBO);
3259
	}
Line 3042... Line 3260...
3042
 
3260
 
Line 3043... Line 3261...
3043
	/* Make sure we continue to get interrupts
3261
	/* Make sure we continue to get interrupts
3044
	 * until we hit the minimum or maximum frequencies.
-
 
3045
	 */
3262
	 * until we hit the minimum or maximum frequencies.
3046
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3263
	 */
Line -... Line 3264...
-
 
3264
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
-
 
3265
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
 
3266
 
-
 
3267
	POSTING_READ(GEN6_RPNSWREQ);
-
 
3268
 
-
 
3269
	dev_priv->rps.cur_freq = val;
-
 
3270
	trace_intel_gpu_freq_change(val * 50);
-
 
3271
}
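
The requested value is in the hardware's 50 MHz units throughout, which is why the tracepoint multiplies by 50: for example, val = 16 requests 16 * 50 MHz = 800 MHz via GEN6_RPNSWREQ.
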
-
 
3272
 
-
 
3273
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
-
 
3274
 *
-
 
3275
 * * If Gfx is Idle, then
-
 
3276
 * 1. Mask Turbo interrupts
-
 
3277
 * 2. Bring up Gfx clock
-
 
3278
 * 3. Change the freq to Rpn and wait till P-Unit updates freq
-
 
3279
 * 4. Clear the Force GFX CLK ON bit so that Gfx can power down
-
 
3280
 * 5. Unmask Turbo interrupts
-
 
3281
*/
-
 
3282
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
-
 
3283
{
-
 
3284
	struct drm_device *dev = dev_priv->dev;
-
 
3285
 
-
 
3286
	/* Latest VLV doesn't need to force the gfx clock */
-
 
3287
	if (dev->pdev->revision >= 0xd) {
-
 
3288
		valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
-
 
3289
		return;
-
 
3290
	}
-
 
3291
 
-
 
3292
	/*
-
 
3293
	 * When we are idle.  Drop to min voltage state.
-
 
3294
	 */
-
 
3295
 
-
 
3296
	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
-
 
3297
		return;
-
 
3298
 
-
 
3299
	/* Mask turbo interrupt so that they will not come in between */
-
 
3300
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-
 
3301
 
-
 
3302
	vlv_force_gfx_clock(dev_priv, true);
-
 
3303
 
-
 
3304
	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
-
 
3305
 
-
 
3306
	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
-
 
3307
					dev_priv->rps.min_freq_softlimit);
-
 
3308
 
-
 
3309
	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3047
		   gen6_rps_limits(dev_priv, val));
3310
				& GENFREQSTATUS) == 0, 5))
3048
 
3311
		DRM_ERROR("timed out waiting for Punit\n");
3049
	POSTING_READ(GEN6_RPNSWREQ);
3312
 
Line 3050... Line 3313...
3050
 
3313
	vlv_force_gfx_clock(dev_priv, false);
3051
	dev_priv->rps.cur_delay = val;
3314
 
3052
 
3315
	I915_WRITE(GEN6_PMINTRMSK,
3053
	trace_intel_gpu_freq_change(val * 50);
3316
		   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
-
 
3317
}
-
 
3318
 
3054
}
3319
void gen6_rps_idle(struct drm_i915_private *dev_priv)
3055
 
3320
{
3056
void gen6_rps_idle(struct drm_i915_private *dev_priv)
3321
	struct drm_device *dev = dev_priv->dev;
3057
{
3322
 
3058
	struct drm_device *dev = dev_priv->dev;
3323
	mutex_lock(&dev_priv->rps.hw_lock);
3059
 
3324
	if (dev_priv->rps.enabled) {
Line 3073... Line 3338...
3073
	struct drm_device *dev = dev_priv->dev;
3338
	struct drm_device *dev = dev_priv->dev;
Line 3074... Line 3339...
3074
 
3339
 
3075
	mutex_lock(&dev_priv->rps.hw_lock);
3340
	mutex_lock(&dev_priv->rps.hw_lock);
3076
	if (dev_priv->rps.enabled) {
3341
	if (dev_priv->rps.enabled) {
3077
		if (IS_VALLEYVIEW(dev))
3342
		if (IS_VALLEYVIEW(dev))
3078
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3343
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3079
		else
3344
		else
3080
			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3345
			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3081
		dev_priv->rps.last_adj = 0;
3346
		dev_priv->rps.last_adj = 0;
3082
	}
3347
	}
3083
	mutex_unlock(&dev_priv->rps.hw_lock);
3348
	mutex_unlock(&dev_priv->rps.hw_lock);
Line 3084... Line 3349...
3084
}
3349
}
3085
 
3350
 
3086
void valleyview_set_rps(struct drm_device *dev, u8 val)
3351
void valleyview_set_rps(struct drm_device *dev, u8 val)
Line 3087... Line 3352...
3087
{
3352
{
3088
	struct drm_i915_private *dev_priv = dev->dev_private;
3353
	struct drm_i915_private *dev_priv = dev->dev_private;
3089
 
3354
 
Line 3090... Line 3355...
3090
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3355
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3091
	WARN_ON(val > dev_priv->rps.max_delay);
3356
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3092
	WARN_ON(val < dev_priv->rps.min_delay);
3357
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3093
 
3358
 
Line 3094... Line 3359...
3094
	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3359
	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3095
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-
 
3096
			 dev_priv->rps.cur_delay,
-
 
3097
			 vlv_gpu_freq(dev_priv, val), val);
3360
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
Line 3098... Line 3361...
3098
 
3361
			 dev_priv->rps.cur_freq,
Line -... Line 3362...
-
 
3362
			 vlv_gpu_freq(dev_priv, val), val);
3099
	if (val == dev_priv->rps.cur_delay)
3363
 
3100
		return;
3364
	if (val != dev_priv->rps.cur_freq)
Line -... Line 3365...
-
 
3365
	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-
 
3366
 
-
 
3367
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
 
3368
 
-
 
3369
	dev_priv->rps.cur_freq = val;
-
 
3370
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
-
 
3371
}
-
 
3372
 
-
 
3373
static void gen8_disable_rps_interrupts(struct drm_device *dev)
-
 
3374
{
-
 
3375
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3376
 
-
 
3377
	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-
 
3378
	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
-
 
3379
				   ~dev_priv->pm_rps_events);
-
 
3380
	/* Complete PM interrupt masking here doesn't race with the rps work
-
 
3381
	 * item again unmasking PM interrupts because that is using a different
-
 
3382
	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
-
 
3383
	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
-
 
3384
	 * gen8_enable_rps will clean up. */
3101
 
3385
 
3102
	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3386
	spin_lock_irq(&dev_priv->irq_lock);
3103
 
3387
	dev_priv->rps.pm_iir = 0;
Line 3104... Line 3388...
3104
	dev_priv->rps.cur_delay = val;
3388
	spin_unlock_irq(&dev_priv->irq_lock);
3105
 
3389
 
-
 
3390
	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3106
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3391
}
3107
}
3392
 
3108
 
3393
static void gen6_disable_rps_interrupts(struct drm_device *dev)
3109
static void gen6_disable_rps_interrupts(struct drm_device *dev)
3394
{
Line 3110... Line 3395...
3110
{
3395
	struct drm_i915_private *dev_priv = dev->dev_private;
3111
	struct drm_i915_private *dev_priv = dev->dev_private;
3396
 
3112
 
3397
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
Line 3113... Line 3398...
3113
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3398
	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3114
	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
3399
				~dev_priv->pm_rps_events);
Line 3115... Line 3400...
3115
	/* Complete PM interrupt masking here doesn't race with the rps work
3400
	/* Complete PM interrupt masking here doesn't race with the rps work
3116
	 * item again unmasking PM interrupts because that is using a different
3401
	 * item again unmasking PM interrupts because that is using a different
3117
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3402
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
Line 3118... Line 3403...
3118
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3403
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3119
 
3404
 
Line -... Line 3405...
-
 
3405
	spin_lock_irq(&dev_priv->irq_lock);
-
 
3406
	dev_priv->rps.pm_iir = 0;
-
 
3407
	spin_unlock_irq(&dev_priv->irq_lock);
3120
	spin_lock_irq(&dev_priv->irq_lock);
3408
 
3121
	dev_priv->rps.pm_iir = 0;
3409
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
Line -... Line 3410...
-
 
3410
}
-
 
3411
 
-
 
3412
static void gen6_disable_rps(struct drm_device *dev)
-
 
3413
{
-
 
3414
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3415
 
-
 
3416
	I915_WRITE(GEN6_RC_CONTROL, 0);
-
 
3417
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
-
 
3418
 
3122
	spin_unlock_irq(&dev_priv->irq_lock);
3419
	if (IS_BROADWELL(dev))
3123
 
3420
		gen8_disable_rps_interrupts(dev);
3124
	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3421
	else
Line 3125... Line 3422...
3125
}
3422
	gen6_disable_rps_interrupts(dev);
Line 3126... Line 3423...
3126
 
3423
}
3127
static void gen6_disable_rps(struct drm_device *dev)
-
 
3128
{
-
 
3129
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3130
 
-
 
3131
	I915_WRITE(GEN6_RC_CONTROL, 0);
-
 
3132
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3424
 
Line 3133... Line 3425...
3133
 
3425
static void cherryview_disable_rps(struct drm_device *dev)
3134
	gen6_disable_rps_interrupts(dev);
3426
{
3135
}
3427
	struct drm_i915_private *dev_priv = dev->dev_private;
3136
 
3428
 
-
 
3429
	I915_WRITE(GEN6_RC_CONTROL, 0);
3137
static void valleyview_disable_rps(struct drm_device *dev)
3430
 
3138
{
3431
	gen8_disable_rps_interrupts(dev);
3139
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3140
 
3432
}
3141
	I915_WRITE(GEN6_RC_CONTROL, 0);
3433
 
3142
 
3434
static void valleyview_disable_rps(struct drm_device *dev)
3143
	gen6_disable_rps_interrupts(dev);
3435
{
3144
 
3436
	struct drm_i915_private *dev_priv = dev->dev_private;
3145
	if (dev_priv->vlv_pctx) {
3437
 
Line 3146... Line 3438...
3146
		drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3438
	I915_WRITE(GEN6_RC_CONTROL, 0);
3147
		dev_priv->vlv_pctx = NULL;
3439
 
3148
	}
3440
	gen6_disable_rps_interrupts(dev);
3149
}
3441
}
3150
 
3442
 
Line -... Line 3443...
-
 
3443
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
-
 
3444
{
-
 
3445
	if (IS_VALLEYVIEW(dev)) {
-
 
3446
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
3151
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3447
			mode = GEN6_RC_CTL_RC6_ENABLE;
3152
{
3448
		else
-
 
3449
			mode = 0;
-
 
3450
	}
-
 
3451
	DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-
 
3452
			(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-
 
3453
			(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-
 
3454
			(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
-
 
3455
}
-
 
3456
 
-
 
3457
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
-
 
3458
{
-
 
3459
	/* No RC6 before Ironlake */
-
 
3460
	if (INTEL_INFO(dev)->gen < 5)
3153
	if (IS_GEN6(dev))
3461
		return 0;
-
 
3462
 
Line 3154... Line 3463...
3154
		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3463
	/* RC6 is only on Ironlake mobile not on desktop */
3155
 
3464
	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
3156
	if (IS_HASWELL(dev))
3465
		return 0;
Line 3157... Line 3466...
3157
		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3466
 
3158
 
3467
	/* Respect the kernel parameter if it is set */
Line 3159... Line -...
3159
	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-
 
3160
			(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-
 
3161
			(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3468
	if (enable_rc6 >= 0) {
-
 
3469
		int mask;
Line 3162... Line 3470...
3162
			(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3470
 
-
 
3471
		if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
-
 
3472
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3163
}
3473
			       INTEL_RC6pp_ENABLE;
Line 3164... Line 3474...
3164
 
3474
		else
3165
int intel_enable_rc6(const struct drm_device *dev)
3475
			mask = INTEL_RC6_ENABLE;
3166
{
3476
 
3167
	/* No RC6 before Ironlake */
-
 
Line 3168... Line 3477...
3168
	if (INTEL_INFO(dev)->gen < 5)
3477
		if ((enable_rc6 & mask) != enable_rc6)
3169
		return 0;
3478
			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
3170
 
3479
				 enable_rc6 & mask, enable_rc6, mask);
3171
	/* Respect the kernel parameter if it is set */
3480
 
3172
	if (i915_enable_rc6 >= 0)
3481
		return enable_rc6 & mask;
-
 
3482
	}
Line 3173... Line 3483...
3173
		return i915_enable_rc6;
3483
 
-
 
3484
	/* Disable RC6 on Ironlake */
3174
 
3485
	if (INTEL_INFO(dev)->gen == 5)
Line -... Line 3486...
-
 
3486
		return 0;
-
 
3487
 
3175
	/* Disable RC6 on Ironlake */
3488
	if (IS_IVYBRIDGE(dev))
3176
	if (INTEL_INFO(dev)->gen == 5)
3489
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
-
 
3490
 
-
 
3491
		return INTEL_RC6_ENABLE;
-
 
3492
}
-
 
3493
 
3177
		return 0;
3494
int intel_enable_rc6(const struct drm_device *dev)
3178
 
3495
{
-
 
3496
	return i915.enable_rc6;
-
 
3497
}
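
A worked pass through sanitize_rc6_option() above, assuming the usual kernel flag values (INTEL_RC6_ENABLE = 1 << 0, INTEL_RC6p_ENABLE = 1 << 1, INTEL_RC6pp_ENABLE = 1 << 2) and a hypothetical request:

	i915.enable_rc6 = 7 (RC6 | RC6p | RC6pp) on Haswell:
	mask = INTEL_RC6_ENABLE = 1        /* only plain RC6 is valid there */
	(7 & 1) != 7, so the "Adjusting RC6 mask" debug message fires
	return 7 & 1 = 1                   /* plain RC6 only */
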
-
 
3498
 
-
 
3499
static void gen8_enable_rps_interrupts(struct drm_device *dev)
-
 
3500
{
-
 
3501
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3502
 
-
 
3503
	spin_lock_irq(&dev_priv->irq_lock);
-
 
3504
	WARN_ON(dev_priv->rps.pm_iir);
-
 
3505
	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-
 
3506
	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3179
	if (IS_HASWELL(dev))
3507
	spin_unlock_irq(&dev_priv->irq_lock);
-
 
3508
}
Line 3180... Line 3509...
3180
		return INTEL_RC6_ENABLE;
3509
 
-
 
3510
static void gen6_enable_rps_interrupts(struct drm_device *dev)
3181
 
3511
{
Line 3182... Line 3512...
3182
	/* snb/ivb have more than one rc6 state. */
3512
	struct drm_i915_private *dev_priv = dev->dev_private;
3183
	if (INTEL_INFO(dev)->gen == 6)
3513
 
3184
		return INTEL_RC6_ENABLE;
3514
	spin_lock_irq(&dev_priv->irq_lock);
3185
 
3515
	WARN_ON(dev_priv->rps.pm_iir);
3186
	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3516
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3187
}
3517
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
Line 3188... Line 3518...
3188
 
3518
	spin_unlock_irq(&dev_priv->irq_lock);
3189
static void gen6_enable_rps_interrupts(struct drm_device *dev)
3519
}
Line 3225... Line 3555...
3225
 
3555
 
3226
	/* 2a: Disable RC states. */
3556
	/* 2a: Disable RC states. */
Line 3227... Line 3557...
3227
	I915_WRITE(GEN6_RC_CONTROL, 0);
3557
	I915_WRITE(GEN6_RC_CONTROL, 0);
-
 
3558
 
Line 3228... Line 3559...
3228
 
3559
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3229
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3560
	parse_rp_state_cap(dev_priv, rp_state_cap);
3230
 
3561
 
3231
	/* 2b: Program RC6 thresholds.*/
3562
	/* 2b: Program RC6 thresholds.*/
3232
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3563
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3233
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3564
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3234
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3565
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-
 
3566
	for_each_ring(ring, dev_priv, unused)
-
 
3567
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
-
 
3568
	I915_WRITE(GEN6_RC_SLEEP, 0);
3235
	for_each_ring(ring, dev_priv, unused)
3569
	if (IS_BROADWELL(dev))
Line 3236... Line 3570...
3236
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3570
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
3237
	I915_WRITE(GEN6_RC_SLEEP, 0);
3571
	else
3238
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3572
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
-
 
3573
 
-
 
3574
	/* 3: Enable RC6 */
3239
 
3575
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
-
 
3576
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-
 
3577
	intel_print_rc6_info(dev, rc6_mask);
-
 
3578
	if (IS_BROADWELL(dev))
3240
	/* 3: Enable RC6 */
3579
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3241
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3580
				GEN7_RC_CTL_TO_MODE |
3242
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3581
				rc6_mask);
Line 3243... Line 3582...
3243
	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
3582
	else
3244
	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3583
	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-
 
3584
			GEN6_RC_CTL_EI_MODE(1) |
3245
			GEN6_RC_CTL_EI_MODE(1) |
3585
			rc6_mask);
-
 
3586
 
3246
			rc6_mask);
3587
	/* 4 Program defaults and thresholds for RPS*/
3247
 
3588
	I915_WRITE(GEN6_RPNSWREQ,
Line 3248... Line 3589...
3248
	/* 4 Program defaults and thresholds for RPS*/
3589
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3249
	I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
3590
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
3250
	I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
3591
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3251
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3592
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
Line 3252... Line 3593...
3252
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3593
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3253
 
3594
 
3254
	/* Docs recommend 900MHz, and 300 MHz respectively */
3595
	/* Docs recommend 900MHz, and 300 MHz respectively */
3255
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3596
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
Line 3274... Line 3615...
3274
 
3615
 
Line 3275... Line 3616...
3275
	/* 6: Ring frequency + overclocking (our driver does this later */
3616
	/* 6: Ring frequency + overclocking (our driver does this later */
Line 3276... Line 3617...
3276
 
3617
 
Line 3277... Line 3618...
3277
	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3618
	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3278
 
3619
 
Line 3279... Line 3620...
3279
	gen6_enable_rps_interrupts(dev);
3620
	gen8_enable_rps_interrupts(dev);
3280
 
3621
 
3281
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3622
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3282
}
3623
}
3283
 
3624
 
3284
static void gen6_enable_rps(struct drm_device *dev)
3625
static void gen6_enable_rps(struct drm_device *dev)
3285
{
3626
{
3286
	struct drm_i915_private *dev_priv = dev->dev_private;
3627
	struct drm_i915_private *dev_priv = dev->dev_private;
3287
	struct intel_ring_buffer *ring;
3628
	struct intel_engine_cs *ring;
3288
	u32 rp_state_cap;
3629
	u32 rp_state_cap;
Line 3289... Line 3630...
3289
	u32 gt_perf_status;
3630
	u32 gt_perf_status;
Line 3311... Line 3652...
3311
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3652
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
Line 3312... Line 3653...
3312
 
3653
 
3313
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3654
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
Line 3314... Line -...
3314
	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
-
 
3315
 
-
 
3316
	/* In units of 50MHz */
-
 
3317
	dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
-
 
3318
	dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
-
 
3319
	dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
3655
	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3320
	dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
-
 
Line 3321... Line 3656...
3321
	dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
3656
 
3322
	dev_priv->rps.cur_delay = 0;
3657
	parse_rp_state_cap(dev_priv, rp_state_cap);
Line 3323... Line 3658...
3323
 
3658
 
Line 3366... Line 3701...
3366
	/* Power down if completely idle for over 50ms */
3701
	/* Power down if completely idle for over 50ms */
3367
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3702
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3368
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3703
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
Line 3369... Line 3704...
3369
 
3704
 
3370
	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3705
	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3371
	if (!ret) {
3706
	if (ret)
-
 
3707
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3372
		pcu_mbox = 0;
3708
 
3373
		ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3709
		ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3374
		if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3710
		if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3375
			DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3711
			DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3376
					 (dev_priv->rps.max_delay & 0xff) * 50,
3712
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3377
					 (pcu_mbox & 0xff) * 50);
3713
					 (pcu_mbox & 0xff) * 50);
3378
			dev_priv->rps.hw_max = pcu_mbox & 0xff;
-
 
3379
	}
-
 
3380
	} else {
-
 
3381
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3714
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
Line 3382... Line 3715...
3382
	}
3715
	}
3383
 
3716
 
Line 3384... Line 3717...
3384
	dev_priv->rps.power = HIGH_POWER; /* force a reset */
3717
	dev_priv->rps.power = HIGH_POWER; /* force a reset */
Line 3385... Line 3718...
3385
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3718
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3386
 
3719
 
Line 3401... Line 3734...
3401
	}
3734
	}
Line 3402... Line 3735...
3402
 
3735
 
3403
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3736
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
Line 3404... Line 3737...
3404
}
3737
}
3405
 
3738
 
3406
void gen6_update_ring_freq(struct drm_device *dev)
3739
static void __gen6_update_ring_freq(struct drm_device *dev)
3407
{
3740
{
3408
	struct drm_i915_private *dev_priv = dev->dev_private;
3741
	struct drm_i915_private *dev_priv = dev->dev_private;
3409
	int min_freq = 15;
3742
	int min_freq = 15;
Line 3414... Line 3747...
3414
 
3747
 
Line 3415... Line 3748...
3415
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3748
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3416
 
3749
 
3417
	max_ia_freq = cpufreq_quick_get_max(0);
3750
	max_ia_freq = cpufreq_quick_get_max(0);
3418
	/*
3751
	/*
3419
	 * Default to measured freq if none found, PCU will ensure we don't go
3752
		 * Default to measured freq if none found, PCU will ensure we
3420
	 * over
3753
		 * don't go over
Line 3421... Line 3754...
3421
	 */
3754
	 */
3422
		max_ia_freq = tsc_khz;
3755
		max_ia_freq = tsc_khz;
Line 3431... Line 3764...
3431
	/*
3764
	/*
3432
	 * For each potential GPU frequency, load a ring frequency we'd like
3765
	 * For each potential GPU frequency, load a ring frequency we'd like
3433
	 * to use for memory access.  We do this by specifying the IA frequency
3766
	 * to use for memory access.  We do this by specifying the IA frequency
3434
	 * the PCU should use as a reference to determine the ring frequency.
3767
	 * the PCU should use as a reference to determine the ring frequency.
3435
	 */
3768
	 */
3436
	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
3769
	for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3437
	     gpu_freq--) {
3770
	     gpu_freq--) {
3438
		int diff = dev_priv->rps.max_delay - gpu_freq;
3771
		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3439
		unsigned int ia_freq = 0, ring_freq = 0;
3772
		unsigned int ia_freq = 0, ring_freq = 0;
Line 3440... Line 3773...
3440
 
3773
 
3441
		if (INTEL_INFO(dev)->gen >= 8) {
3774
		if (INTEL_INFO(dev)->gen >= 8) {
3442
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
3775
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
Line 3466... Line 3799...
3466
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3799
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3467
					gpu_freq);
3800
					gpu_freq);
3468
	}
3801
	}
3469
}
3802
}
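
A hypothetical walk of the loop above: with min_freq_softlimit = 3 and max_freq_softlimit = 16 (50 MHz units), the loop makes 14 pcode writes, one ring/IA frequency table entry per GPU frequency:

	gpu_freq: 16 (800 MHz), 15 (750 MHz), ..., 3 (150 MHz)
	each step issues GEN6_PCODE_WRITE_MIN_FREQ_TABLE, pairing the chosen
	ia_freq/ring_freq ratios with that gpu_freq
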
Line -... Line 3803...
-
 
3803
 
-
 
3804
void gen6_update_ring_freq(struct drm_device *dev)
-
 
3805
{
-
 
3806
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3807
 
-
 
3808
	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
-
 
3809
		return;
-
 
3810
 
-
 
3811
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
3812
	__gen6_update_ring_freq(dev);
-
 
3813
	mutex_unlock(&dev_priv->rps.hw_lock);
-
 
3814
}
-
 
3815
 
-
 
3816
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
-
 
3817
{
-
 
3818
	u32 val, rp0;
-
 
3819
 
-
 
3820
	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
-
 
3821
	rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
-
 
3822
 
-
 
3823
	return rp0;
-
 
3824
}
-
 
3825
 
-
 
3826
static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-
 
3827
{
-
 
3828
	u32 val, rpe;
-
 
3829
 
-
 
3830
	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
-
 
3831
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
-
 
3832
 
-
 
3833
	return rpe;
-
 
3834
}
-
 
3835
 
-
 
3836
static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
-
 
3837
{
-
 
3838
	u32 val, rp1;
-
 
3839
 
-
 
3840
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
 
3841
	rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
-
 
3842
 
-
 
3843
	return rp1;
-
 
3844
}
-
 
3845
 
-
 
3846
static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
-
 
3847
{
-
 
3848
	u32 val, rpn;
-
 
3849
 
-
 
3850
	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
-
 
3851
	rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
-
 
3852
	return rpn;
-
 
3853
}
-
 
3854
 
-
 
3855
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
-
 
3856
{
-
 
3857
	u32 val, rp1;
-
 
3858
 
-
 
3859
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
 
3860
 
-
 
3861
	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
-
 
3862
 
-
 
3863
	return rp1;
-
 
3864
}
3470
 
3865
 
3471
int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3866
static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3472
{
3867
{
Line 3473... Line 3868...
3473
	u32 val, rp0;
3868
	u32 val, rp0;
Line 3491... Line 3886...
3491
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3886
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
Line 3492... Line 3887...
3492
 
3887
 
3493
	return rpe;
3888
	return rpe;
Line 3494... Line 3889...
3494
}
3889
}
3495
 
3890
 
3496
int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3891
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3497
{
3892
{
	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}

/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}

static void cherryview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pctx_paddr, paddr;
	struct i915_gtt *gtt = &dev_priv->gtt;
	u32 pcbr;
	int pctx_size = 32*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		paddr = (dev_priv->mm.stolen_base +
			 (gtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}
}
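/*
 * Illustrative sketch, not part of the driver: VLV_PCBR holds a physical,
 * 4KiB-aligned power-context address, which is why the check and setup
 * paths above mask off the low 12 bits (~4095). Carving the context from
 * the top of stolen memory looks like this in isolation (the parameter
 * names are hypothetical):
 */
#if 0
static unsigned long pctx_base(unsigned long stolen_base,
			       unsigned long stolen_size,
			       unsigned long pctx_size)
{
	/* Place the context at the very end of stolen memory... */
	unsigned long paddr = stolen_base + (stolen_size - pctx_size);

	/* ...rounded down to the 4KiB boundary VLV_PCBR expects. */
	return paddr & ~4095UL;
}
#endif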

static void valleyview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

Line 3538... Line 3973...
out:
	dev_priv->vlv_pctx = pctx;
}

static void valleyview_cleanup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}

static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	valleyview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void cherryview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	cherryview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}

static void cherryview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR  */
	pcbr = I915_READ(VLV_PCBR);

	DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
						(pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN6_RC_CTL_EI_MODE(1);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen8_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
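/*
 * Illustrative sketch, not part of the driver: step 3 above only turns
 * RC6 on when the module option allows it *and* the BIOS has populated
 * VLV_PCBR, since the hardware needs a valid power-context buffer to
 * save state into. As a standalone predicate:
 */
#if 0
static bool chv_rc6_usable(int rc6_option, u32 pcbr)
{
	return (rc6_option & INTEL_RC6_ENABLE) &&
	       (pcbr >> VLV_PCBR_ADDR_SHIFT) != 0;
}
#endif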

static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
Line 3584... Line 4206...

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
Line 3599... Line 4223...
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

Line 3636... Line 4244...
void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
		dev_priv->ips.renderctx = NULL;
	}

	if (dev_priv->ips.pwrctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
		dev_priv->ips.pwrctx = NULL;
Line 3688... Line 4296...
}

static void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	bool was_interruptible;
	int ret;

Line 3746... Line 4354...
	}

	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);

	intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
}

Line 3782... Line 4390...

static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

Line 3834... Line 4442...
	return ret;
}

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;
3865
	return ((m * x) / 127) - b;
4474
	return ((m * x) / 127) - b;
3866
}
4475
}
Line 3867... Line 4476...
3867
 
4476
 
3868
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4477
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-
 
4478
{
3869
{
4479
	struct drm_device *dev = dev_priv->dev;
3870
	static const struct v_table {
4480
	static const struct v_table {
3871
		u16 vd; /* in .1 mil */
4481
		u16 vd; /* in .1 mil */
3872
		u16 vm; /* in .1 mil */
4482
		u16 vm; /* in .1 mil */
3873
	} v_table[] = {
4483
	} v_table[] = {
Line 3998... Line 4608...
		{ 15750, 14625, },
		{ 15875, 14750, },
		{ 16000, 14875, },
		{ 16125, 15000, },
	};
	if (INTEL_INFO(dev)->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
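/*
 * Illustrative sketch, not part of the driver: taking ".1 mil" in the
 * table above to mean tenths of a millivolt, a looked-up value of 16125
 * corresponds to 1612.5 mV (about 1.61 V). Converting to whole
 * millivolts:
 */
#if 0
static unsigned int extvid_to_mv(u16 extvid)
{
	return (extvid + 5) / 10;	/* 0.1 mV units -> mV, rounded */
}
#endif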

static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
Line 4041... Line 4649...
	dev_priv->ips.gfx_power = diff;
}
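/*
 * Illustrative sketch, not part of the driver: the update path above
 * timestamps in nanoseconds and converts the interval to milliseconds
 * with do_div(), the kernel's 64-by-32 division helper (a plain 64-bit
 * '/' is not available to 32-bit kernel code):
 */
#if 0
static u64 interval_ms(u64 now_ns, u64 then_ns)
{
	u64 diffms = now_ns - then_ns;

	do_div(diffms, NSEC_PER_MSEC);	/* divides diffms in place */
	return diffms;
}
#endif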

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

Line 4058... Line 4668...
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
	pxvid = (pxvid >> 24) & 0x7f;
Line 4090... Line 4700...
	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;
Line 4195... Line 4806...
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *ring;
	bool ret = false;
	int i;

	spin_lock_irq(&mchdev_lock);
Line 4281... Line 4892...
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}

static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
Line 4352... Line 4964...
	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	if (IS_CHERRYVIEW(dev))
		cherryview_init_gt_powersave(dev);
	else if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_CHERRYVIEW(dev))
		return;
	else if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

//	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	cancel_work_sync(&dev_priv->rps.work);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		intel_suspend_gt_powersave(dev);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (IS_CHERRYVIEW(dev))
			cherryview_disable_rps(dev);
		else if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
Line 4384... Line 5039...
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_enable_rps(dev);
	} else if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}
	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}

void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					   round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->rps.enabled = false;
	intel_enable_gt_powersave(dev);
}
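/*
 * Illustrative sketch, not part of the driver: the enable path above only
 * takes a runtime-PM reference when the delayed work was actually queued
 * (schedule_delayed_work() returns false if it was already pending), so
 * the intel_runtime_pm_put() at the end of the work function balances
 * exactly one _get_noresume(). The pairing, reduced to a pattern (the
 * names below are generic placeholders):
 */
#if 0
	if (schedule_delayed_work(&work, delay))
		take_rpm_reference();	/* dropped again inside the work fn */
#endif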
Line 4521... Line 5198...

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
Line 4569... Line 5249...
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
Line 4596... Line 5274...
	/* WaSetupGtModeTdRowDispatch:snb */
	if (IS_SNB_GT1(dev))
		I915_WRITE(GEN6_GT_MODE,
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	ilk_init_lp_watermarks(dev);

Line 4616... Line 5308...
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
Line 4652... Line 5351...
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
Line 4711... Line 5408...
}

static void gen8_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* FIXME(BDW): Check all the w/a, some might only apply to
	 * pre-production hw. */

	/* WaDisablePartialInstShootdown:bdw */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:bdw */
	/* FIXME: Unclear whether we really need this on production bdw. */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/*
	 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
	 * pre-production hardware
	 */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));

	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));

	I915_WRITE(COMMON_SLICE_CHICKEN2,
		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));

	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));

	/* WaDisableDopClockGating:bdw May not be needed for production */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
Line 4764... Line 5476...
	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
Line 4798... Line 5517...
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	/* WaSwitchSolVfFArbitrationPriority:hsw */
Line 4836... Line 5574...

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
Line 4852... Line 5590...
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
			GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
Line 4913... Line 5660...
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	mutex_unlock(&dev_priv->rps.hw_lock);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
Line 4938... Line 5683...
	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

Line 4964... Line 5705...
	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->rps.hw_lock);
	switch ((val >> 2) & 0x7) {
	case 0:
	case 1:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
		dev_priv->mem_freq = 1600;
		break;
	case 2:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
		dev_priv->mem_freq = 1600;
		break;
	case 3:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
		dev_priv->mem_freq = 2000;
		break;
	case 4:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
		dev_priv->mem_freq = 1600;
		break;
	case 5:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaDisablePartialInstShootdown:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));

	/* WaDisableGunitClockGating:chv (pre-production hw) */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
		   GINT_DIS);

	/* WaDisableFfDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));

	/* WaDisableDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
Line 5032... Line 5846...
5032
 
5846
 
5033
	/* WaDisableRenderCachePipelinedFlush */
5847
	/* WaDisableRenderCachePipelinedFlush */
5034
	I915_WRITE(CACHE_MODE_0,
5848
	I915_WRITE(CACHE_MODE_0,
Line -... Line 5849...
-
 
5849
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
-
 
5850
 
-
 
5851
	/* WaDisable_RenderCache_OperationalFlush:g4x */
5035
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5852
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5036
 
5853
 
Line 5037... Line 5854...
5037
	g4x_disable_trickle_feed(dev);
5854
	g4x_disable_trickle_feed(dev);
5038
}
5855
}
Line 5046... Line 5863...
5046
	I915_WRITE(DSPCLK_GATE_D, 0);
5863
	I915_WRITE(DSPCLK_GATE_D, 0);
5047
	I915_WRITE(RAMCLK_GATE_D, 0);
5864
	I915_WRITE(RAMCLK_GATE_D, 0);
5048
	I915_WRITE16(DEUC, 0);
5865
	I915_WRITE16(DEUC, 0);
5049
	I915_WRITE(MI_ARB_STATE,
5866
	I915_WRITE(MI_ARB_STATE,
5050
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5867
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-
 
5868
 
-
 
5869
	/* WaDisable_RenderCache_OperationalFlush:gen4 */
-
 
5870
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5051
}
5871
}
Line 5052... Line 5872...
5052
 
5872
 
5053
static void broadwater_init_clock_gating(struct drm_device *dev)
5873
static void broadwater_init_clock_gating(struct drm_device *dev)
5054
{
5874
{
Line 5060... Line 5880...
5060
		   I965_ISC_CLOCK_GATE_DISABLE |
5880
		   I965_ISC_CLOCK_GATE_DISABLE |
5061
		   I965_FBC_CLOCK_GATE_DISABLE);
5881
		   I965_FBC_CLOCK_GATE_DISABLE);
5062
	I915_WRITE(RENCLK_GATE_D2, 0);
5882
	I915_WRITE(RENCLK_GATE_D2, 0);
5063
	I915_WRITE(MI_ARB_STATE,
5883
	I915_WRITE(MI_ARB_STATE,
5064
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5884
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-
 
5885
 
-
 
5886
	/* WaDisable_RenderCache_OperationalFlush:gen4 */
-
 
5887
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5065
}
5888
}
Line 5066... Line 5889...
5066
 
5889
 
5067
static void gen3_init_clock_gating(struct drm_device *dev)
5890
static void gen3_init_clock_gating(struct drm_device *dev)
5068
{
5891
{
Line 5076... Line 5899...
5076
	if (IS_PINEVIEW(dev))
5899
	if (IS_PINEVIEW(dev))
5077
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
5900
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
Line 5078... Line 5901...
5078
 
5901
 
5079
	/* IIR "flip pending" means done if this bit is set */
5902
	/* IIR "flip pending" means done if this bit is set */
-
 
5903
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
-
 
5904
 
-
 
5905
	/* interrupts should cause a wake up from C3 */
-
 
5906
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
-
 
5907
 
-
 
5908
	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
5080
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
5909
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
Line 5081... Line 5910...
5081
}
5910
}
5082
 
5911
 
5083
static void i85x_init_clock_gating(struct drm_device *dev)
5912
static void i85x_init_clock_gating(struct drm_device *dev)
Line 5084... Line 5913...
5084
{
5913
{
-
 
5914
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
5915
 
-
 
5916
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-
 
5917
 
5085
	struct drm_i915_private *dev_priv = dev->dev_private;
5918
	/* interrupts should cause a wake up from C3 */
Line 5086... Line 5919...
5086
 
5919
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
5087
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
5920
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
5088
}
5921
}
Line 5123... Line 5956...
5123
/**
5956
/**
5124
 * We should only use the power well if we explicitly asked the hardware to
5957
 * We should only use the power well if we explicitly asked the hardware to
5125
 * enable it, so check if it's enabled and also check if we've requested it to
5958
 * enable it, so check if it's enabled and also check if we've requested it to
5126
 * be enabled.
5959
 * be enabled.
5127
 */
5960
 */
5128
static bool hsw_power_well_enabled(struct drm_device *dev,
5961
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
5129
				   struct i915_power_well *power_well)
5962
				   struct i915_power_well *power_well)
5130
{
5963
{
5131
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
5132
 
-
 
5133
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
5964
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
5134
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5965
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5135
}
5966
}
Line 5136... Line 5967...
5136
 
5967
 
5137
bool intel_display_power_enabled_sw(struct drm_device *dev,
-
 
5138
				    enum intel_display_power_domain domain)
-
 
5139
{
-
 
5140
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
5141
	struct i915_power_domains *power_domains;
-
 
5142
 
-
 
5143
	power_domains = &dev_priv->power_domains;
-
 
5144
 
-
 
5145
	return power_domains->domain_use_count[domain];
-
 
5146
}
-
 
5147
 
-
 
5148
bool intel_display_power_enabled(struct drm_device *dev,
5968
bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
5149
				 enum intel_display_power_domain domain)
5969
				 enum intel_display_power_domain domain)
5150
{
-
 
5151
	struct drm_i915_private *dev_priv = dev->dev_private;
5970
{
5152
	struct i915_power_domains *power_domains;
5971
	struct i915_power_domains *power_domains;
5153
	struct i915_power_well *power_well;
5972
	struct i915_power_well *power_well;
5154
	bool is_enabled;
5973
	bool is_enabled;
Line -... Line 5974...
-
 
5974
	int i;
-
 
5975
 
-
 
5976
	if (dev_priv->pm.suspended)
5155
	int i;
5977
		return false;
Line 5156... Line 5978...
5156
 
5978
 
Line 5157... Line -...
5157
	power_domains = &dev_priv->power_domains;
-
 
5158
 
5979
	power_domains = &dev_priv->power_domains;
5159
	is_enabled = true;
5980
 
5160
 
5981
	is_enabled = true;
Line 5161... Line 5982...
5161
	mutex_lock(&power_domains->lock);
5982
 
5162
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5983
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5163
		if (power_well->always_on)
5984
		if (power_well->always_on)
5164
			continue;
5985
			continue;
5165
 
5986
 
5166
		if (!power_well->is_enabled(dev, power_well)) {
-
 
Line 5167... Line 5987...
5167
			is_enabled = false;
5987
		if (!power_well->hw_enabled) {
5168
			break;
5988
			is_enabled = false;
Line -... Line 5989...
-
 
5989
			break;
-
 
5990
		}
-
 
5991
	}
-
 
5992
 
-
 
5993
	return is_enabled;
-
 
5994
}
-
 
5995
 
-
 
5996
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
-
 
5997
				 enum intel_display_power_domain domain)
-
 
5998
{
-
 
5999
	struct i915_power_domains *power_domains;
-
 
6000
	bool ret;
-
 
6001
 
-
 
6002
	power_domains = &dev_priv->power_domains;
-
 
6003
 
-
 
6004
	mutex_lock(&power_domains->lock);
-
 
6005
	ret = intel_display_power_enabled_unlocked(dev_priv, domain);
-
 
6006
	mutex_unlock(&power_domains->lock);
-
 
6007
 
-
 
6008
	return ret;
-
 
6009
}
5169
		}
6010
 
5170
	}
6011
/*
5171
	mutex_unlock(&power_domains->lock);
6012
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5172
 
-
 
Line 5173... Line 6013...
5173
	return is_enabled;
6013
 * when not needed anymore. We have 4 registers that can request the power well
5174
}
6014
 * to be enabled, and it will only be disabled if none of the registers is
5175
 
6015
 * requesting it to be enabled.
5176
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
6016
 */
Line 5190... Line 6030...
5190
	 */
6030
	 */
5191
//   vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
6031
//   vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
5192
    outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
6032
    outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
5193
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6033
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
Line 5194... Line 6034...
5194
 
6034
 
5195
	if (IS_BROADWELL(dev)) {
-
 
5196
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
5197
		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
-
 
5198
			   dev_priv->de_irq_mask[PIPE_B]);
-
 
5199
		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
-
 
5200
			   ~dev_priv->de_irq_mask[PIPE_B] |
-
 
5201
			   GEN8_PIPE_VBLANK);
-
 
5202
		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
-
 
5203
			   dev_priv->de_irq_mask[PIPE_C]);
-
 
5204
		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
-
 
5205
			   ~dev_priv->de_irq_mask[PIPE_C] |
-
 
5206
			   GEN8_PIPE_VBLANK);
-
 
5207
		POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
-
 
5208
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
 
5209
	}
-
 
5210
}
-
 
5211
 
6035
	if (IS_BROADWELL(dev))
5212
static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
-
 
5213
{
-
 
5214
	struct drm_device *dev = dev_priv->dev;
-
 
5215
	enum pipe p;
-
 
5216
	unsigned long irqflags;
-
 
5217
 
-
 
5218
	/*
-
 
5219
	 * After this, the registers on the pipes that are part of the power
-
 
5220
	 * well will become zero, so we have to adjust our counters according to
-
 
5221
	 * that.
-
 
5222
	 *
-
 
5223
	 * FIXME: Should we do this in general in drm_vblank_post_modeset?
-
 
5224
	 */
-
 
5225
//   spin_lock_irqsave(&dev->vbl_lock, irqflags);
-
 
5226
//   for_each_pipe(p)
-
 
5227
//       if (p != PIPE_A)
-
 
5228
//           dev->vblank[p].last = 0;
-
 
5229
//   spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
6036
		gen8_irq_power_well_post_enable(dev_priv);
Line 5230... Line 6037...
5230
}
6037
}
5231
 
6038
 
5232
static void hsw_set_power_well(struct drm_device *dev,
6039
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
5233
			       struct i915_power_well *power_well, bool enable)
-
 
5234
{
6040
			       struct i915_power_well *power_well, bool enable)
5235
	struct drm_i915_private *dev_priv = dev->dev_private;
6041
{
Line 5236... Line -...
5236
	bool is_enabled, enable_requested;
-
 
5237
	uint32_t tmp;
-
 
5238
 
6042
	bool is_enabled, enable_requested;
5239
	WARN_ON(dev_priv->pc8.enabled);
6043
	uint32_t tmp;
5240
 
6044
 
Line 5241... Line 6045...
5241
	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
6045
	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
Line 5258... Line 6062...
5258
	} else {
6062
	} else {
5259
		if (enable_requested) {
6063
		if (enable_requested) {
5260
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
6064
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
5261
			POSTING_READ(HSW_PWR_WELL_DRIVER);
6065
			POSTING_READ(HSW_PWR_WELL_DRIVER);
5262
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
6066
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
-
 
6067
		}
-
 
6068
		}
-
 
6069
}
-
 
6070
 
-
 
6071
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
-
 
6072
				   struct i915_power_well *power_well)
-
 
6073
{
-
 
6074
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
Line -... Line 6075...
-
 
6075
 
-
 
6076
	/*
-
 
6077
	 * We're taking over the BIOS, so clear any requests made by it since
-
 
6078
	 * the driver is in charge now.
-
 
6079
	 */
-
 
6080
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
-
 
6081
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
-
 
6082
}
-
 
6083
 
-
 
6084
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
-
 
6085
				  struct i915_power_well *power_well)
5263
 
6086
{
5264
			hsw_power_well_post_disable(dev_priv);
6087
	hsw_set_power_well(dev_priv, power_well, true);
-
 
6088
}
-
 
6089
 
-
 
6090
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
-
 
6091
				   struct i915_power_well *power_well)
-
 
6092
{
5265
		}
6093
	hsw_set_power_well(dev_priv, power_well, false);
-
 
6094
}
-
 
6095
 
-
 
6096
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
-
 
6097
					   struct i915_power_well *power_well)
5266
		}
6098
{
Line 5267... Line 6099...
5267
}
6099
}
5268
 
6100
 
5269
static void __intel_power_well_get(struct drm_device *dev,
6101
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
-
 
6102
					     struct i915_power_well *power_well)
-
 
6103
{
-
 
6104
	return true;
-
 
6105
}
-
 
6106
 
-
 
6107
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
5270
				   struct i915_power_well *power_well)
6108
			       struct i915_power_well *power_well, bool enable)
-
 
6109
{
-
 
6110
	enum punit_power_well power_well_id = power_well->data;
-
 
6111
	u32 mask;
-
 
6112
	u32 state;
-
 
6113
	u32 ctrl;
-
 
6114
 
-
 
6115
	mask = PUNIT_PWRGT_MASK(power_well_id);
-
 
6116
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
-
 
6117
			 PUNIT_PWRGT_PWR_GATE(power_well_id);
-
 
6118
 
-
 
6119
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
6120
 
-
 
6121
#define COND \
-
 
6122
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
 
6123
 
Line -... Line 6124...
-
 
6124
	if (COND)
-
 
6125
		goto out;
-
 
6126
 
-
 
6127
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
-
 
6128
	ctrl &= ~mask;
-
 
6129
	ctrl |= state;
5271
{
6130
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
 
6131
 
5272
	struct drm_i915_private *dev_priv = dev->dev_private;
6132
	if (wait_for(COND, 100))
-
 
6133
		DRM_ERROR("timout setting power well state %08x (%08x)\n",
-
 
6134
			  state,
-
 
6135
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
 
6136
 
5273
 
6137
#undef COND
5274
	if (!power_well->count++ && power_well->set) {
6138
 
-
 
6139
out:
-
 
6140
	mutex_unlock(&dev_priv->rps.hw_lock);
-
 
6141
}
-
 
6142
 
-
 
6143
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
5275
		hsw_disable_package_c8(dev_priv);
6144
				   struct i915_power_well *power_well)
Line 5276... Line 6145...
5276
		power_well->set(dev, power_well, true);
6145
{
5277
		}
6146
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
5278
}
6147
}
5279
 
6148
 
-
 
6149
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
Line -... Line 6150...
-
 
6150
				  struct i915_power_well *power_well)
-
 
6151
{
-
 
6152
	vlv_set_power_well(dev_priv, power_well, true);
-
 
6153
}
-
 
6154
 
-
 
6155
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
-
 
6156
				   struct i915_power_well *power_well)
-
 
6157
{
-
 
6158
	vlv_set_power_well(dev_priv, power_well, false);
5280
static void __intel_power_well_put(struct drm_device *dev,
6159
}
-
 
6160
 
-
 
6161
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
-
 
6162
				   struct i915_power_well *power_well)
-
 
6163
{
Line -... Line 6164...
-
 
6164
	int power_well_id = power_well->data;
-
 
6165
	bool enabled = false;
-
 
6166
	u32 mask;
-
 
6167
	u32 state;
-
 
6168
	u32 ctrl;
-
 
6169
 
-
 
6170
	mask = PUNIT_PWRGT_MASK(power_well_id);
-
 
6171
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
-
 
6172
 
-
 
6173
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
6174
 
5281
				   struct i915_power_well *power_well)
6175
	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
-
 
6176
	/*
-
 
6177
	 * We only ever set the power-on and power-gate states, anything
-
 
6178
	 * else is unexpected.
-
 
6179
	 */
-
 
6180
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
-
 
6181
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
-
 
6182
	if (state == ctrl)
-
 
6183
		enabled = true;
-
 
6184
 
-
 
6185
	/*
-
 
6186
	 * A transient state at this point would mean some unexpected party
-
 
6187
	 * is poking at the power controls too.
-
 
6188
	 */
-
 
6189
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
-
 
6190
	WARN_ON(ctrl != state);
-
 
6191
 
5282
{
6192
	mutex_unlock(&dev_priv->rps.hw_lock);
-
 
6193
 
-
 
6194
	return enabled;
-
 
6195
}
5283
	struct drm_i915_private *dev_priv = dev->dev_private;
6196
 
-
 
6197
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
-
 
6198
					  struct i915_power_well *power_well)
5284
 
6199
{
-
 
6200
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
 
6201
 
-
 
6202
	vlv_set_power_well(dev_priv, power_well, true);
-
 
6203
 
-
 
6204
	spin_lock_irq(&dev_priv->irq_lock);
-
 
6205
	valleyview_enable_display_irqs(dev_priv);
-
 
6206
	spin_unlock_irq(&dev_priv->irq_lock);
-
 
6207
 
-
 
6208
	/*
-
 
6209
	 * During driver initialization/resume we can avoid restoring the
-
 
6210
	 * part of the HW/SW state that will be inited anyway explicitly.
-
 
6211
	 */
5285
	WARN_ON(!power_well->count);
6212
	if (dev_priv->power_domains.initializing)
-
 
6213
		return;
-
 
6214
 
-
 
6215
		intel_hpd_init(dev_priv->dev);
-
 
6216
 
-
 
6217
	i915_redisable_vga_power_on(dev_priv->dev);
-
 
6218
}
-
 
6219
 
-
 
6220
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
-
 
6221
				   struct i915_power_well *power_well)
-
 
6222
{
-
 
6223
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
5286
 
6224
 
Line -... Line 6225...
-
 
6225
	spin_lock_irq(&dev_priv->irq_lock);
-
 
6226
	valleyview_disable_display_irqs(dev_priv);
-
 
6227
	spin_unlock_irq(&dev_priv->irq_lock);
-
 
6228
 
-
 
6229
	vlv_set_power_well(dev_priv, power_well, false);
-
 
6230
}
-
 
6231
 
-
 
6232
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-
 
6233
					   struct i915_power_well *power_well)
-
 
6234
{
-
 
6235
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
 
6236
 
-
 
6237
	/*
-
 
6238
	 * Enable the CRI clock source so we can get at the
-
 
6239
	 * display and the reference clock for VGA
-
 
6240
	 * hotplug / manual detection.
-
 
6241
	 */
-
 
6242
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-
 
6243
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
-
 
6244
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
 
6245
 
-
 
6246
	vlv_set_power_well(dev_priv, power_well, true);
-
 
6247
 
-
 
6248
	/*
-
 
6249
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-
 
6250
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
-
 
6251
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
-
 
6252
	 *   b.	The other bits such as sfr settings / modesel may all
-
 
6253
	 *	be set to 0.
-
 
6254
	 *
-
 
6255
	 * This should only be done on init and resume from S3 with
-
 
6256
	 * both PLLs disabled, or we risk losing DPIO and PLL
-
 
6257
	 * synchronization.
-
 
6258
	 */
-
 
6259
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-
 
6260
}
-
 
6261
 
-
 
6262
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-
 
6263
				   struct i915_power_well *power_well)
-
 
6264
{
-
 
6265
	struct drm_device *dev = dev_priv->dev;
-
 
6266
	enum pipe pipe;
-
 
6267
 
-
 
6268
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
 
6269
 
-
 
6270
	for_each_pipe(pipe)
-
 
6271
		assert_pll_disabled(dev_priv, pipe);
-
 
6272
 
-
 
6273
	/* Assert common reset */
-
 
6274
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
-
 
6275
 
-
 
6276
	vlv_set_power_well(dev_priv, power_well, false);
-
 
6277
}
-
 
6278
 
-
 
6279
static void check_power_well_state(struct drm_i915_private *dev_priv,
-
 
6280
				   struct i915_power_well *power_well)
-
 
6281
{
-
 
6282
	bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
 
6283
 
-
 
6284
	if (power_well->always_on || !i915.disable_power_well) {
-
 
6285
		if (!enabled)
-
 
6286
			goto mismatch;
-
 
6287
 
-
 
6288
		return;
-
 
6289
	}
-
 
6290
 
-
 
6291
	if (enabled != (power_well->count > 0))
-
 
6292
		goto mismatch;
-
 
6293
 
-
 
6294
	return;
5287
	if (!--power_well->count && power_well->set &&
6295
 
5288
	    i915_disable_power_well) {
6296
mismatch:
5289
		power_well->set(dev, power_well, false);
6297
	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
5290
		hsw_enable_package_c8(dev_priv);
-
 
5291
	}
6298
		  power_well->name, power_well->always_on, enabled,
5292
}
6299
		  power_well->count, i915.disable_power_well);
5293
 
6300
}
Line -... Line 6301...
-
 
6301
 
-
 
6302
void intel_display_power_get(struct drm_i915_private *dev_priv,
5294
void intel_display_power_get(struct drm_device *dev,
6303
			     enum intel_display_power_domain domain)
Line 5295... Line 6304...
5295
			     enum intel_display_power_domain domain)
6304
{
Line 5296... Line 6305...
5296
{
6305
	struct i915_power_domains *power_domains;
-
 
6306
	struct i915_power_well *power_well;
-
 
6307
	int i;
-
 
6308
 
-
 
6309
	intel_runtime_pm_get(dev_priv);
-
 
6310
 
-
 
6311
	power_domains = &dev_priv->power_domains;
5297
	struct drm_i915_private *dev_priv = dev->dev_private;
6312
 
-
 
6313
	mutex_lock(&power_domains->lock);
Line 5298... Line 6314...
5298
	struct i915_power_domains *power_domains;
6314
 
Line 5299... Line 6315...
5299
	struct i915_power_well *power_well;
6315
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
5300
	int i;
6316
		if (!power_well->count++) {
Line 5301... Line 6317...
5301
 
6317
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
5302
	power_domains = &dev_priv->power_domains;
6318
			power_well->ops->enable(dev_priv, power_well);
5303
 
6319
			power_well->hw_enabled = true;
5304
	mutex_lock(&power_domains->lock);
-
 
5305
 
6320
		}
5306
	for_each_power_well(i, power_well, BIT(domain), power_domains)
6321
 
5307
		__intel_power_well_get(dev, power_well);
6322
		check_power_well_state(dev_priv, power_well);
Line 5308... Line 6323...
5308
 
6323
	}
Line 5324... Line 6339...
5324
	mutex_lock(&power_domains->lock);
6339
	mutex_lock(&power_domains->lock);
Line 5325... Line 6340...
5325
 
6340
 
5326
	WARN_ON(!power_domains->domain_use_count[domain]);
6341
	WARN_ON(!power_domains->domain_use_count[domain]);
Line 5327... Line 6342...
5327
	power_domains->domain_use_count[domain]--;
6342
	power_domains->domain_use_count[domain]--;
-
 
6343
 
-
 
6344
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-
 
6345
		WARN_ON(!power_well->count);
-
 
6346
 
-
 
6347
		if (!--power_well->count && i915.disable_power_well) {
-
 
6348
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
-
 
6349
			power_well->hw_enabled = false;
-
 
6350
			power_well->ops->disable(dev_priv, power_well);
5328
 
6351
		}
-
 
6352
 
Line 5329... Line 6353...
5329
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
6353
		check_power_well_state(dev_priv, power_well);
-
 
6354
	}
-
 
6355
 
5330
		__intel_power_well_put(dev, power_well);
6356
	mutex_unlock(&power_domains->lock);
Line 5331... Line 6357...
5331
 
6357
 
Line 5332... Line 6358...
5332
	mutex_unlock(&power_domains->lock);
6358
	intel_runtime_pm_put(dev_priv);
5333
}
6359
}
5334
 
6360
 
5335
static struct i915_power_domains *hsw_pwr;
6361
static struct i915_power_domains *hsw_pwr;
Line 5336... Line 6362...
5336
 
6362
 
5337
/* Display audio driver power well request */
6363
/* Display audio driver power well request */
Line 5338... Line 6364...
5338
void i915_request_power_well(void)
6364
int i915_request_power_well(void)
5339
{
6365
{
5340
	struct drm_i915_private *dev_priv;
6366
	struct drm_i915_private *dev_priv;
-
 
6367
 
5341
 
6368
	if (!hsw_pwr)
5342
	if (WARN_ON(!hsw_pwr))
6369
		return -ENODEV;
Line 5343... Line 6370...
5343
		return;
6370
 
5344
 
6371
	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5345
	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6372
				power_domains);
5346
				power_domains);
6373
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
Line 5347... Line 6374...
5347
	intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
6374
	return 0;
5348
}
6375
}
Line 5349... Line 6376...
5349
EXPORT_SYMBOL_GPL(i915_request_power_well);
6376
EXPORT_SYMBOL_GPL(i915_request_power_well);
5350
 
6377
 
5351
/* Display audio driver power well release */
6378
/* Display audio driver power well release */
-
 
6379
int i915_release_power_well(void)
5352
void i915_release_power_well(void)
6380
{
5353
{
6381
	struct drm_i915_private *dev_priv;
Line -... Line 6382...
-
 
6382
 
-
 
6383
	if (!hsw_pwr)
-
 
6384
		return -ENODEV;
-
 
6385
 
-
 
6386
	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
-
 
6387
				power_domains);
-
 
6388
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
 
6389
	return 0;
-
 
6390
}
-
 
6391
EXPORT_SYMBOL_GPL(i915_release_power_well);
-
 
6392
 
-
 
6393
/*
-
 
6394
 * Private interface for the audio driver to get CDCLK in kHz.
-
 
6395
 *
-
 
6396
 * Caller must request power well using i915_request_power_well() prior to
-
 
6397
 * making the call.
-
 
6398
 */
-
 
6399
int i915_get_cdclk_freq(void)
-
 
6400
{
-
 
6401
	struct drm_i915_private *dev_priv;
-
 
6402
 
-
 
6403
	if (!hsw_pwr)
-
 
6404
		return -ENODEV;
-
 
6405
 
-
 
6406
	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
-
 
6407
				power_domains);
-
 
6408
 
-
 
6409
	return intel_ddi_get_cdclk_freq(dev_priv);
-
 
6410
}
-
 
6411
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
-
 
6412
 
-
 
6413
 
-
 
6414
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
 
6415
 
-
 
6416
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
-
 
6417
	BIT(POWER_DOMAIN_PIPE_A) |			\
-
 
6418
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
-
 
6419
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-
 
6420
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
-
 
6421
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-
 
6422
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
-
 
6423
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-
 
6424
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
-
 
6425
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
-
 
6426
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
-
 
6427
	BIT(POWER_DOMAIN_PORT_CRT) |			\
-
 
6428
	BIT(POWER_DOMAIN_PLLS) |			\
-
 
6429
	BIT(POWER_DOMAIN_INIT))
-
 
6430
#define HSW_DISPLAY_POWER_DOMAINS (				\
-
 
6431
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
-
 
6432
	BIT(POWER_DOMAIN_INIT))
-
 
6433
 
-
 
6434
#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
-
 
6435
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
-
 
6436
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-
 
6437
#define BDW_DISPLAY_POWER_DOMAINS (				\
-
 
6438
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
-
 
6439
	BIT(POWER_DOMAIN_INIT))
-
 
6440
 
-
 
6441
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
-
 
6442
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK
-
 
6443
 
-
 
6444
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
-
 
6445
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
-
 
6446
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
-
 
6447
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
-
 
6448
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
-
 
6449
	BIT(POWER_DOMAIN_PORT_CRT) |		\
-
 
6450
	BIT(POWER_DOMAIN_INIT))
-
 
6451
 
-
 
6452
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
-
 
6453
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
-
 
6454
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
-
 
6455
	BIT(POWER_DOMAIN_INIT))
-
 
6456
 
-
 
6457
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
-
 
6458
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
-
 
6459
	BIT(POWER_DOMAIN_INIT))
-
 
6460
 
-
 
6461
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
-
 
6462
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
-
 
6463
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
-
 
6464
	BIT(POWER_DOMAIN_INIT))
-
 
6465
 
5354
	struct drm_i915_private *dev_priv;
6466
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
5355
 
6467
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
5356
	if (WARN_ON(!hsw_pwr))
6468
	BIT(POWER_DOMAIN_INIT))
5357
		return;
6469
 
5358
 
6470
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
-
 
6471
	.sync_hw = i9xx_always_on_power_well_noop,
5359
	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6472
	.enable = i9xx_always_on_power_well_noop,
5360
				power_domains);
6473
	.disable = i9xx_always_on_power_well_noop,
Line -... Line 6474...
-
 
6474
	.is_enabled = i9xx_always_on_power_well_enabled,
-
 
6475
};
-
 
6476
 
-
 
6477
static struct i915_power_well i9xx_always_on_power_well[] = {
-
 
6478
	{
-
 
6479
		.name = "always-on",
-
 
6480
		.always_on = 1,
5361
	intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
6481
		.domains = POWER_DOMAIN_MASK,
5362
}
6482
		.ops = &i9xx_always_on_power_well_ops,
5363
EXPORT_SYMBOL_GPL(i915_release_power_well);
6483
	},
5364
 
6484
};
5365
static struct i915_power_well i9xx_always_on_power_well[] = {
6485
 
-
 
6486
static const struct i915_power_well_ops hsw_power_well_ops = {
5366
	{
6487
	.sync_hw = hsw_power_well_sync_hw,
5367
		.name = "always-on",
6488
	.enable = hsw_power_well_enable,
5368
		.always_on = 1,
6489
	.disable = hsw_power_well_disable,
5369
		.domains = POWER_DOMAIN_MASK,
6490
	.is_enabled = hsw_power_well_enabled,
5370
	},
-
 
5371
};
6491
};
5372
 
6492
 
5373
static struct i915_power_well hsw_power_wells[] = {
6493
static struct i915_power_well hsw_power_wells[] = {
Line 5374... Line 6494...
5374
	{
6494
	{
5375
		.name = "always-on",
6495
		.name = "always-on",
5376
		.always_on = 1,
6496
		.always_on = 1,
5377
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
6497
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
5378
	},
6498
		.ops = &i9xx_always_on_power_well_ops,
-
 
6499
	},
5379
	{
6500
	{
5380
		.name = "display",
6501
		.name = "display",
5381
		.domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
6502
		.domains = HSW_DISPLAY_POWER_DOMAINS,
5382
		.is_enabled = hsw_power_well_enabled,
6503
		.ops = &hsw_power_well_ops,
5383
		.set = hsw_set_power_well,
-
 
5384
	},
6504
	},
5385
};
6505
};
5386
 
6506
 
Line -... Line 6507...
-
 
6507
static struct i915_power_well bdw_power_wells[] = {
-
 
6508
	{
-
 
6509
		.name = "always-on",
-
 
6510
		.always_on = 1,
-
 
6511
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
-
 
6512
		.ops = &i9xx_always_on_power_well_ops,
-
 
6513
	},
-
 
6514
	{
-
 
6515
		.name = "display",
-
 
6516
		.domains = BDW_DISPLAY_POWER_DOMAINS,
-
 
6517
		.ops = &hsw_power_well_ops,
-
 
6518
	},
-
 
6519
};
-
 
6520
 
-
 
6521
static const struct i915_power_well_ops vlv_display_power_well_ops = {
-
 
6522
	.sync_hw = vlv_power_well_sync_hw,
-
 
6523
	.enable = vlv_display_power_well_enable,
-
 
6524
	.disable = vlv_display_power_well_disable,
-
 
6525
	.is_enabled = vlv_power_well_enabled,
-
 
6526
};
-
 
6527
 
-
 
6528
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
-
 
6529
	.sync_hw = vlv_power_well_sync_hw,
-
 
6530
	.enable = vlv_dpio_cmn_power_well_enable,
-
 
6531
	.disable = vlv_dpio_cmn_power_well_disable,
-
 
6532
	.is_enabled = vlv_power_well_enabled,
-
 
6533
};
-
 
6534
 
-
 
6535
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
-
 
6536
	.sync_hw = vlv_power_well_sync_hw,
-
 
6537
	.enable = vlv_power_well_enable,
-
 
6538
	.disable = vlv_power_well_disable,
-
 
6539
	.is_enabled = vlv_power_well_enabled,
-
 
6540
};
-
 
6541
 
-
 
6542
static struct i915_power_well vlv_power_wells[] = {
-
 
6543
	{
-
 
6544
		.name = "always-on",
-
 
6545
		.always_on = 1,
-
 
6546
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
-
 
6547
		.ops = &i9xx_always_on_power_well_ops,
-
 
6548
	},
-
 
6549
	{
-
 
6550
		.name = "display",
-
 
6551
		.domains = VLV_DISPLAY_POWER_DOMAINS,
-
 
6552
		.data = PUNIT_POWER_WELL_DISP2D,
-
 
6553
		.ops = &vlv_display_power_well_ops,
-
 
6554
	},
-
 
6555
	{
-
 
6556
		.name = "dpio-tx-b-01",
-
 
6557
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-
 
6558
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-
 
6559
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-
 
6560
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-
 
6561
		.ops = &vlv_dpio_power_well_ops,
-
 
6562
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
-
 
6563
	},
-
 
6564
	{
-
 
6565
		.name = "dpio-tx-b-23",
-
 
6566
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-
 
6567
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-
 
6568
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-
 
6569
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-
 
6570
		.ops = &vlv_dpio_power_well_ops,
-
 
6571
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
-
 
6572
	},
-
 
6573
	{
-
 
6574
		.name = "dpio-tx-c-01",
-
 
6575
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-
 
6576
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-
 
6577
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-
 
6578
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-
 
6579
		.ops = &vlv_dpio_power_well_ops,
-
 
6580
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
-
 
6581
	},
-
 
6582
	{
-
 
6583
		.name = "dpio-tx-c-23",
-
 
6584
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-
 
6585
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-
 
6586
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-
 
6587
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-
 
6588
		.ops = &vlv_dpio_power_well_ops,
-
 
6589
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
-
 
6590
	},
-
 
6591
	{
-
 
6592
		.name = "dpio-common",
-
 
6593
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
-
 
6594
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-
 
6595
		.ops = &vlv_dpio_cmn_power_well_ops,
-
 
6596
	},
-
 
6597
};
-
 
6598
 
-
 
6599
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
5387
static struct i915_power_well bdw_power_wells[] = {
6600
						 enum punit_power_well power_well_id)
5388
	{
6601
{
5389
		.name = "always-on",
6602
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5390
		.always_on = 1,
6603
	struct i915_power_well *power_well;
Line 5391... Line 6604...
5391
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
6604
	int i;
5392
	},
6605
 
5393
	{
-
 
5394
		.name = "display",
6606
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
Line 5395... Line 6607...
5395
		.domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
6607
		if (power_well->data == power_well_id)
Line 5396... Line 6608...
5396
		.is_enabled = hsw_power_well_enabled,
6608
			return power_well;
5397
		.set = hsw_set_power_well,
6609
	}
5398
	},
6610
 
5399
};
6611
	return NULL;
5400
 
6612
}
5401
#define set_power_wells(power_domains, __power_wells) ({		\
6613
 
5402
	(power_domains)->power_wells = (__power_wells);			\
6614
#define set_power_wells(power_domains, __power_wells) ({		\
5403
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
6615
	(power_domains)->power_wells = (__power_wells);			\
5404
})
6616
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
5405
 
6617
})
-
 
6618
 
-
 
6619
int intel_power_domains_init(struct drm_i915_private *dev_priv)
5406
int intel_power_domains_init(struct drm_device *dev)
6620
{
5407
{
6621
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5408
	struct drm_i915_private *dev_priv = dev->dev_private;
6622
 
Line 5409... Line 6623...
5409
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
6623
	mutex_init(&power_domains->lock);
5410
 
6624
 
Line 5411... Line 6625...
5411
	mutex_init(&power_domains->lock);
6625
	/*
5412
 
6626
	 * The enabling order will be from lower to higher indexed wells,
5413
	/*
6627
	 * the disabling order is reversed.
5414
	 * The enabling order will be from lower to higher indexed wells,
6628
	 */
Line 5415... Line 6629...
5415
	 * the disabling order is reversed.
6629
	if (IS_HASWELL(dev_priv->dev)) {
5416
	 */
6630
		set_power_wells(power_domains, hsw_power_wells);
5417
	if (IS_HASWELL(dev)) {
-
 
5418
		set_power_wells(power_domains, hsw_power_wells);
6631
		hsw_pwr = power_domains;
5419
		hsw_pwr = power_domains;
6632
	} else if (IS_BROADWELL(dev_priv->dev)) {
5420
	} else if (IS_BROADWELL(dev)) {
6633
		set_power_wells(power_domains, bdw_power_wells);
Line 5421... Line 6634...
5421
		set_power_wells(power_domains, bdw_power_wells);
6634
		hsw_pwr = power_domains;
5422
		hsw_pwr = power_domains;
6635
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
5423
	} else {
6636
		set_power_wells(power_domains, vlv_power_wells);
5424
		set_power_wells(power_domains, i9xx_always_on_power_well);
6637
	} else {
-
 
6638
		set_power_wells(power_domains, i9xx_always_on_power_well);
5425
	}
6639
	}
5426
 
6640
 
5427
	return 0;
6641
	return 0;
Line -... Line 6642...
-
 
6642
}
-
 
6643
 
-
 
6644
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
-
 
6645
{
-
 
6646
	hsw_pwr = NULL;
-
 
6647
}
-
 
6648
 
-
 
6649
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
-
 
6650
{
-
 
6651
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
 
6652
	struct i915_power_well *power_well;
-
 
6653
	int i;
-
 
6654
 
-
 
6655
	mutex_lock(&power_domains->lock);
-
 
6656
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
-
 
6657
		power_well->ops->sync_hw(dev_priv, power_well);
-
 
6658
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
-
 
6659
								     power_well);
-
 
6660
	}
-
 
6661
	mutex_unlock(&power_domains->lock);
-
 
6662
}
5428
}
6663
 
5429
 
6664
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5430
void intel_power_domains_remove(struct drm_device *dev)
6665
{
-
 
6666
	struct i915_power_well *cmn =
5431
{
6667
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
5432
	hsw_pwr = NULL;
6668
	struct i915_power_well *disp2d =
5433
}
6669
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
-
 
6670
 
-
 
6671
	/* nothing to do if common lane is already off */
-
 
6672
	if (!cmn->ops->is_enabled(dev_priv, cmn))
5434
 
6673
		return;
5435
static void intel_power_domains_resume(struct drm_device *dev)
6674
 
5436
{
6675
	/* If the display might be already active skip this */
-
 
6676
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
Line 5437... Line -...
5437
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
5438
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
6677
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
5439
	struct i915_power_well *power_well;
-
 
Line 5440... Line 6678...
5440
	int i;
6678
		return;
-
 
6679
 
-
 
6680
	DRM_DEBUG_KMS("toggling display PHY side reset\n");
-
 
6681
 
5441
 
6682
	/* cmnlane needs DPLL registers */
Line 5442... Line 6683...
5442
	mutex_lock(&power_domains->lock);
6683
	disp2d->ops->enable(dev_priv, disp2d);
5443
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6684
 
5444
		if (power_well->set)
6685
	/*
5445
			power_well->set(dev, power_well, power_well->count > 0);
6686
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5446
	}
6687
	 * Need to assert and de-assert PHY SB reset by gating the
Line 5447... Line -...
5447
	mutex_unlock(&power_domains->lock);
-
 
5448
}
6688
	 * common lane power, then un-gating it.
5449
 
6689
	 * Simply ungating isn't enough to reset the PHY enough to get
5450
/*
6690
	 * ports and lanes running.
5451
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
6691
 */
Line 5452... Line 6692...
5452
 * when not needed anymore. We have 4 registers that can request the power well
6692
	cmn->ops->disable(dev_priv, cmn);
5453
 * to be enabled, and it will only be disabled if none of the registers is
6693
}
5454
 * requesting it to be enabled.
6694
 
5455
 */
6695
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
Line 5456... Line 6696...
5456
void intel_power_domains_init_hw(struct drm_device *dev)
6696
{
5457
{
6697
	struct drm_device *dev = dev_priv->dev;
5458
	struct drm_i915_private *dev_priv = dev->dev_private;
6698
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5459
 
6699
 
Line -... Line 6700...
-
 
6700
	power_domains->initializing = true;
5460
	/* For now, we need the power well to be always enabled. */
6701
 
-
 
6702
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
-
 
6703
		mutex_lock(&power_domains->lock);
-
 
6704
		vlv_cmnlane_wa(dev_priv);
-
 
6705
		mutex_unlock(&power_domains->lock);
-
 
6706
	}
-
 
6707
 
-
 
6708
	/* For now, we need the power well to be always enabled. */
-
 
6709
	intel_display_set_init_power(dev_priv, true);
-
 
6710
	intel_power_domains_resume(dev_priv);
-
 
6711
	power_domains->initializing = false;
-
 
6712
}
-
 
6713
 
-
 
6714
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
-
 
6715
{
-
 
6716
	intel_runtime_pm_get(dev_priv);
5461
	intel_display_set_init_power(dev, true);
6717
}
Line 5462... Line 6718...
5462
	intel_power_domains_resume(dev);
6718
 
5463
 
6719
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5464
	if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
6720
{
5465
		return;
6721
	intel_runtime_pm_put(dev_priv);
Line -... Line 6722...
-
 
6722
}
5466
 
6723
 
Line 5467... Line 6724...
5467
	/* We're taking over the BIOS, so clear any requests made by it since
6724
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
Line 5468... Line 6725...
5468
	 * the driver is in charge now. */
6725
{
5469
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
6726
	struct drm_device *dev = dev_priv->dev;
5470
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
6727
	struct device *device = &dev->pdev->dev;
5471
}
6728
 
Line 5472... Line 6729...
5472
 
6729
	if (!HAS_RUNTIME_PM(dev))
-
 
6730
    return;
-
 
6731
 
Line -... Line 6732...
-
 
6732
//	pm_runtime_get_sync(device);
-
 
6733
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
-
 
6734
}
-
 
6735
 
-
 
6736
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
-
 
6737
{
5473
/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
6738
	struct drm_device *dev = dev_priv->dev;
-
 
6739
	struct device *device = &dev->pdev->dev;
-
 
6740
 
Line 5474... Line 6741...
5474
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
6741
	if (!HAS_RUNTIME_PM(dev))
Line 5475... Line 6742...
5475
{
6742
		return;
5476
	hsw_disable_package_c8(dev_priv);
6743
 
5477
}
6744
	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
5478
 
6745
//   pm_runtime_get_noresume(device);
Line -... Line 6746...
-
 
6746
}
-
 
6747
 
-
 
6748
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
-
 
6749
{
5479
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
6750
	struct drm_device *dev = dev_priv->dev;
Line 5480... Line 6751...
5480
{
6751
	struct device *device = &dev->pdev->dev;
Line 5481... Line 6752...
5481
	hsw_enable_package_c8(dev_priv);
6752
 
Line 5552... Line 6823...
5552
	else if (IS_GEN5(dev))
6823
	else if (IS_GEN5(dev))
5553
		i915_ironlake_get_mem_freq(dev);
6824
		i915_ironlake_get_mem_freq(dev);
Line 5554... Line 6825...
5554
 
6825
 
5555
	/* For FIFO watermark updates */
6826
	/* For FIFO watermark updates */
5556
	if (HAS_PCH_SPLIT(dev)) {
6827
	if (HAS_PCH_SPLIT(dev)) {
Line 5557... Line 6828...
5557
		intel_setup_wm_latency(dev);
6828
		ilk_setup_wm_latency(dev);
5558
 
6829
 
5559
		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
6830
		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
5560
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
6831
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
Line 5575... Line 6846...
5575
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6846
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
5576
		else if (IS_HASWELL(dev))
6847
		else if (IS_HASWELL(dev))
5577
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6848
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
5578
		else if (INTEL_INFO(dev)->gen == 8)
6849
		else if (INTEL_INFO(dev)->gen == 8)
5579
			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
6850
			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
-
 
6851
	} else if (IS_CHERRYVIEW(dev)) {
-
 
6852
		dev_priv->display.update_wm = valleyview_update_wm;
-
 
6853
		dev_priv->display.init_clock_gating =
-
 
6854
			cherryview_init_clock_gating;
5580
	} else if (IS_VALLEYVIEW(dev)) {
6855
	} else if (IS_VALLEYVIEW(dev)) {
5581
		dev_priv->display.update_wm = valleyview_update_wm;
6856
		dev_priv->display.update_wm = valleyview_update_wm;
5582
		dev_priv->display.init_clock_gating =
6857
		dev_priv->display.init_clock_gating =
5583
			valleyview_init_clock_gating;
6858
			valleyview_init_clock_gating;
5584
	} else if (IS_PINEVIEW(dev)) {
6859
	} else if (IS_PINEVIEW(dev)) {
Line 5590... Line 6865...
5590
				 "(found ddr%s fsb freq %d, mem freq %d), "
6865
				 "(found ddr%s fsb freq %d, mem freq %d), "
5591
				 "disabling CxSR\n",
6866
				 "disabling CxSR\n",
5592
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
6867
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
5593
				 dev_priv->fsb_freq, dev_priv->mem_freq);
6868
				 dev_priv->fsb_freq, dev_priv->mem_freq);
5594
			/* Disable CxSR and never update its watermark again */
6869
			/* Disable CxSR and never update its watermark again */
5595
			pineview_disable_cxsr(dev);
6870
			intel_set_memory_cxsr(dev_priv, false);
5596
			dev_priv->display.update_wm = NULL;
6871
			dev_priv->display.update_wm = NULL;
5597
		} else
6872
		} else
5598
			dev_priv->display.update_wm = pineview_update_wm;
6873
			dev_priv->display.update_wm = pineview_update_wm;
5599
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6874
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5600
	} else if (IS_G4X(dev)) {
6875
	} else if (IS_G4X(dev)) {
Line 5673... Line 6948...
5673
	I915_WRITE(GEN6_PCODE_DATA, 0);
6948
	I915_WRITE(GEN6_PCODE_DATA, 0);
Line 5674... Line 6949...
5674
 
6949
 
5675
	return 0;
6950
	return 0;
Line 5676... Line 6951...
5676
}
6951
}
5677
 
6952
 
5678
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
6953
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
Line 5679... Line 6954...
5679
{
6954
{
5680
	int div;
6955
	int div;
Line 5695... Line 6970...
5695
	}
6970
	}
Line 5696... Line 6971...
5696
 
6971
 
5697
	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
6972
	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
Line 5698... Line 6973...
5698
}
6973
}
5699
 
6974
 
5700
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
6975
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
Line 5701... Line 6976...
5701
{
6976
{
5702
	int mul;
6977
	int mul;
Line 5717... Line 6992...
5717
	}
6992
	}
Line 5718... Line 6993...
5718
 
6993
 
5719
	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
6994
	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
Line -... Line 6995...
-
 
6995
}
-
 
6996
 
-
 
6997
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
-
 
6998
{
-
 
6999
	int div, freq;
-
 
7000
 
-
 
7001
	switch (dev_priv->rps.cz_freq) {
-
 
7002
	case 200:
-
 
7003
		div = 5;
-
 
7004
		break;
-
 
7005
	case 267:
-
 
7006
		div = 6;
-
 
7007
		break;
-
 
7008
	case 320:
-
 
7009
	case 333:
-
 
7010
	case 400:
-
 
7011
		div = 8;
-
 
7012
		break;
-
 
7013
	default:
-
 
7014
		return -1;
-
 
7015
	}
-
 
7016
 
-
 
7017
	freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
-
 
7018
 
-
 
7019
	return freq;
-
 
7020
}
-
 
7021
 
-
 
7022
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
-
 
7023
{
-
 
7024
	int mul, opcode;
-
 
7025
 
-
 
7026
	switch (dev_priv->rps.cz_freq) {
-
 
7027
	case 200:
-
 
7028
		mul = 5;
-
 
7029
		break;
-
 
7030
	case 267:
-
 
7031
		mul = 6;
-
 
7032
		break;
-
 
7033
	case 320:
-
 
7034
	case 333:
-
 
7035
	case 400:
-
 
7036
		mul = 8;
-
 
7037
		break;
-
 
7038
	default:
-
 
7039
		return -1;
-
 
7040
	}
-
 
7041
 
-
 
7042
	opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
-
 
7043
 
-
 
7044
	return opcode;
-
 
7045
}
-
 
7046
 
-
 
7047
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
-
 
7048
{
-
 
7049
	int ret = -1;
-
 
7050
 
-
 
7051
	if (IS_CHERRYVIEW(dev_priv->dev))
-
 
7052
		ret = chv_gpu_freq(dev_priv, val);
-
 
7053
	else if (IS_VALLEYVIEW(dev_priv->dev))
-
 
7054
		ret = byt_gpu_freq(dev_priv, val);
-
 
7055
 
-
 
7056
	return ret;
-
 
7057
}
-
 
7058
 
-
 
7059
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
-
 
7060
{
-
 
7061
	int ret = -1;
-
 
7062
 
-
 
7063
	if (IS_CHERRYVIEW(dev_priv->dev))
-
 
7064
		ret = chv_freq_opcode(dev_priv, val);
-
 
7065
	else if (IS_VALLEYVIEW(dev_priv->dev))
-
 
7066
		ret = byt_freq_opcode(dev_priv, val);
-
 
7067
 
-
 
7068
	return ret;
5720
}
7069
}
5721
 
7070
 
5722
void intel_pm_setup(struct drm_device *dev)
7071
void intel_pm_setup(struct drm_device *dev)
Line 5723... Line 7072...
5723
{
7072
{
Line 5724... Line -...
5724
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
5725
 
-
 
5726
	mutex_init(&dev_priv->rps.hw_lock);
-
 
5727
 
-
 
5728
	mutex_init(&dev_priv->pc8.lock);
-
 
5729
	dev_priv->pc8.requirements_met = false;
-
 
5730
	dev_priv->pc8.gpu_idle = false;
-
 
5731
	dev_priv->pc8.irqs_disabled = false;
7073
	struct drm_i915_private *dev_priv = dev->dev_private;
5732
	dev_priv->pc8.enabled = false;
7074
 
-
 
7075
	mutex_init(&dev_priv->rps.hw_lock);
-
 
7076
 
-
 
7077
	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5733
	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
7078
			  intel_gen6_powersave_work);