Subversion Repositories Kolibri OS

Rev

Rev 6937 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
6084 serge 1
/*
2
 * Copyright © 2014 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 */
23
 
24
/**
25
 * DOC: Frame Buffer Compression (FBC)
26
 *
27
 * FBC tries to save memory bandwidth (and so power consumption) by
28
 * compressing the amount of memory used by the display. It is total
29
 * transparent to user space and completely handled in the kernel.
30
 *
31
 * The benefits of FBC are mostly visible with solid backgrounds and
32
 * variation-less patterns. It comes from keeping the memory footprint small
33
 * and having fewer memory pages opened and accessed for refreshing the display.
34
 *
35
 * i915 is responsible to reserve stolen memory for FBC and configure its
36
 * offset on proper registers. The hardware takes care of all
37
 * compress/decompress. However there are many known cases where we have to
38
 * forcibly disable it to allow proper screen updates.
39
 */
40
 
41
#include "intel_drv.h"
42
#include "i915_drv.h"
43
 
44
/* Whether this device has FBC hardware at all; gates every public entry point. */
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return HAS_FBC(dev_priv);
}
48
 
6937 serge 49
/* On HSW and gen8+ only pipe A can be compressed (see intel_fbc_can_choose()). */
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
	return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
}
53
 
54
/* Pre-gen4 hardware can only compress primary plane A (see intel_fbc_can_choose()). */
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen < 4;
}
58
 
7144 serge 59
/* Gen2/3 cannot keep FBC enabled while more than one pipe is active. */
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen <= 3;
}
63
 
6084 serge 64
/*
 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
	/* Difference between the CRTC's y=0 and the adjusted scanout origin. */
	return crtc->base.y - crtc->adjusted_y;
}
76
 
6937 serge 77
/*
78
 * For SKL+, the plane source size used by the hardware is based on the value we
79
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
80
 * we wrote to PIPESRC.
81
 */
7144 serge 82
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
6937 serge 83
					    int *width, int *height)
6084 serge 84
{
6937 serge 85
	int w, h;
86
 
7144 serge 87
	if (intel_rotation_90_or_270(cache->plane.rotation)) {
88
		w = cache->plane.src_h;
89
		h = cache->plane.src_w;
6937 serge 90
	} else {
7144 serge 91
		w = cache->plane.src_w;
92
		h = cache->plane.src_h;
6937 serge 93
	}
94
 
95
	if (width)
96
		*width = w;
97
	if (height)
98
		*height = h;
99
}
100
 
7144 serge 101
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
102
					struct intel_fbc_state_cache *cache)
6937 serge 103
{
104
	int lines;
105
 
7144 serge 106
	intel_fbc_get_plane_source_size(cache, NULL, &lines);
6937 serge 107
	if (INTEL_INFO(dev_priv)->gen >= 7)
108
		lines = min(lines, 2048);
109
 
110
	/* Hardware needs the full buffer stride, not just the active area. */
7144 serge 111
	return lines * cache->fb.stride;
6937 serge 112
}
113
 
114
/* Turn off FBC on gen2-4 hardware and wait for the compressor to go idle. */
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}
132
 
7144 serge 133
/*
 * Program and enable FBC on gen2-4 hardware using the register parameters
 * snapshotted in dev_priv->fbc.params. Register write order matters here.
 */
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	/* Never program a pitch larger than the framebuffer's own stride. */
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev_priv))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN4(dev_priv)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	/* Keep only the interval field from the current register value. */
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->fb.fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}
175
 
6937 serge 176
/* Read back whether the gen2-4 compressor is currently enabled in hardware. */
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
180
 
7144 serge 181
/* Program and enable FBC on G4X (GM45) hardware from the snapshotted params. */
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
	/* 16bpp formats use the 2x compression limit, everything else 1x. */
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}
198
 
6937 serge 199
static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
6084 serge 200
{
201
	u32 dpfc_ctl;
202
 
203
	/* Disable compression */
204
	dpfc_ctl = I915_READ(DPFC_CONTROL);
205
	if (dpfc_ctl & DPFC_CTL_EN) {
206
		dpfc_ctl &= ~DPFC_CTL_EN;
207
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
208
	}
209
}
210
 
6937 serge 211
/* Read back whether the G4X compressor is currently enabled in hardware. */
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
215
 
6937 serge 216
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	/* Posting read flushes the write before we return. */
	POSTING_READ(MSG_FBC_REND_STATE);
}
222
 
7144 serge 223
/*
 * Program and enable FBC on ILK/SNB hardware from the snapshotted params.
 * Register write order matters here.
 */
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
	/* 16bpp needs one extra compression step (see find_compression_threshold()). */
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	/* Only ILK puts the fence register number in the control register. */
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= params->fb.fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	/* SNB programs the CPU fence separately. */
	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	}

	intel_fbc_recompress(dev_priv);
}
262
 
6937 serge 263
/* Turn off FBC on ILK+ hardware if it is currently enabled. */
static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}
274
 
6937 serge 275
/* Read back whether the ILK+ compressor is currently enabled in hardware. */
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
279
 
7144 serge 280
/*
 * Program and enable FBC on IVB+ hardware from the snapshotted params,
 * applying the FBC-queue-disable workarounds. Register write order matters.
 */
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = 0;
	/* Only IVB encodes the plane in the control register. */
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);

	/* 16bpp needs one extra compression step (see find_compression_threshold()). */
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	/* Debug mode: paint compressed areas with a false color. */
	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);

	intel_fbc_recompress(dev_priv);
}
331
 
7144 serge 332
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
333
{
334
	if (INTEL_INFO(dev_priv)->gen >= 5)
335
		return ilk_fbc_is_active(dev_priv);
336
	else if (IS_GM45(dev_priv))
337
		return g4x_fbc_is_active(dev_priv);
338
	else
339
		return i8xx_fbc_is_active(dev_priv);
340
}
341
 
342
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
343
{
344
	struct intel_fbc *fbc = &dev_priv->fbc;
345
 
346
	fbc->active = true;
347
 
348
	if (INTEL_INFO(dev_priv)->gen >= 7)
349
		gen7_fbc_activate(dev_priv);
350
	else if (INTEL_INFO(dev_priv)->gen >= 5)
351
		ilk_fbc_activate(dev_priv);
352
	else if (IS_GM45(dev_priv))
353
		g4x_fbc_activate(dev_priv);
354
	else
355
		i8xx_fbc_activate(dev_priv);
356
}
357
 
358
static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
359
{
360
	struct intel_fbc *fbc = &dev_priv->fbc;
361
 
362
	fbc->active = false;
363
 
364
	if (INTEL_INFO(dev_priv)->gen >= 5)
365
		ilk_fbc_deactivate(dev_priv);
366
	else if (IS_GM45(dev_priv))
367
		g4x_fbc_deactivate(dev_priv);
368
	else
369
		i8xx_fbc_deactivate(dev_priv);
370
}
371
 
6084 serge 372
/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 * FIXME: This should be tracked in the plane config eventually
 *        instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	/* fbc.active is written under fbc->lock by the (de)activation paths. */
	return dev_priv->fbc.active;
}
384
 
385
/*
 * Deferred-activation worker: waits for a vblank to pass after the work was
 * scheduled, then enables FBC in hardware unless the work was cancelled or
 * rescheduled in the meantime. See intel_fbc_schedule_activation().
 */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct drm_i915_private *dev_priv =
		container_of(__work, struct drm_i915_private, fbc.work.work);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;
	struct intel_crtc *crtc = fbc->crtc;
	struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];

	if (drm_crtc_vblank_get(&crtc->base)) {
		DRM_ERROR("vblank not available for FBC on pipe %c\n",
			  pipe_name(crtc->pipe));

		/* Can't wait for a vblank; mark the work as done and bail. */
		mutex_lock(&fbc->lock);
		work->scheduled = false;
		mutex_unlock(&fbc->lock);
		return;
	}

retry:
	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 *
	 * It is also worth mentioning that since work->scheduled_vblank can be
	 * updated multiple times by the other threads, hitting the timeout is
	 * not an error condition. We'll just end up hitting the "goto retry"
	 * case below.
	 */
	wait_event_timeout(vblank->queue,
		drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
		msecs_to_jiffies(50));

	mutex_lock(&fbc->lock);

	/* Were we cancelled? */
	if (!work->scheduled)
		goto out;

	/* Were we delayed again while this function was sleeping? */
	if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
		mutex_unlock(&fbc->lock);
		goto retry;
	}

	intel_fbc_hw_activate(dev_priv);

	work->scheduled = false;

out:
	mutex_unlock(&fbc->lock);
	/* Balances the drm_crtc_vblank_get() above. */
	drm_crtc_vblank_put(&crtc->base);
}
442
 
6937 serge 443
/*
 * Schedule deferred FBC activation via intel_fbc_work_fn(). Records the
 * current vblank count so the worker knows which vblank to wait past.
 * Caller must hold fbc->lock.
 */
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	/* The vblank reference is only needed to sample the counter below. */
	if (drm_crtc_vblank_get(&crtc->base)) {
		DRM_ERROR("vblank not available for FBC on pipe %c\n",
			  pipe_name(crtc->pipe));
		return;
	}

	/* It is useless to call intel_fbc_cancel_work() or cancel_work() in
	 * this function since we're not releasing fbc.lock, so it won't have an
	 * opportunity to grab it to discover that it was cancelled. So we just
	 * update the expected jiffy count. */
	work->scheduled = true;
	work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
	drm_crtc_vblank_put(&crtc->base);

	schedule_work(&work->work);
}
467
 
7144 serge 468
/*
 * Cancel any pending deferred activation and turn FBC off in hardware if it
 * is currently on. Caller must hold fbc->lock.
 */
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	/* Calling cancel_work() here won't help due to the fact that the work
	 * function grabs fbc->lock. Just set scheduled to false so the work
	 * function can know it was cancelled. */
	fbc->work.scheduled = false;

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);
}
482
 
7144 serge 483
static bool multiple_pipes_ok(struct intel_crtc *crtc)
6084 serge 484
{
6937 serge 485
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
7144 serge 486
	struct drm_plane *primary = crtc->base.primary;
487
	struct intel_fbc *fbc = &dev_priv->fbc;
488
	enum pipe pipe = crtc->pipe;
6084 serge 489
 
7144 serge 490
	/* Don't even bother tracking anything we don't need. */
491
	if (!no_fbc_on_multiple_pipes(dev_priv))
6084 serge 492
		return true;
493
 
7144 serge 494
	WARN_ON(!drm_modeset_is_locked(&primary->mutex));
6084 serge 495
 
7144 serge 496
	if (to_intel_plane_state(primary->state)->visible)
497
		fbc->visible_pipes_mask |= (1 << pipe);
498
	else
499
		fbc->visible_pipes_mask &= ~(1 << pipe);
6084 serge 500
 
7144 serge 501
	return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
6084 serge 502
}
503
 
504
/*
 * Reserve a CFB node in stolen memory, shrinking the request until it fits.
 * Returns the compression threshold (1, 2 or 4) that the allocation supports,
 * or 0 on failure. Note that the i915_gem_stolen_insert_node_in_range() call
 * arguments deliberately modify @size in place (<<= then >>=).
 */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) ||
	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
	else
		end = dev_priv->gtt.stolen_usable_size;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	/* Halve the request and retry with a doubled threshold each pass. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}
553
 
6937 serge 554
/*
 * Allocate the compressed framebuffer (CFB) in stolen memory for @crtc, plus
 * the separate line-length buffer on pre-GM45 platforms, and program the base
 * registers. Returns 0 on success or -ENOSPC when stolen memory is exhausted.
 */
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);

	/* ret is the achieved compression threshold, or 0 on failure. */
	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	fbc->threshold = ret;

	if (INTEL_INFO(dev_priv)->gen >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		/* Pre-GM45 also needs a line-length buffer in stolen memory. */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	/* kfree(NULL) is a no-op on the kzalloc-failure path. */
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
611
 
612
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
613
{
7144 serge 614
	struct intel_fbc *fbc = &dev_priv->fbc;
6084 serge 615
 
7144 serge 616
	if (drm_mm_node_allocated(&fbc->compressed_fb))
617
		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
618
 
619
	if (fbc->compressed_llb) {
620
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
621
		kfree(fbc->compressed_llb);
6084 serge 622
	}
623
}
624
 
625
/* Locked wrapper around __intel_fbc_cleanup_cfb() for external callers. */
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}
636
 
637
static bool stride_is_valid(struct drm_i915_private *dev_priv,
638
			    unsigned int stride)
639
{
640
	/* These should have been caught earlier. */
641
	WARN_ON(stride < 512);
642
	WARN_ON((stride & (64 - 1)) != 0);
643
 
644
	/* Below are the additional FBC restrictions. */
645
 
646
	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
647
		return stride == 4096 || stride == 8192;
648
 
649
	if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
650
		return false;
651
 
652
	if (stride > 16384)
653
		return false;
654
 
655
	return true;
656
}
657
 
7144 serge 658
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
659
				  uint32_t pixel_format)
6084 serge 660
{
7144 serge 661
	switch (pixel_format) {
6084 serge 662
	case DRM_FORMAT_XRGB8888:
663
	case DRM_FORMAT_XBGR8888:
664
		return true;
665
	case DRM_FORMAT_XRGB1555:
666
	case DRM_FORMAT_RGB565:
667
		/* 16bpp not supported on gen2 */
7144 serge 668
		if (IS_GEN2(dev_priv))
6084 serge 669
			return false;
670
		/* WaFbcOnly1to1Ratio:ctg */
671
		if (IS_G4X(dev_priv))
672
			return false;
673
		return true;
674
	default:
675
		return false;
676
	}
677
}
678
 
679
/*
680
 * For some reason, the hardware tracking starts looking at whatever we
681
 * programmed as the display plane base address register. It does not look at
682
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
683
 * variables instead of just looking at the pipe/plane size.
684
 */
685
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
686
{
687
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
7144 serge 688
	struct intel_fbc *fbc = &dev_priv->fbc;
6084 serge 689
	unsigned int effective_w, effective_h, max_w, max_h;
690
 
691
	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
692
		max_w = 4096;
693
		max_h = 4096;
694
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
695
		max_w = 4096;
696
		max_h = 2048;
697
	} else {
698
		max_w = 2048;
699
		max_h = 1536;
700
	}
701
 
7144 serge 702
	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
703
					&effective_h);
6084 serge 704
	effective_w += crtc->adjusted_x;
705
	effective_h += crtc->adjusted_y;
706
 
707
	return effective_w <= max_w && effective_h <= max_h;
708
}
709
 
7144 serge 710
/*
 * Snapshot the CRTC/plane/framebuffer state that FBC decisions depend on into
 * fbc->state_cache. Must be called with the relevant modeset locks held so
 * the snapshot is consistent.
 */
static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(crtc->base.primary->state);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj;

	WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));

	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
	/* Only HSW/BDW consume the pixel rate (WaFbcExceedCdClockThreshold). */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);

	cache->plane.rotation = plane_state->base.rotation;
	/* src rect is in 16.16 fixed point; shift down to pixels. */
	cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
	cache->plane.visible = plane_state->visible;

	/* fb may be NULL when the plane is invisible; skip the fb fields. */
	if (!cache->plane.visible)
		return;

	obj = intel_fb_obj(fb);

	/* FIXME: We lack the proper locking here, so only run this on the
	 * platforms that need. */
	if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
		cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
	cache->fb.pixel_format = fb->pixel_format;
	cache->fb.stride = fb->pitches[0];
	cache->fb.fence_reg = obj->fence_reg;
	cache->fb.tiling_mode = obj->tiling_mode;
}
749
 
750
/*
 * Validate the cached state against every condition required to actually
 * turn FBC on. On failure, records a human-readable reason in
 * fbc->no_fbc_reason (exposed via debugfs) and returns false.
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	if (!cache->plane.visible) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
	    (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (cache->fb.tiling_mode != I915_TILING_X ||
	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	/* Rotation is only compressible on G4X and gen5+. */
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != BIT(DRM_ROTATE_0)) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	return true;
}
821
 
822
/*
 * Check the static conditions (module params, platform pipe/plane
 * restrictions, virtualization) that decide whether this CRTC may be chosen
 * for FBC at all. Records the rejection reason in fbc->no_fbc_reason.
 */
static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	/* BDW is the only platform where FBC defaults to on here. */
	bool enable_by_default = IS_BROADWELL(dev_priv);

	if (intel_vgpu_active(dev_priv->dev)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	/* i915.enable_fbc < 0 means "use the per-chip default". */
	if (i915.enable_fbc < 0 && !enable_by_default) {
		fbc->no_fbc_reason = "disabled per chip default";
		return false;
	}

	if (!i915.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param";
		return false;
	}

	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
		fbc->no_fbc_reason = "no enabled pipes can have FBC";
		return false;
	}

	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
		fbc->no_fbc_reason = "no enabled planes can have FBC";
		return false;
	}

	return true;
}
855
 
856
/*
 * Derive the register-programming parameters for @crtc from the state cache.
 * The result is what the per-platform *_fbc_activate() functions consume.
 */
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->crtc.pipe = crtc->pipe;
	params->crtc.plane = crtc->plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);

	params->fb.pixel_format = cache->fb.pixel_format;
	params->fb.stride = cache->fb.stride;
	params->fb.fence_reg = cache->fb.fence_reg;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
}
880
 
881
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
882
				       struct intel_fbc_reg_params *params2)
883
{
884
	/* We can use this since intel_fbc_get_reg_params() does a memset. */
885
	return memcmp(params1, params2, sizeof(*params1)) == 0;
886
}
887
 
888
/* Called before a CRTC update is committed: refreshes the state cache for
 * the CRTC currently tied to FBC and deactivates FBC for the duration of
 * the update. Reactivation is handled later by intel_fbc_post_update(). */
void intel_fbc_pre_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	/* Too many active pipes: deactivate regardless of which CRTC owns
	 * FBC. Note this path deliberately falls through to deactivate. */
	if (!multiple_pipes_ok(crtc)) {
		fbc->no_fbc_reason = "more than one pipe active";
		goto deactivate;
	}

	/* Nothing to cache or deactivate unless FBC is tied to this CRTC. */
	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc);

deactivate:
	intel_fbc_deactivate(dev_priv);
unlock:
	mutex_unlock(&fbc->lock);
}
913
 
914
/* Lock-held helper: after a CRTC update, decide whether FBC on @crtc should
 * be (re)activated, and schedule activation if the register parameters have
 * changed or FBC is currently inactive. Caller must hold fbc->lock. */
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_reg_params old_params;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	/* Only the CRTC that owns FBC is of interest here. */
	if (!fbc->enabled || fbc->crtc != crtc)
		return;

	if (!intel_fbc_can_activate(crtc)) {
		/* If activation isn't possible, FBC must not be active. */
		WARN_ON(fbc->active);
		return;
	}

	old_params = fbc->params;
	intel_fbc_get_reg_params(crtc, &fbc->params);

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (fbc->active &&
	    intel_fbc_reg_params_equal(&old_params, &fbc->params))
		return;

	/* Parameters changed (or FBC was inactive): restart activation. */
	intel_fbc_deactivate(dev_priv);
	intel_fbc_schedule_activation(crtc);
	fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}
946
 
7144 serge 947
void intel_fbc_post_update(struct intel_crtc *crtc)
6084 serge 948
{
6937 serge 949
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
7144 serge 950
	struct intel_fbc *fbc = &dev_priv->fbc;
6937 serge 951
 
6084 serge 952
	if (!fbc_supported(dev_priv))
953
		return;
954
 
7144 serge 955
	mutex_lock(&fbc->lock);
956
	__intel_fbc_post_update(crtc);
957
	mutex_unlock(&fbc->lock);
6084 serge 958
}
959
 
7144 serge 960
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
961
{
962
	if (fbc->enabled)
963
		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
964
	else
965
		return fbc->possible_framebuffer_bits;
966
}
967
 
6084 serge 968
/* Frontbuffer-tracking invalidate hook: rendering is about to touch the
 * given frontbuffers, so deactivate FBC until the matching flush. The bits
 * are remembered in fbc->busy_bits until intel_fbc_flush() clears them. */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* GTT and flip origins are ignored here, mirroring the identical
	 * check in intel_fbc_flush(). */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	/* Only record bits FBC actually cares about right now. */
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv);

	mutex_unlock(&fbc->lock);
}
989
 
990
/* Frontbuffer-tracking flush hook: rendering to the given frontbuffers has
 * finished. Clears the matching busy bits and, once FBC is idle again,
 * either recompresses (still active) or tries to reactivate it. */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* Same origins that were skipped in intel_fbc_invalidate(). */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	/* Act only once every previously-invalidated bit has been flushed,
	 * and only if the flush involves a frontbuffer FBC cares about. */
	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else
			__intel_fbc_post_update(fbc->crtc);
	}

	mutex_unlock(&fbc->lock);
}
1015
 
1016
/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct drm_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	bool fbc_crtc_present = false;
	int i, j;

	mutex_lock(&fbc->lock);

	/* Check whether the CRTC currently owning FBC takes part in this
	 * commit at all. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (fbc->crtc == to_intel_crtc(crtc)) {
			fbc_crtc_present = true;
			break;
		}
	}
	/* This atomic commit doesn't involve the CRTC currently tied to FBC. */
	if (!fbc_crtc_present && fbc->crtc != NULL)
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *intel_plane_state =
			to_intel_plane_state(plane_state);

		if (!intel_plane_state->visible)
			continue;

		/* Find the CRTC this plane is assigned to in the new state. */
		for_each_crtc_in_state(state, crtc, crtc_state, j) {
			struct intel_crtc_state *intel_crtc_state =
				to_intel_crtc_state(crtc_state);

			if (plane_state->crtc != crtc)
				continue;

			if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
				break;

			/* First compatible CRTC with a visible plane wins. */
			intel_crtc_state->enable_fbc = true;
			goto out;
		}
	}

out:
	mutex_unlock(&fbc->lock);
}
1080
 
1081
/**
1082
 * intel_fbc_enable: tries to enable FBC on the CRTC
1083
 * @crtc: the CRTC
1084
 *
7144 serge 1085
 * This function checks if the given CRTC was chosen for FBC, then enables it if
1086
 * possible. Notice that it doesn't activate FBC. It is valid to call
1087
 * intel_fbc_enable multiple times for the same pipe without an
1088
 * intel_fbc_disable in the middle, as long as it is deactivated.
6937 serge 1089
 */
1090
void intel_fbc_enable(struct intel_crtc *crtc)
1091
{
1092
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
7144 serge 1093
	struct intel_fbc *fbc = &dev_priv->fbc;
6937 serge 1094
 
1095
	if (!fbc_supported(dev_priv))
1096
		return;
1097
 
7144 serge 1098
	mutex_lock(&fbc->lock);
6937 serge 1099
 
7144 serge 1100
	if (fbc->enabled) {
1101
		WARN_ON(fbc->crtc == NULL);
1102
		if (fbc->crtc == crtc) {
1103
			WARN_ON(!crtc->config->enable_fbc);
1104
			WARN_ON(fbc->active);
1105
		}
6937 serge 1106
		goto out;
1107
	}
1108
 
7144 serge 1109
	if (!crtc->config->enable_fbc)
6937 serge 1110
		goto out;
1111
 
7144 serge 1112
	WARN_ON(fbc->active);
1113
	WARN_ON(fbc->crtc != NULL);
6937 serge 1114
 
7144 serge 1115
	intel_fbc_update_state_cache(crtc);
6937 serge 1116
	if (intel_fbc_alloc_cfb(crtc)) {
7144 serge 1117
		fbc->no_fbc_reason = "not enough stolen memory";
6937 serge 1118
		goto out;
1119
	}
1120
 
1121
	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
7144 serge 1122
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
6937 serge 1123
 
7144 serge 1124
	fbc->enabled = true;
1125
	fbc->crtc = crtc;
6937 serge 1126
out:
7144 serge 1127
	mutex_unlock(&fbc->lock);
6937 serge 1128
}
1129
 
1130
/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);
	/* FBC must only be torn down once the owning CRTC is already off. */
	WARN_ON(crtc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	/* Release the compressed framebuffer from stolen memory. */
	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}
1154
 
1155
/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc == crtc) {
		WARN_ON(!fbc->enabled);
		WARN_ON(fbc->active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&fbc->lock);

	/* Done outside fbc->lock — presumably because the work function
	 * itself takes the lock; matches intel_fbc_global_disable(). */
	cancel_work_sync(&fbc->work.work);
}
1179
 
6937 serge 1180
/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->enabled)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);

	/* Done outside fbc->lock — presumably because the work function
	 * itself takes the lock; matches intel_fbc_disable(). */
	cancel_work_sync(&fbc->work.work);
}
1200
 
1201
/**
 * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
 * @dev_priv: i915 device instance
 *
 * The FBC code needs to track CRTC visibility since the older platforms can't
 * have FBC enabled while multiple pipes are used. This function does the
 * initial setup at driver load to make sure FBC is matching the real hardware.
 */
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Don't even bother tracking anything if we don't need. */
	if (!no_fbc_on_multiple_pipes(dev_priv))
		return;

	/* Seed the mask with every pipe that is active and whose primary
	 * plane is visible right now. */
	for_each_intel_crtc(dev_priv->dev, crtc)
		if (intel_crtc_active(&crtc->base) &&
		    to_intel_plane_state(crtc->base.primary->state)->visible)
			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
1222
 
1223
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	enum pipe pipe;

	/* Software state: no FBC enabled, active, or scheduled yet. */
	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;
	fbc->work.scheduled = false;

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* Collect the primary-plane frontbuffer bits of every pipe that can
	 * possibly get FBC (only pipe A on restricted platforms). */
	for_each_pipe(dev_priv, pipe) {
		fbc->possible_framebuffer_bits |=
				INTEL_FRONTBUFFER_PRIMARY(pipe);

		if (fbc_on_pipe_a_only(dev_priv))
			break;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}