Subversion Repositories Kolibri OS

Rev 6084 (-) → Rev 6937 (+)
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is total
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. It comes from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible to reserve stolen memory for FBC and configure its
 * offset on proper registers. The hardware takes care of all
 * compress/decompress. However there are many known cases where we have to
 * forcibly disable it to allow proper screen updates.
 */

#include "intel_drv.h"
#include "i915_drv.h"

static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
-	return dev_priv->fbc.enable_fbc != NULL;
+	return dev_priv->fbc.activate != NULL;
+}
+
+static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
+{
+	return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
+}
+
+static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
+{
+	return INTEL_INFO(dev_priv)->gen < 4;
}

/*
 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
	return crtc->base.y - crtc->adjusted_y;
}
+
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
+					    int *width, int *height)
+{
+	struct intel_plane_state *plane_state =
+			to_intel_plane_state(crtc->base.primary->state);
+	int w, h;
+
+	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
+		w = drm_rect_height(&plane_state->src) >> 16;
+		h = drm_rect_width(&plane_state->src) >> 16;
+	} else {
+		w = drm_rect_width(&plane_state->src) >> 16;
+		h = drm_rect_height(&plane_state->src) >> 16;
+	}
+
+	if (width)
+		*width = w;
+	if (height)
+		*height = h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
+					struct drm_framebuffer *fb)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	int lines;
+
+	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		lines = min(lines, 2048);
+
+	/* Hardware needs the full buffer stride, not just the active area. */
+	return lines * fb->pitches[0];
+}

-static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
+static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
-
-	DRM_DEBUG_KMS("disabled FBC\n");
}

-static void i8xx_fbc_enable(struct intel_crtc *crtc)
+static void i8xx_fbc_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;

	/* Note: fbc.threshold == 1 for i8xx */
-	cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
+	cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev_priv))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN4(dev_priv)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
-
-	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
-		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}

-static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

-static void g4x_fbc_enable(struct intel_crtc *crtc)
+static void g4x_fbc_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;

-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

-static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
+static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

-static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

+/* This function forces a CFB recompression through the nuke operation. */
-static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
+static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}

-static void ilk_fbc_enable(struct intel_crtc *crtc)
+static void ilk_fbc_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;
	unsigned int y_offset;

-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= obj->fence_reg;

	y_offset = get_crtc_fence_y_offset(crtc);
	I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
	}

-	intel_fbc_nuke(dev_priv);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
+	intel_fbc_recompress(dev_priv);
}

-static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
+static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

-	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
-		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

-static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

-static void gen7_fbc_enable(struct intel_crtc *crtc)
+static void gen7_fbc_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

-	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.active = true;

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);

	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));

-	intel_fbc_nuke(dev_priv);
-
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
+	intel_fbc_recompress(dev_priv);
}

/**
- * intel_fbc_enabled - Is FBC enabled?
+ * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 * FIXME: This should be tracked in the plane config eventually
 *        instead of queried at runtime for most callers.
 */
-bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
-	return dev_priv->fbc.enabled;
+	return dev_priv->fbc.active;
}

-static void intel_fbc_enable(struct intel_crtc *crtc,
-			     const struct drm_framebuffer *fb)
+static void intel_fbc_activate(const struct drm_framebuffer *fb)
{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = fb->dev->dev_private;
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;

-	dev_priv->fbc.enable_fbc(crtc);
+	dev_priv->fbc.activate(crtc);

-	dev_priv->fbc.crtc = crtc;
	dev_priv->fbc.fb_id = fb->base.id;
	dev_priv->fbc.y = crtc->base.y;
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
-	struct intel_fbc_work *work =
-		container_of(to_delayed_work(__work),
-			     struct intel_fbc_work, work);
-	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
-	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
-
-	mutex_lock(&dev_priv->fbc.lock);
-	if (work == dev_priv->fbc.fbc_work) {
-		/* Double check that we haven't switched fb without cancelling
-		 * the prior work.
-		 */
-		if (crtc_fb == work->fb)
-			intel_fbc_enable(work->crtc, work->fb);
-
-		dev_priv->fbc.fbc_work = NULL;
-	}
-	mutex_unlock(&dev_priv->fbc.lock);
-
-	kfree(work);
+	struct drm_i915_private *dev_priv =
+		container_of(__work, struct drm_i915_private, fbc.work.work);
+	struct intel_fbc_work *work = &dev_priv->fbc.work;
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;
+	int delay_ms = 50;
+
+retry:
+	/* Delay the actual enabling to let pageflipping cease and the
+	 * display to settle before starting the compression. Note that
+	 * this delay also serves a second purpose: it allows for a
+	 * vblank to pass after disabling the FBC before we attempt
+	 * to modify the control registers.
+	 *
+	 * A more complicated solution would involve tracking vblanks
+	 * following the termination of the page-flipping sequence
+	 * and indeed performing the enable as a co-routine and not
+	 * waiting synchronously upon the vblank.
+	 *
+	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+	 */
+	wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
+
+	mutex_lock(&dev_priv->fbc.lock);
+
+	/* Were we cancelled? */
+	if (!work->scheduled)
+		goto out;
+
+	/* Were we delayed again while this function was sleeping? */
+	if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
+		       jiffies)) {
+		mutex_unlock(&dev_priv->fbc.lock);
+		goto retry;
+	}
+
+	if (crtc->base.primary->fb == work->fb)
+		intel_fbc_activate(work->fb);
+
+	work->scheduled = false;
+
+out:
+	mutex_unlock(&dev_priv->fbc.lock);
}

static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-
-	if (dev_priv->fbc.fbc_work == NULL)
-		return;
-
-	DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
-	/* Synchronisation is provided by struct_mutex and checking of
-	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
-	 * entirely asynchronously.
-	 */
-	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
-		/* tasklet was killed before being run, clean up */
-		kfree(dev_priv->fbc.fbc_work);
-
-	/* Mark the work as no longer wanted so that if it does
-	 * wake-up (because the work was already running and waiting
-	 * for our mutex), it will discover that is no longer
-	 * necessary to run.
-	 */
-	dev_priv->fbc.fbc_work = NULL;
+	dev_priv->fbc.work.scheduled = false;
}

-static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
-{
-	struct intel_fbc_work *work;
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-
-	intel_fbc_cancel_work(dev_priv);
-
-	work = kzalloc(sizeof(*work), GFP_KERNEL);
-	if (work == NULL) {
-		DRM_ERROR("Failed to allocate FBC work structure\n");
-		intel_fbc_enable(crtc, crtc->base.primary->fb);
-		return;
-	}
-
-	work->crtc = crtc;
-	work->fb = crtc->base.primary->fb;
-	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
-	dev_priv->fbc.fbc_work = work;
-
-	/* Delay the actual enabling to let pageflipping cease and the
-	 * display to settle before starting the compression. Note that
-	 * this delay also serves a second purpose: it allows for a
-	 * vblank to pass after disabling the FBC before we attempt
-	 * to modify the control registers.
-	 *
-	 * A more complicated solution would involve tracking vblanks
-	 * following the termination of the page-flipping sequence
-	 * and indeed performing the enable as a co-routine and not
-	 * waiting synchronously upon the vblank.
-	 *
-	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
-	 */
-	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc_work *work = &dev_priv->fbc.work;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+	/* It is useless to call intel_fbc_cancel_work() in this function since
+	 * we're not releasing fbc.lock, so it won't have an opportunity to grab
+	 * it to discover that it was cancelled. So we just update the expected
+	 * jiffy count. */
+	work->fb = crtc->base.primary->fb;
+	work->scheduled = true;
+	work->enable_jiffies = jiffies;
+
+	schedule_work(&work->work);
}

-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	intel_fbc_cancel_work(dev_priv);
-
-	dev_priv->fbc.disable_fbc(dev_priv);
-	dev_priv->fbc.crtc = NULL;
-}
-
-/**
- * intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
- *
- * This function disables FBC.
- */
-void intel_fbc_disable(struct drm_i915_private *dev_priv)
-{
-	if (!fbc_supported(dev_priv))
-		return;
-
-	mutex_lock(&dev_priv->fbc.lock);
-	__intel_fbc_disable(dev_priv);
-	mutex_unlock(&dev_priv->fbc.lock);
+
+	if (dev_priv->fbc.active)
+		dev_priv->fbc.deactivate(dev_priv);
}

/*
- * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
 * @crtc: the CRTC
 *
- * This function disables FBC if it's associated with the provided CRTC.
+ * This function deactivates FBC if it's associated with the provided CRTC.
 */
-void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+void intel_fbc_deactivate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->fbc.lock);
	if (dev_priv->fbc.crtc == crtc)
-		__intel_fbc_disable(dev_priv);
+		__intel_fbc_deactivate(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}
-
-const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
-{
-	switch (reason) {
-	case FBC_OK:
-		return "FBC enabled but currently disabled in hardware";
-	case FBC_UNSUPPORTED:
-		return "unsupported by this chipset";
-	case FBC_NO_OUTPUT:
-		return "no output";
-	case FBC_STOLEN_TOO_SMALL:
-		return "not enough stolen memory";
-	case FBC_UNSUPPORTED_MODE:
-		return "mode incompatible with compression";
-	case FBC_MODE_TOO_LARGE:
-		return "mode too large for compression";
-	case FBC_BAD_PLANE:
-		return "FBC unsupported on plane";
-	case FBC_NOT_TILED:
-		return "framebuffer not tiled or fenced";
-	case FBC_MULTIPLE_PIPES:
-		return "more than one pipe active";
-	case FBC_MODULE_PARAM:
-		return "disabled per module param";
-	case FBC_CHIP_DEFAULT:
-		return "disabled per chip default";
-	case FBC_ROTATION:
-		return "rotation unsupported";
-	case FBC_IN_DBG_MASTER:
-		return "Kernel debugger is active";
-	case FBC_BAD_STRIDE:
-		return "framebuffer stride not supported";
-	case FBC_PIXEL_RATE:
-		return "pixel rate is too big";
-	case FBC_PIXEL_FORMAT:
-		return "pixel format is invalid";
-	default:
-		MISSING_CASE(reason);
-		return "unknown reason";
-	}
-}

static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
-			      enum no_fbc_reason reason)
+			      const char *reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return;

	dev_priv->fbc.no_fbc_reason = reason;
-	DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
+	DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
}

-static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
+static bool crtc_can_fbc(struct intel_crtc *crtc)
{
-	struct drm_crtc *crtc = NULL, *tmp_crtc;
-	enum pipe pipe;
-	bool pipe_a_only = false;
-
-	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
-		pipe_a_only = true;
-
-	for_each_pipe(dev_priv, pipe) {
-		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
-		if (intel_crtc_active(tmp_crtc) &&
-		    to_intel_plane_state(tmp_crtc->primary->state)->visible)
-			crtc = tmp_crtc;
-
-		if (pipe_a_only)
-			break;
-	}
-
-	if (!crtc || crtc->primary->fb == NULL)
-		return NULL;
-
-	return crtc;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+		return false;
+
+	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
+		return false;
+
+	return true;
+}
+
+static bool crtc_is_valid(struct intel_crtc *crtc)
+{
+	if (!intel_crtc_active(&crtc->base))
+		return false;
+
+	if (!to_intel_plane_state(crtc->base.primary->state)->visible)
+		return false;
+
+	return true;
}

static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;
	int n_pipes = 0;
	struct drm_crtc *crtc;

	if (INTEL_INFO(dev_priv)->gen > 4)
		return true;

	for_each_pipe(dev_priv, pipe) {
		crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (intel_crtc_active(crtc) &&
		    to_intel_plane_state(crtc->primary->state)->visible)
			n_pipes++;
	}

	return (n_pipes < 2);
}

static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
-	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+	if (IS_BROADWELL(dev_priv) ||
+	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
	else
		end = dev_priv->gtt.stolen_usable_size;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

-static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
-			       int fb_cpp)
-{
-	struct drm_mm_node *uninitialized_var(compressed_llb);
-	int ret;
-
+static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->state->fb;
+	struct drm_mm_node *uninitialized_var(compressed_llb);
+	int size, fb_cpp, ret;
+
+	WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
+
+	size = intel_fbc_calculate_cfb_size(crtc, fb);
+	fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	dev_priv->fbc.threshold = ret;

	if (INTEL_INFO(dev_priv)->gen >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}
-
-	dev_priv->fbc.uncompressed_size = size;

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      dev_priv->fbc.compressed_fb.size,
		      dev_priv->fbc.threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
err_llb:
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
-	if (dev_priv->fbc.uncompressed_size == 0)
-		return;
-
-	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+	if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
+		i915_gem_stolen_remove_node(dev_priv,
+					    &dev_priv->fbc.compressed_fb);

	if (dev_priv->fbc.compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv,
					    dev_priv->fbc.compressed_llb);
		kfree(dev_priv->fbc.compressed_llb);
	}
-
-	dev_priv->fbc.uncompressed_size = 0;
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}
-
-/*
- * For SKL+, the plane source size used by the hardware is based on the value we
- * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
- * we wrote to PIPESRC.
- */
-static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
-					    int *width, int *height)
-{
-	struct intel_plane_state *plane_state =
-			to_intel_plane_state(crtc->base.primary->state);
-	int w, h;
-
-	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
-		w = drm_rect_height(&plane_state->src) >> 16;
-		h = drm_rect_width(&plane_state->src) >> 16;
-	} else {
-		w = drm_rect_width(&plane_state->src) >> 16;
-		h = drm_rect_height(&plane_state->src) >> 16;
-	}
-
-	if (width)
-		*width = w;
-	if (height)
-		*height = h;
-}
-
-static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	int lines;
-
-	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
-	if (INTEL_INFO(dev_priv)->gen >= 7)
-		lines = min(lines, 2048);
-
-	return lines * fb->pitches[0];
-}
-
-static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	int size, cpp;
-
-	size = intel_fbc_calculate_cfb_size(crtc);
-	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-
-	if (size <= dev_priv->fbc.uncompressed_size)
-		return 0;
-
-	/* Release any current block */
-	__intel_fbc_cleanup_cfb(dev_priv);
-
-	return intel_fbc_alloc_cfb(dev_priv, size, cpp);
-}

static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* These should have been caught earlier. */
	WARN_ON(stride < 512);
	WARN_ON((stride & (64 - 1)) != 0);

	/* Below are the additional FBC restrictions. */

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		return stride == 4096 || stride == 8192;

	if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

static bool pixel_format_is_valid(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN2(dev))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned int effective_w, effective_h, max_w, max_h;

	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
	effective_w += crtc->adjusted_x;
	effective_h += crtc->adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}

/**
- * __intel_fbc_update - enable/disable FBC as needed, unlocked
- * @dev_priv: i915 device instance
- *
- * Set up the framebuffer compression hardware at mode set time.  We
- * enable it if possible:
- *   - plane A only (on pre-965)
- *   - no pixel mulitply/line duplication
- *   - no alpha buffer discard
- *   - no dual wide
- *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one.  It also must reside (along with the line length buffer) in
- * stolen memory.
- *
- * We need to enable/disable FBC on a global basis.
- */
-static void __intel_fbc_update(struct drm_i915_private *dev_priv)
+ * __intel_fbc_update - activate/deactivate FBC as needed, unlocked
+ * @crtc: the CRTC that triggered the update
+ *
+ * This function completely reevaluates the status of FBC, then activates,
+ * deactivates or maintains it on the same state.
+ */
+static void __intel_fbc_update(struct intel_crtc *crtc)
{
-	struct drm_crtc *crtc = NULL;
-	struct intel_crtc *intel_crtc;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;

	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

-	/* disable framebuffer compression in vGPU */
-	if (intel_vgpu_active(dev_priv->dev))
-		i915.enable_fbc = 0;
-
-	if (i915.enable_fbc < 0) {
-		set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
-		goto out_disable;
-	}
-
-	if (!i915.enable_fbc) {
-		set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
+	if (!multiple_pipes_ok(dev_priv)) {
+		set_no_fbc_reason(dev_priv, "more than one pipe active");
		goto out_disable;
	}
-
-	/*
-	 * If FBC is already on, we just have to verify that we can
-	 * keep it that way...
-	 * Need to disable if:
-	 *   - more than one pipe is active
-	 *   - changing FBC params (stride, fence, mode)
-	 *   - new fb is too large to fit in compressed buffer
-	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
-	 */
-	crtc = intel_fbc_find_crtc(dev_priv);
-	if (!crtc) {
-		set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
-		goto out_disable;
-	}
-
-	if (!multiple_pipes_ok(dev_priv)) {
-		set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
-		goto out_disable;
-	}
-
-	intel_crtc = to_intel_crtc(crtc);
-	fb = crtc->primary->fb;
+
+	if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
+		return;
+
+	if (!crtc_is_valid(crtc)) {
+		set_no_fbc_reason(dev_priv, "no output");
+		goto out_disable;
+	}
+
+	fb = crtc->base.primary->fb;
	obj = intel_fb_obj(fb);
-	adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	adjusted_mode = &crtc->config->base.adjusted_mode;

	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
+		set_no_fbc_reason(dev_priv, "incompatible mode");
		goto out_disable;
	}

-	if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) {
-		set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
-		goto out_disable;
-	}
-
-	if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
-	    intel_crtc->plane != PLANE_A) {
-		set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
+	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
+		set_no_fbc_reason(dev_priv, "mode too large for compression");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
+		set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
		goto out_disable;
	}
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
-	    crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
-		set_no_fbc_reason(dev_priv, FBC_ROTATION);
+	    crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
+		set_no_fbc_reason(dev_priv, "rotation unsupported");
		goto out_disable;
	}

	if (!stride_is_valid(dev_priv, fb->pitches[0])) {
-		set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
+		set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
		goto out_disable;
	}

	if (!pixel_format_is_valid(fb)) {
-		set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
-		goto out_disable;
-	}
-
-	/* If the kernel debugger is active, always disable compression */
-	if (in_dbg_master()) {
-		set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
+		set_no_fbc_reason(dev_priv, "pixel format is invalid");
		goto out_disable;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
-	    ilk_pipe_pixel_rate(intel_crtc->config) >=
+	    ilk_pipe_pixel_rate(crtc->config) >=
	    dev_priv->cdclk_freq * 95 / 100) {
-		set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
+		set_no_fbc_reason(dev_priv, "pixel rate is too big");
		goto out_disable;
	}

-	if (intel_fbc_setup_cfb(intel_crtc)) {
-		set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
+	/* It is possible for the required CFB size change without a
+	 * crtc->disable + crtc->enable since it is possible to change the
+	 * stride without triggering a full modeset. Since we try to
+	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
+	 * if this happens, but if we exceed the current CFB size we'll have to
+	 * disable FBC. Notice that it would be possible to disable FBC, wait
+	 * for a frame, free the stolen node, then try to reenable FBC in case
+	 * we didn't get any invalidate/deactivate calls, but this would require
+	 * a lot of tracking just for a specific case. If we conclude it's an
+	 * important case, we can implement it later. */
+	if (intel_fbc_calculate_cfb_size(crtc, fb) >
+	    dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
+		set_no_fbc_reason(dev_priv, "CFB requirements changed");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
-	if (dev_priv->fbc.crtc == intel_crtc &&
+	if (dev_priv->fbc.crtc == crtc &&
	    dev_priv->fbc.fb_id == fb->base.id &&
-	    dev_priv->fbc.y == crtc->y)
+	    dev_priv->fbc.y == crtc->base.y &&
+	    dev_priv->fbc.active)
967
		return;
837
		return;
968
 
838
 
969
	if (intel_fbc_enabled(dev_priv)) {
839
	if (intel_fbc_is_active(dev_priv)) {
970
		/* We update FBC along two paths, after changing fb/crtc
840
		/* We update FBC along two paths, after changing fb/crtc
971
		 * configuration (modeswitching) and after page-flipping
841
		 * configuration (modeswitching) and after page-flipping
972
		 * finishes. For the latter, we know that not only did
842
		 * finishes. For the latter, we know that not only did
973
		 * we disable the FBC at the start of the page-flip
843
		 * we disable the FBC at the start of the page-flip
974
		 * sequence, but also more than one vblank has passed.
844
		 * sequence, but also more than one vblank has passed.
975
		 *
845
		 *
976
		 * For the former case of modeswitching, it is possible
846
		 * For the former case of modeswitching, it is possible
977
		 * to switch between two FBC valid configurations
847
		 * to switch between two FBC valid configurations
978
		 * instantaneously so we do need to disable the FBC
848
		 * instantaneously so we do need to disable the FBC
979
		 * before we can modify its control registers. We also
849
		 * before we can modify its control registers. We also
980
		 * have to wait for the next vblank for that to take
850
		 * have to wait for the next vblank for that to take
981
		 * effect. However, since we delay enabling FBC we can
851
		 * effect. However, since we delay enabling FBC we can
982
		 * assume that a vblank has passed since disabling and
852
		 * assume that a vblank has passed since disabling and
983
		 * that we can safely alter the registers in the deferred
853
		 * that we can safely alter the registers in the deferred
984
		 * callback.
854
		 * callback.
985
		 *
855
		 *
986
		 * In the scenario that we go from a valid to invalid
856
		 * In the scenario that we go from a valid to invalid
987
		 * and then back to valid FBC configuration we have
857
		 * and then back to valid FBC configuration we have
988
		 * no strict enforcement that a vblank occurred since
858
		 * no strict enforcement that a vblank occurred since
989
		 * disabling the FBC. However, along all current pipe
859
		 * disabling the FBC. However, along all current pipe
990
		 * disabling paths we do need to wait for a vblank at
860
		 * disabling paths we do need to wait for a vblank at
991
		 * some point. And we wait before enabling FBC anyway.
861
		 * some point. And we wait before enabling FBC anyway.
992
		 */
862
		 */
993
		DRM_DEBUG_KMS("disabling active FBC for update\n");
863
		DRM_DEBUG_KMS("deactivating FBC for update\n");
994
		__intel_fbc_disable(dev_priv);
864
		__intel_fbc_deactivate(dev_priv);
995
	}
865
	}
996
 
866
 
997
	intel_fbc_schedule_enable(intel_crtc);
867
	intel_fbc_schedule_activation(crtc);
998
	dev_priv->fbc.no_fbc_reason = FBC_OK;
868
	dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
999
	return;
869
	return;
1000
 
870
 
1001
out_disable:
871
out_disable:
1002
	/* Multiple disables should be harmless */
872
	/* Multiple disables should be harmless */
1003
	if (intel_fbc_enabled(dev_priv)) {
873
	if (intel_fbc_is_active(dev_priv)) {
1004
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
874
		DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
1005
		__intel_fbc_disable(dev_priv);
875
		__intel_fbc_deactivate(dev_priv);
1006
	}
876
	}
1007
	__intel_fbc_cleanup_cfb(dev_priv);
-
 
1008
}
877
}
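
/*
 * Illustrative sketch, not part of intel_fbc.c or either revision in this
 * diff: the update path above is a chain of checks where the first failing
 * check records a human-readable reason and bails out via out_disable. A
 * minimal standalone model of that pattern, using invented names, might look
 * like this:
 */
#include <stdbool.h>

struct fake_fbc_state {
	const char *no_fbc_reason;	/* last reason FBC was not kept on */
};

static bool fake_fbc_check_config(struct fake_fbc_state *fbc,
				  bool tiled, bool fenced, int active_pipes)
{
	if (active_pipes > 1) {
		fbc->no_fbc_reason = "more than one pipe active";
		return false;
	}
	if (!tiled || !fenced) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	fbc->no_fbc_reason = "FBC enabled (not necessarily active)";
	return true;
}
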
 
 /*
- * intel_fbc_update - enable/disable FBC as needed
- * @dev_priv: i915 device instance
+ * intel_fbc_update - activate/deactivate FBC as needed
+ * @crtc: the CRTC that triggered the update
  *
- * This function reevaluates the overall state and enables or disables FBC.
+ * This function reevaluates the overall state and activates or deactivates FBC.
  */
-void intel_fbc_update(struct drm_i915_private *dev_priv)
+void intel_fbc_update(struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
 	if (!fbc_supported(dev_priv))
 		return;
 
 	mutex_lock(&dev_priv->fbc.lock);
-	__intel_fbc_update(dev_priv);
+	__intel_fbc_update(crtc);
 	mutex_unlock(&dev_priv->fbc.lock);
 }
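
/*
 * Illustrative sketch, not part of either revision: intel_fbc_update() is a
 * thin wrapper that only takes fbc.lock around the unlocked worker
 * __intel_fbc_update(). A standalone model of that locking split, using
 * pthreads and invented names:
 */
#include <pthread.h>

struct fake_fbc {
	pthread_mutex_t lock;
	int state;
};

static void __fake_fbc_update(struct fake_fbc *fbc)
{
	/* caller must hold fbc->lock */
	fbc->state++;
}

static void fake_fbc_update(struct fake_fbc *fbc)
{
	pthread_mutex_lock(&fbc->lock);
	__fake_fbc_update(fbc);
	pthread_mutex_unlock(&fbc->lock);
}
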
 
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits,
 			  enum fb_op_origin origin)
 {
 	unsigned int fbc_bits;
 
 	if (!fbc_supported(dev_priv))
 		return;
 
 	if (origin == ORIGIN_GTT)
 		return;
 
 	mutex_lock(&dev_priv->fbc.lock);
 
 	if (dev_priv->fbc.enabled)
 		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
-	else if (dev_priv->fbc.fbc_work)
-		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
-					dev_priv->fbc.fbc_work->crtc->pipe);
 	else
 		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
 	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
 	if (dev_priv->fbc.busy_bits)
-		__intel_fbc_disable(dev_priv);
+		__intel_fbc_deactivate(dev_priv);
 
 	mutex_unlock(&dev_priv->fbc.lock);
 }
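
/*
 * Illustrative sketch, not part of intel_fbc.c: invalidate/flush above keep a
 * bitmask of "busy" frontbuffer slots; invalidate ORs the relevant bits in and
 * turns compression off while any are set, flush clears them again. A minimal
 * standalone model of that bookkeeping, with invented names:
 */
#include <stdbool.h>

struct fake_fbc_tracking {
	unsigned int busy_bits;	/* frontbuffer bits with pending CPU writes */
	bool active;
};

static void fake_fbc_invalidate(struct fake_fbc_tracking *t, unsigned int bits)
{
	t->busy_bits |= bits;
	if (t->busy_bits)
		t->active = false;	/* stands in for __intel_fbc_deactivate() */
}

static void fake_fbc_flush(struct fake_fbc_tracking *t, unsigned int bits)
{
	t->busy_bits &= ~bits;
	if (!t->busy_bits)
		t->active = true;	/* stands in for re-activation/update */
}
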
 
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
 		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
 {
 	if (!fbc_supported(dev_priv))
 		return;
 
 	if (origin == ORIGIN_GTT)
 		return;
 
 	mutex_lock(&dev_priv->fbc.lock);
 
 	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
 
-	if (!dev_priv->fbc.busy_bits) {
-		__intel_fbc_disable(dev_priv);
-		__intel_fbc_update(dev_priv);
-	}
+	if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
+		if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
+			intel_fbc_recompress(dev_priv);
+		} else {
+			__intel_fbc_deactivate(dev_priv);
+			__intel_fbc_update(dev_priv->fbc.crtc);
+		}
+	}
 
 	mutex_unlock(&dev_priv->fbc.lock);
 }
+
+/**
+ * intel_fbc_enable: tries to enable FBC on the CRTC
+ * @crtc: the CRTC
+ *
+ * This function checks if it's possible to enable FBC on the following CRTC,
+ * then enables it. Notice that it doesn't activate FBC.
+ */
+void intel_fbc_enable(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+
+	if (dev_priv->fbc.enabled) {
+		WARN_ON(dev_priv->fbc.crtc == crtc);
+		goto out;
+	}
+
+	WARN_ON(dev_priv->fbc.active);
+	WARN_ON(dev_priv->fbc.crtc != NULL);
+
+	if (intel_vgpu_active(dev_priv->dev)) {
+		set_no_fbc_reason(dev_priv, "VGPU is active");
+		goto out;
+	}
+
+	if (i915.enable_fbc < 0) {
+		set_no_fbc_reason(dev_priv, "disabled per chip default");
+		goto out;
+	}
+
+	if (!i915.enable_fbc) {
+		set_no_fbc_reason(dev_priv, "disabled per module param");
+		goto out;
+	}
+
+	if (!crtc_can_fbc(crtc)) {
+		set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
+		goto out;
+	}
+
+	if (intel_fbc_alloc_cfb(crtc)) {
+		set_no_fbc_reason(dev_priv, "not enough stolen memory");
+		goto out;
+	}
+
+	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+	dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n";
+
+	dev_priv->fbc.enabled = true;
+	dev_priv->fbc.crtc = crtc;
+out:
+	mutex_unlock(&dev_priv->fbc.lock);
+}
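
/*
 * Illustrative sketch, not part of either revision: the newer side of this
 * diff splits FBC state into "enabled" (software decided to use FBC on a CRTC
 * and reserved the compressed buffer) and "active" (hardware compression is
 * currently running, turned on later by deferred work). A standalone model of
 * that two-stage state, with invented names:
 */
#include <stdbool.h>

struct fake_fbc_stage {
	bool enabled;	/* enable step succeeded, buffer reserved */
	bool active;	/* hardware actually switched on afterwards */
};

static bool fake_fbc_enable(struct fake_fbc_stage *s, bool have_stolen_mem)
{
	if (s->enabled || !have_stolen_mem)
		return s->enabled;
	s->enabled = true;	/* does not touch the hardware yet */
	return true;
}

static void fake_fbc_activate(struct fake_fbc_stage *s)
{
	if (s->enabled)
		s->active = true;	/* the real driver programs registers here */
}
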
+
+/**
+ * __intel_fbc_disable - disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This is the low level function that actually disables FBC. Callers should
+ * grab the FBC lock.
+ */
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	struct intel_crtc *crtc = dev_priv->fbc.crtc;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+	WARN_ON(!dev_priv->fbc.enabled);
+	WARN_ON(dev_priv->fbc.active);
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+
+	__intel_fbc_cleanup_cfb(dev_priv);
+
+	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.crtc = NULL;
+}
+
+/**
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.crtc == crtc) {
+		WARN_ON(!dev_priv->fbc.enabled);
+		WARN_ON(dev_priv->fbc.active);
+		__intel_fbc_disable(dev_priv);
+	}
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
+ * intel_fbc_disable - globally disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This function disables FBC regardless of which CRTC is associated with it.
+ */
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.enabled)
+		__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
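
/*
 * Illustrative sketch, not part of intel_fbc.c: the two disable entry points
 * above differ only in scope - intel_fbc_disable_crtc() tears FBC down only if
 * the given CRTC currently owns it, while intel_fbc_disable() does so
 * unconditionally, both through a low-level helper that expects the lock to be
 * held. A standalone model of that ownership check, with invented names:
 */
#include <pthread.h>
#include <stdbool.h>

struct fake_fbc_owner {
	pthread_mutex_t lock;
	bool enabled;
	int owner_pipe;		/* pipe of the CRTC that owns FBC, -1 if none */
};

static void __fake_fbc_disable(struct fake_fbc_owner *f)
{
	/* caller must hold f->lock */
	f->enabled = false;
	f->owner_pipe = -1;
}

static void fake_fbc_disable_crtc(struct fake_fbc_owner *f, int pipe)
{
	pthread_mutex_lock(&f->lock);
	if (f->enabled && f->owner_pipe == pipe)
		__fake_fbc_disable(f);
	pthread_mutex_unlock(&f->lock);
}

static void fake_fbc_disable_all(struct fake_fbc_owner *f)
{
	pthread_mutex_lock(&f->lock);
	if (f->enabled)
		__fake_fbc_disable(f);
	pthread_mutex_unlock(&f->lock);
}
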
 
 /**
  * intel_fbc_init - Initialize FBC
  * @dev_priv: the i915 device
  *
  * This function might be called during PM init process.
  */
 void intel_fbc_init(struct drm_i915_private *dev_priv)
 {
 	enum pipe pipe;
 
-	mutex_init(&dev_priv->fbc.lock);
-
-	if (!HAS_FBC(dev_priv)) {
-		dev_priv->fbc.enabled = false;
-		dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
+	INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
+	mutex_init(&dev_priv->fbc.lock);
+	dev_priv->fbc.enabled = false;
+	dev_priv->fbc.active = false;
+	dev_priv->fbc.work.scheduled = false;
+
+	if (!HAS_FBC(dev_priv)) {
+		dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
 		return;
 	}
 
 	for_each_pipe(dev_priv, pipe) {
 		dev_priv->fbc.possible_framebuffer_bits |=
 				INTEL_FRONTBUFFER_PRIMARY(pipe);
 
-		if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
+		if (fbc_on_pipe_a_only(dev_priv))
 			break;
 	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 7) {
-		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->fbc.enable_fbc = gen7_fbc_enable;
-		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.is_active = ilk_fbc_is_active;
+		dev_priv->fbc.activate = gen7_fbc_activate;
+		dev_priv->fbc.deactivate = ilk_fbc_deactivate;
 	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
-		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->fbc.enable_fbc = ilk_fbc_enable;
-		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.is_active = ilk_fbc_is_active;
+		dev_priv->fbc.activate = ilk_fbc_activate;
+		dev_priv->fbc.deactivate = ilk_fbc_deactivate;
 	} else if (IS_GM45(dev_priv)) {
-		dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
-		dev_priv->fbc.enable_fbc = g4x_fbc_enable;
-		dev_priv->fbc.disable_fbc = g4x_fbc_disable;
+		dev_priv->fbc.is_active = g4x_fbc_is_active;
+		dev_priv->fbc.activate = g4x_fbc_activate;
+		dev_priv->fbc.deactivate = g4x_fbc_deactivate;
 	} else {
-		dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
-		dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
-		dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
+		dev_priv->fbc.is_active = i8xx_fbc_is_active;
+		dev_priv->fbc.activate = i8xx_fbc_activate;
+		dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
 
 		/* This value was pulled out of someone's hat */
 		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
 	}
 
-	dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
+	/* We still don't have any sort of hardware state readout for FBC, so
+	 * deactivate it in case the BIOS activated it to make sure software
+	 * matches the hardware state. */
+	if (dev_priv->fbc.is_active(dev_priv))
+		dev_priv->fbc.deactivate(dev_priv);
 }
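
/*
 * Illustrative sketch, not part of either revision: intel_fbc_init() above
 * fills a small table of function pointers (is_active/activate/deactivate)
 * chosen once by hardware generation, so the rest of the code can stay
 * generation-agnostic. A standalone model of that dispatch pattern, with
 * invented names:
 */
#include <stdbool.h>

struct fake_fbc_funcs {
	bool (*is_active)(void);
	void (*activate)(void);
	void (*deactivate)(void);
};

static bool gen_a_is_active(void) { return false; }
static void gen_a_activate(void) { /* would program generation-A registers */ }
static void gen_a_deactivate(void) { /* would clear generation-A enable bit */ }

static bool gen_b_is_active(void) { return false; }
static void gen_b_activate(void) { /* would program generation-B registers */ }
static void gen_b_deactivate(void) { /* would clear generation-B enable bit */ }

static void fake_fbc_init(struct fake_fbc_funcs *f, int gen)
{
	if (gen >= 7) {
		f->is_active = gen_b_is_active;
		f->activate = gen_b_activate;
		f->deactivate = gen_b_deactivate;
	} else {
		f->is_active = gen_a_is_active;
		f->activate = gen_a_activate;
		f->deactivate = gen_a_deactivate;
	}
}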