Subversion Repositories Kolibri OS

Rev

Rev 6937 | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 6937 Rev 7144
1
/*
1
/*
2
 * Copyright © 2006-2007 Intel Corporation
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
10
 *
11
 * The above copyright notice and this permission notice (including the next
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
13
 * Software.
14
 *
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
21
 * DEALINGS IN THE SOFTWARE.
22
 *
22
 *
23
 * Authors:
23
 * Authors:
24
 *	Eric Anholt 
24
 *	Eric Anholt 
25
 */
25
 */
26
 
26
 
27
#include 
27
#include 
28
#include 
28
#include 
29
#include 
29
#include 
30
#include 
30
#include 
31
#include 
31
#include 
32
#include 
32
#include 
33
#include 
33
#include 
34
#include 
34
#include 
35
#include 
35
#include 
36
#include "intel_drv.h"
36
#include "intel_drv.h"
37
#include 
37
#include 
38
#include "i915_drv.h"
38
#include "i915_drv.h"
39
#include "i915_trace.h"
39
#include "i915_trace.h"
40
#include 
40
#include 
41
#include 
41
#include 
42
#include 
42
#include 
43
#include 
43
#include 
44
#include 
44
#include 
45
#include 
45
#include 
46
#include 
46
#include 
47
#include 
47
#include 
48
#include 
48
#include 
49
 
49
 
50
/* Primary plane formats for gen <= 3 */
50
/* Primary plane formats for gen <= 3 */
51
static const uint32_t i8xx_primary_formats[] = {
51
static const uint32_t i8xx_primary_formats[] = {
52
	DRM_FORMAT_C8,
52
	DRM_FORMAT_C8,
53
	DRM_FORMAT_RGB565,
53
	DRM_FORMAT_RGB565,
54
	DRM_FORMAT_XRGB1555,
54
	DRM_FORMAT_XRGB1555,
55
	DRM_FORMAT_XRGB8888,
55
	DRM_FORMAT_XRGB8888,
56
};
56
};
57
 
57
 
58
/* Primary plane formats for gen >= 4 */
58
/* Primary plane formats for gen >= 4 */
59
static const uint32_t i965_primary_formats[] = {
59
static const uint32_t i965_primary_formats[] = {
60
	DRM_FORMAT_C8,
60
	DRM_FORMAT_C8,
61
	DRM_FORMAT_RGB565,
61
	DRM_FORMAT_RGB565,
62
	DRM_FORMAT_XRGB8888,
62
	DRM_FORMAT_XRGB8888,
63
	DRM_FORMAT_XBGR8888,
63
	DRM_FORMAT_XBGR8888,
64
	DRM_FORMAT_XRGB2101010,
64
	DRM_FORMAT_XRGB2101010,
65
	DRM_FORMAT_XBGR2101010,
65
	DRM_FORMAT_XBGR2101010,
66
};
66
};
67
 
67
 
68
static const uint32_t skl_primary_formats[] = {
68
static const uint32_t skl_primary_formats[] = {
69
	DRM_FORMAT_C8,
69
	DRM_FORMAT_C8,
70
	DRM_FORMAT_RGB565,
70
	DRM_FORMAT_RGB565,
71
	DRM_FORMAT_XRGB8888,
71
	DRM_FORMAT_XRGB8888,
72
	DRM_FORMAT_XBGR8888,
72
	DRM_FORMAT_XBGR8888,
73
	DRM_FORMAT_ARGB8888,
73
	DRM_FORMAT_ARGB8888,
74
	DRM_FORMAT_ABGR8888,
74
	DRM_FORMAT_ABGR8888,
75
	DRM_FORMAT_XRGB2101010,
75
	DRM_FORMAT_XRGB2101010,
76
	DRM_FORMAT_XBGR2101010,
76
	DRM_FORMAT_XBGR2101010,
77
	DRM_FORMAT_YUYV,
77
	DRM_FORMAT_YUYV,
78
	DRM_FORMAT_YVYU,
78
	DRM_FORMAT_YVYU,
79
	DRM_FORMAT_UYVY,
79
	DRM_FORMAT_UYVY,
80
	DRM_FORMAT_VYUY,
80
	DRM_FORMAT_VYUY,
81
};
81
};
82
 
82
 
83
/* Cursor formats */
83
/* Cursor formats */
84
static const uint32_t intel_cursor_formats[] = {
84
static const uint32_t intel_cursor_formats[] = {
85
	DRM_FORMAT_ARGB8888,
85
	DRM_FORMAT_ARGB8888,
86
};
86
};
87
 
-
 
88
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
-
 
89
 
87
 
90
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
88
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
91
				struct intel_crtc_state *pipe_config);
89
				struct intel_crtc_state *pipe_config);
92
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
90
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
93
				   struct intel_crtc_state *pipe_config);
91
				   struct intel_crtc_state *pipe_config);
94
 
92
 
95
static int intel_framebuffer_init(struct drm_device *dev,
93
static int intel_framebuffer_init(struct drm_device *dev,
96
				  struct intel_framebuffer *ifb,
94
				  struct intel_framebuffer *ifb,
97
				  struct drm_mode_fb_cmd2 *mode_cmd,
95
				  struct drm_mode_fb_cmd2 *mode_cmd,
98
				  struct drm_i915_gem_object *obj);
96
				  struct drm_i915_gem_object *obj);
99
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
97
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
100
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
98
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
101
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
99
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
102
					 struct intel_link_m_n *m_n,
100
					 struct intel_link_m_n *m_n,
103
					 struct intel_link_m_n *m2_n2);
101
					 struct intel_link_m_n *m2_n2);
104
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
102
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
105
static void haswell_set_pipeconf(struct drm_crtc *crtc);
103
static void haswell_set_pipeconf(struct drm_crtc *crtc);
106
static void intel_set_pipe_csc(struct drm_crtc *crtc);
104
static void intel_set_pipe_csc(struct drm_crtc *crtc);
107
static void vlv_prepare_pll(struct intel_crtc *crtc,
105
static void vlv_prepare_pll(struct intel_crtc *crtc,
108
			    const struct intel_crtc_state *pipe_config);
106
			    const struct intel_crtc_state *pipe_config);
109
static void chv_prepare_pll(struct intel_crtc *crtc,
107
static void chv_prepare_pll(struct intel_crtc *crtc,
110
			    const struct intel_crtc_state *pipe_config);
108
			    const struct intel_crtc_state *pipe_config);
111
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
109
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
112
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
110
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
113
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
111
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
114
	struct intel_crtc_state *crtc_state);
112
	struct intel_crtc_state *crtc_state);
115
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
113
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
116
			   int num_connectors);
114
			   int num_connectors);
117
static void skylake_pfit_enable(struct intel_crtc *crtc);
115
static void skylake_pfit_enable(struct intel_crtc *crtc);
118
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
116
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
119
static void ironlake_pfit_enable(struct intel_crtc *crtc);
117
static void ironlake_pfit_enable(struct intel_crtc *crtc);
120
static void intel_modeset_setup_hw_state(struct drm_device *dev);
118
static void intel_modeset_setup_hw_state(struct drm_device *dev);
121
static void intel_pre_disable_primary(struct drm_crtc *crtc);
119
static void intel_pre_disable_primary(struct drm_crtc *crtc);
122
 
120
 
123
typedef struct {
121
typedef struct {
124
	int	min, max;
122
	int	min, max;
125
} intel_range_t;
123
} intel_range_t;
126
 
124
 
127
typedef struct {
125
typedef struct {
128
	int	dot_limit;
126
	int	dot_limit;
129
	int	p2_slow, p2_fast;
127
	int	p2_slow, p2_fast;
130
} intel_p2_t;
128
} intel_p2_t;
131
 
129
 
132
typedef struct intel_limit intel_limit_t;
130
typedef struct intel_limit intel_limit_t;
133
struct intel_limit {
131
struct intel_limit {
134
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
132
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
135
	intel_p2_t	    p2;
133
	intel_p2_t	    p2;
136
};
134
};
137
 
135
 
138
/* returns HPLL frequency in kHz */
136
/* returns HPLL frequency in kHz */
139
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
137
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
140
{
138
{
141
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
139
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
142
 
140
 
143
	/* Obtain SKU information */
141
	/* Obtain SKU information */
144
	mutex_lock(&dev_priv->sb_lock);
142
	mutex_lock(&dev_priv->sb_lock);
145
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
143
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
146
		CCK_FUSE_HPLL_FREQ_MASK;
144
		CCK_FUSE_HPLL_FREQ_MASK;
147
	mutex_unlock(&dev_priv->sb_lock);
145
	mutex_unlock(&dev_priv->sb_lock);
148
 
146
 
149
	return vco_freq[hpll_freq] * 1000;
147
	return vco_freq[hpll_freq] * 1000;
150
}
148
}
151
 
149
 
152
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
150
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
153
				  const char *name, u32 reg)
151
				  const char *name, u32 reg)
154
{
152
{
155
	u32 val;
153
	u32 val;
156
	int divider;
154
	int divider;
157
 
155
 
158
	if (dev_priv->hpll_freq == 0)
156
	if (dev_priv->hpll_freq == 0)
159
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
157
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
160
 
158
 
161
	mutex_lock(&dev_priv->sb_lock);
159
	mutex_lock(&dev_priv->sb_lock);
162
	val = vlv_cck_read(dev_priv, reg);
160
	val = vlv_cck_read(dev_priv, reg);
163
	mutex_unlock(&dev_priv->sb_lock);
161
	mutex_unlock(&dev_priv->sb_lock);
164
 
162
 
165
	divider = val & CCK_FREQUENCY_VALUES;
163
	divider = val & CCK_FREQUENCY_VALUES;
166
 
164
 
167
	WARN((val & CCK_FREQUENCY_STATUS) !=
165
	WARN((val & CCK_FREQUENCY_STATUS) !=
168
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
166
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
169
	     "%s change in progress\n", name);
167
	     "%s change in progress\n", name);
170
 
168
 
171
	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
169
	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
172
}
170
}
173
 
171
 
174
int
172
int
175
intel_pch_rawclk(struct drm_device *dev)
173
intel_pch_rawclk(struct drm_device *dev)
176
{
174
{
177
	struct drm_i915_private *dev_priv = dev->dev_private;
175
	struct drm_i915_private *dev_priv = dev->dev_private;
178
 
176
 
179
	WARN_ON(!HAS_PCH_SPLIT(dev));
177
	WARN_ON(!HAS_PCH_SPLIT(dev));
180
 
178
 
181
	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
179
	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
182
}
180
}
183
 
181
 
184
/* hrawclock is 1/4 the FSB frequency */
182
/* hrawclock is 1/4 the FSB frequency */
185
int intel_hrawclk(struct drm_device *dev)
183
int intel_hrawclk(struct drm_device *dev)
186
{
184
{
187
	struct drm_i915_private *dev_priv = dev->dev_private;
185
	struct drm_i915_private *dev_priv = dev->dev_private;
188
	uint32_t clkcfg;
186
	uint32_t clkcfg;
189
 
187
 
190
	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
188
	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
191
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
189
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
192
		return 200;
190
		return 200;
193
 
191
 
194
	clkcfg = I915_READ(CLKCFG);
192
	clkcfg = I915_READ(CLKCFG);
195
	switch (clkcfg & CLKCFG_FSB_MASK) {
193
	switch (clkcfg & CLKCFG_FSB_MASK) {
196
	case CLKCFG_FSB_400:
194
	case CLKCFG_FSB_400:
197
		return 100;
195
		return 100;
198
	case CLKCFG_FSB_533:
196
	case CLKCFG_FSB_533:
199
		return 133;
197
		return 133;
200
	case CLKCFG_FSB_667:
198
	case CLKCFG_FSB_667:
201
		return 166;
199
		return 166;
202
	case CLKCFG_FSB_800:
200
	case CLKCFG_FSB_800:
203
		return 200;
201
		return 200;
204
	case CLKCFG_FSB_1067:
202
	case CLKCFG_FSB_1067:
205
		return 266;
203
		return 266;
206
	case CLKCFG_FSB_1333:
204
	case CLKCFG_FSB_1333:
207
		return 333;
205
		return 333;
208
	/* these two are just a guess; one of them might be right */
206
	/* these two are just a guess; one of them might be right */
209
	case CLKCFG_FSB_1600:
207
	case CLKCFG_FSB_1600:
210
	case CLKCFG_FSB_1600_ALT:
208
	case CLKCFG_FSB_1600_ALT:
211
		return 400;
209
		return 400;
212
	default:
210
	default:
213
		return 133;
211
		return 133;
214
	}
212
	}
215
}
213
}
216
 
214
 
217
static void intel_update_czclk(struct drm_i915_private *dev_priv)
215
static void intel_update_czclk(struct drm_i915_private *dev_priv)
218
{
216
{
219
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
217
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
220
		return;
218
		return;
221
 
219
 
222
	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
220
	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
223
						      CCK_CZ_CLOCK_CONTROL);
221
						      CCK_CZ_CLOCK_CONTROL);
224
 
222
 
225
	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
223
	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
226
}
224
}
227
 
225
 
228
static inline u32 /* units of 100MHz */
226
static inline u32 /* units of 100MHz */
229
intel_fdi_link_freq(struct drm_device *dev)
227
intel_fdi_link_freq(struct drm_device *dev)
230
{
228
{
231
	if (IS_GEN5(dev)) {
229
	if (IS_GEN5(dev)) {
232
		struct drm_i915_private *dev_priv = dev->dev_private;
230
		struct drm_i915_private *dev_priv = dev->dev_private;
233
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
231
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
234
	} else
232
	} else
235
		return 27;
233
		return 27;
236
}
234
}
237
 
235
 
238
static const intel_limit_t intel_limits_i8xx_dac = {
236
static const intel_limit_t intel_limits_i8xx_dac = {
239
	.dot = { .min = 25000, .max = 350000 },
237
	.dot = { .min = 25000, .max = 350000 },
240
	.vco = { .min = 908000, .max = 1512000 },
238
	.vco = { .min = 908000, .max = 1512000 },
241
	.n = { .min = 2, .max = 16 },
239
	.n = { .min = 2, .max = 16 },
242
	.m = { .min = 96, .max = 140 },
240
	.m = { .min = 96, .max = 140 },
243
	.m1 = { .min = 18, .max = 26 },
241
	.m1 = { .min = 18, .max = 26 },
244
	.m2 = { .min = 6, .max = 16 },
242
	.m2 = { .min = 6, .max = 16 },
245
	.p = { .min = 4, .max = 128 },
243
	.p = { .min = 4, .max = 128 },
246
	.p1 = { .min = 2, .max = 33 },
244
	.p1 = { .min = 2, .max = 33 },
247
	.p2 = { .dot_limit = 165000,
245
	.p2 = { .dot_limit = 165000,
248
		.p2_slow = 4, .p2_fast = 2 },
246
		.p2_slow = 4, .p2_fast = 2 },
249
};
247
};
250
 
248
 
251
static const intel_limit_t intel_limits_i8xx_dvo = {
249
static const intel_limit_t intel_limits_i8xx_dvo = {
252
	.dot = { .min = 25000, .max = 350000 },
250
	.dot = { .min = 25000, .max = 350000 },
253
	.vco = { .min = 908000, .max = 1512000 },
251
	.vco = { .min = 908000, .max = 1512000 },
254
	.n = { .min = 2, .max = 16 },
252
	.n = { .min = 2, .max = 16 },
255
	.m = { .min = 96, .max = 140 },
253
	.m = { .min = 96, .max = 140 },
256
	.m1 = { .min = 18, .max = 26 },
254
	.m1 = { .min = 18, .max = 26 },
257
	.m2 = { .min = 6, .max = 16 },
255
	.m2 = { .min = 6, .max = 16 },
258
	.p = { .min = 4, .max = 128 },
256
	.p = { .min = 4, .max = 128 },
259
	.p1 = { .min = 2, .max = 33 },
257
	.p1 = { .min = 2, .max = 33 },
260
	.p2 = { .dot_limit = 165000,
258
	.p2 = { .dot_limit = 165000,
261
		.p2_slow = 4, .p2_fast = 4 },
259
		.p2_slow = 4, .p2_fast = 4 },
262
};
260
};
263
 
261
 
264
static const intel_limit_t intel_limits_i8xx_lvds = {
262
static const intel_limit_t intel_limits_i8xx_lvds = {
265
	.dot = { .min = 25000, .max = 350000 },
263
	.dot = { .min = 25000, .max = 350000 },
266
	.vco = { .min = 908000, .max = 1512000 },
264
	.vco = { .min = 908000, .max = 1512000 },
267
	.n = { .min = 2, .max = 16 },
265
	.n = { .min = 2, .max = 16 },
268
	.m = { .min = 96, .max = 140 },
266
	.m = { .min = 96, .max = 140 },
269
	.m1 = { .min = 18, .max = 26 },
267
	.m1 = { .min = 18, .max = 26 },
270
	.m2 = { .min = 6, .max = 16 },
268
	.m2 = { .min = 6, .max = 16 },
271
	.p = { .min = 4, .max = 128 },
269
	.p = { .min = 4, .max = 128 },
272
	.p1 = { .min = 1, .max = 6 },
270
	.p1 = { .min = 1, .max = 6 },
273
	.p2 = { .dot_limit = 165000,
271
	.p2 = { .dot_limit = 165000,
274
		.p2_slow = 14, .p2_fast = 7 },
272
		.p2_slow = 14, .p2_fast = 7 },
275
};
273
};
276
 
274
 
277
static const intel_limit_t intel_limits_i9xx_sdvo = {
275
static const intel_limit_t intel_limits_i9xx_sdvo = {
278
	.dot = { .min = 20000, .max = 400000 },
276
	.dot = { .min = 20000, .max = 400000 },
279
	.vco = { .min = 1400000, .max = 2800000 },
277
	.vco = { .min = 1400000, .max = 2800000 },
280
	.n = { .min = 1, .max = 6 },
278
	.n = { .min = 1, .max = 6 },
281
	.m = { .min = 70, .max = 120 },
279
	.m = { .min = 70, .max = 120 },
282
	.m1 = { .min = 8, .max = 18 },
280
	.m1 = { .min = 8, .max = 18 },
283
	.m2 = { .min = 3, .max = 7 },
281
	.m2 = { .min = 3, .max = 7 },
284
	.p = { .min = 5, .max = 80 },
282
	.p = { .min = 5, .max = 80 },
285
	.p1 = { .min = 1, .max = 8 },
283
	.p1 = { .min = 1, .max = 8 },
286
	.p2 = { .dot_limit = 200000,
284
	.p2 = { .dot_limit = 200000,
287
		.p2_slow = 10, .p2_fast = 5 },
285
		.p2_slow = 10, .p2_fast = 5 },
288
};
286
};
289
 
287
 
290
static const intel_limit_t intel_limits_i9xx_lvds = {
288
static const intel_limit_t intel_limits_i9xx_lvds = {
291
	.dot = { .min = 20000, .max = 400000 },
289
	.dot = { .min = 20000, .max = 400000 },
292
	.vco = { .min = 1400000, .max = 2800000 },
290
	.vco = { .min = 1400000, .max = 2800000 },
293
	.n = { .min = 1, .max = 6 },
291
	.n = { .min = 1, .max = 6 },
294
	.m = { .min = 70, .max = 120 },
292
	.m = { .min = 70, .max = 120 },
295
	.m1 = { .min = 8, .max = 18 },
293
	.m1 = { .min = 8, .max = 18 },
296
	.m2 = { .min = 3, .max = 7 },
294
	.m2 = { .min = 3, .max = 7 },
297
	.p = { .min = 7, .max = 98 },
295
	.p = { .min = 7, .max = 98 },
298
	.p1 = { .min = 1, .max = 8 },
296
	.p1 = { .min = 1, .max = 8 },
299
	.p2 = { .dot_limit = 112000,
297
	.p2 = { .dot_limit = 112000,
300
		.p2_slow = 14, .p2_fast = 7 },
298
		.p2_slow = 14, .p2_fast = 7 },
301
};
299
};
302
 
300
 
303
 
301
 
304
static const intel_limit_t intel_limits_g4x_sdvo = {
302
static const intel_limit_t intel_limits_g4x_sdvo = {
305
	.dot = { .min = 25000, .max = 270000 },
303
	.dot = { .min = 25000, .max = 270000 },
306
	.vco = { .min = 1750000, .max = 3500000},
304
	.vco = { .min = 1750000, .max = 3500000},
307
	.n = { .min = 1, .max = 4 },
305
	.n = { .min = 1, .max = 4 },
308
	.m = { .min = 104, .max = 138 },
306
	.m = { .min = 104, .max = 138 },
309
	.m1 = { .min = 17, .max = 23 },
307
	.m1 = { .min = 17, .max = 23 },
310
	.m2 = { .min = 5, .max = 11 },
308
	.m2 = { .min = 5, .max = 11 },
311
	.p = { .min = 10, .max = 30 },
309
	.p = { .min = 10, .max = 30 },
312
	.p1 = { .min = 1, .max = 3},
310
	.p1 = { .min = 1, .max = 3},
313
	.p2 = { .dot_limit = 270000,
311
	.p2 = { .dot_limit = 270000,
314
		.p2_slow = 10,
312
		.p2_slow = 10,
315
		.p2_fast = 10
313
		.p2_fast = 10
316
	},
314
	},
317
};
315
};
318
 
316
 
319
static const intel_limit_t intel_limits_g4x_hdmi = {
317
static const intel_limit_t intel_limits_g4x_hdmi = {
320
	.dot = { .min = 22000, .max = 400000 },
318
	.dot = { .min = 22000, .max = 400000 },
321
	.vco = { .min = 1750000, .max = 3500000},
319
	.vco = { .min = 1750000, .max = 3500000},
322
	.n = { .min = 1, .max = 4 },
320
	.n = { .min = 1, .max = 4 },
323
	.m = { .min = 104, .max = 138 },
321
	.m = { .min = 104, .max = 138 },
324
	.m1 = { .min = 16, .max = 23 },
322
	.m1 = { .min = 16, .max = 23 },
325
	.m2 = { .min = 5, .max = 11 },
323
	.m2 = { .min = 5, .max = 11 },
326
	.p = { .min = 5, .max = 80 },
324
	.p = { .min = 5, .max = 80 },
327
	.p1 = { .min = 1, .max = 8},
325
	.p1 = { .min = 1, .max = 8},
328
	.p2 = { .dot_limit = 165000,
326
	.p2 = { .dot_limit = 165000,
329
		.p2_slow = 10, .p2_fast = 5 },
327
		.p2_slow = 10, .p2_fast = 5 },
330
};
328
};
331
 
329
 
332
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
330
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
333
	.dot = { .min = 20000, .max = 115000 },
331
	.dot = { .min = 20000, .max = 115000 },
334
	.vco = { .min = 1750000, .max = 3500000 },
332
	.vco = { .min = 1750000, .max = 3500000 },
335
	.n = { .min = 1, .max = 3 },
333
	.n = { .min = 1, .max = 3 },
336
	.m = { .min = 104, .max = 138 },
334
	.m = { .min = 104, .max = 138 },
337
	.m1 = { .min = 17, .max = 23 },
335
	.m1 = { .min = 17, .max = 23 },
338
	.m2 = { .min = 5, .max = 11 },
336
	.m2 = { .min = 5, .max = 11 },
339
	.p = { .min = 28, .max = 112 },
337
	.p = { .min = 28, .max = 112 },
340
	.p1 = { .min = 2, .max = 8 },
338
	.p1 = { .min = 2, .max = 8 },
341
	.p2 = { .dot_limit = 0,
339
	.p2 = { .dot_limit = 0,
342
		.p2_slow = 14, .p2_fast = 14
340
		.p2_slow = 14, .p2_fast = 14
343
	},
341
	},
344
};
342
};
345
 
343
 
346
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
344
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
347
	.dot = { .min = 80000, .max = 224000 },
345
	.dot = { .min = 80000, .max = 224000 },
348
	.vco = { .min = 1750000, .max = 3500000 },
346
	.vco = { .min = 1750000, .max = 3500000 },
349
	.n = { .min = 1, .max = 3 },
347
	.n = { .min = 1, .max = 3 },
350
	.m = { .min = 104, .max = 138 },
348
	.m = { .min = 104, .max = 138 },
351
	.m1 = { .min = 17, .max = 23 },
349
	.m1 = { .min = 17, .max = 23 },
352
	.m2 = { .min = 5, .max = 11 },
350
	.m2 = { .min = 5, .max = 11 },
353
	.p = { .min = 14, .max = 42 },
351
	.p = { .min = 14, .max = 42 },
354
	.p1 = { .min = 2, .max = 6 },
352
	.p1 = { .min = 2, .max = 6 },
355
	.p2 = { .dot_limit = 0,
353
	.p2 = { .dot_limit = 0,
356
		.p2_slow = 7, .p2_fast = 7
354
		.p2_slow = 7, .p2_fast = 7
357
	},
355
	},
358
};
356
};
359
 
357
 
360
static const intel_limit_t intel_limits_pineview_sdvo = {
358
static const intel_limit_t intel_limits_pineview_sdvo = {
361
	.dot = { .min = 20000, .max = 400000},
359
	.dot = { .min = 20000, .max = 400000},
362
	.vco = { .min = 1700000, .max = 3500000 },
360
	.vco = { .min = 1700000, .max = 3500000 },
363
	/* Pineview's Ncounter is a ring counter */
361
	/* Pineview's Ncounter is a ring counter */
364
	.n = { .min = 3, .max = 6 },
362
	.n = { .min = 3, .max = 6 },
365
	.m = { .min = 2, .max = 256 },
363
	.m = { .min = 2, .max = 256 },
366
	/* Pineview only has one combined m divider, which we treat as m2. */
364
	/* Pineview only has one combined m divider, which we treat as m2. */
367
	.m1 = { .min = 0, .max = 0 },
365
	.m1 = { .min = 0, .max = 0 },
368
	.m2 = { .min = 0, .max = 254 },
366
	.m2 = { .min = 0, .max = 254 },
369
	.p = { .min = 5, .max = 80 },
367
	.p = { .min = 5, .max = 80 },
370
	.p1 = { .min = 1, .max = 8 },
368
	.p1 = { .min = 1, .max = 8 },
371
	.p2 = { .dot_limit = 200000,
369
	.p2 = { .dot_limit = 200000,
372
		.p2_slow = 10, .p2_fast = 5 },
370
		.p2_slow = 10, .p2_fast = 5 },
373
};
371
};
374
 
372
 
375
static const intel_limit_t intel_limits_pineview_lvds = {
373
static const intel_limit_t intel_limits_pineview_lvds = {
376
	.dot = { .min = 20000, .max = 400000 },
374
	.dot = { .min = 20000, .max = 400000 },
377
	.vco = { .min = 1700000, .max = 3500000 },
375
	.vco = { .min = 1700000, .max = 3500000 },
378
	.n = { .min = 3, .max = 6 },
376
	.n = { .min = 3, .max = 6 },
379
	.m = { .min = 2, .max = 256 },
377
	.m = { .min = 2, .max = 256 },
380
	.m1 = { .min = 0, .max = 0 },
378
	.m1 = { .min = 0, .max = 0 },
381
	.m2 = { .min = 0, .max = 254 },
379
	.m2 = { .min = 0, .max = 254 },
382
	.p = { .min = 7, .max = 112 },
380
	.p = { .min = 7, .max = 112 },
383
	.p1 = { .min = 1, .max = 8 },
381
	.p1 = { .min = 1, .max = 8 },
384
	.p2 = { .dot_limit = 112000,
382
	.p2 = { .dot_limit = 112000,
385
		.p2_slow = 14, .p2_fast = 14 },
383
		.p2_slow = 14, .p2_fast = 14 },
386
};
384
};
387
 
385
 
388
/* Ironlake / Sandybridge
386
/* Ironlake / Sandybridge
389
 *
387
 *
390
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
388
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
391
 * the range value for them is (actual_value - 2).
389
 * the range value for them is (actual_value - 2).
392
 */
390
 */
393
static const intel_limit_t intel_limits_ironlake_dac = {
391
static const intel_limit_t intel_limits_ironlake_dac = {
394
	.dot = { .min = 25000, .max = 350000 },
392
	.dot = { .min = 25000, .max = 350000 },
395
	.vco = { .min = 1760000, .max = 3510000 },
393
	.vco = { .min = 1760000, .max = 3510000 },
396
	.n = { .min = 1, .max = 5 },
394
	.n = { .min = 1, .max = 5 },
397
	.m = { .min = 79, .max = 127 },
395
	.m = { .min = 79, .max = 127 },
398
	.m1 = { .min = 12, .max = 22 },
396
	.m1 = { .min = 12, .max = 22 },
399
	.m2 = { .min = 5, .max = 9 },
397
	.m2 = { .min = 5, .max = 9 },
400
	.p = { .min = 5, .max = 80 },
398
	.p = { .min = 5, .max = 80 },
401
	.p1 = { .min = 1, .max = 8 },
399
	.p1 = { .min = 1, .max = 8 },
402
	.p2 = { .dot_limit = 225000,
400
	.p2 = { .dot_limit = 225000,
403
		.p2_slow = 10, .p2_fast = 5 },
401
		.p2_slow = 10, .p2_fast = 5 },
404
};
402
};
405
 
403
 
406
static const intel_limit_t intel_limits_ironlake_single_lvds = {
404
static const intel_limit_t intel_limits_ironlake_single_lvds = {
407
	.dot = { .min = 25000, .max = 350000 },
405
	.dot = { .min = 25000, .max = 350000 },
408
	.vco = { .min = 1760000, .max = 3510000 },
406
	.vco = { .min = 1760000, .max = 3510000 },
409
	.n = { .min = 1, .max = 3 },
407
	.n = { .min = 1, .max = 3 },
410
	.m = { .min = 79, .max = 118 },
408
	.m = { .min = 79, .max = 118 },
411
	.m1 = { .min = 12, .max = 22 },
409
	.m1 = { .min = 12, .max = 22 },
412
	.m2 = { .min = 5, .max = 9 },
410
	.m2 = { .min = 5, .max = 9 },
413
	.p = { .min = 28, .max = 112 },
411
	.p = { .min = 28, .max = 112 },
414
	.p1 = { .min = 2, .max = 8 },
412
	.p1 = { .min = 2, .max = 8 },
415
	.p2 = { .dot_limit = 225000,
413
	.p2 = { .dot_limit = 225000,
416
		.p2_slow = 14, .p2_fast = 14 },
414
		.p2_slow = 14, .p2_fast = 14 },
417
};
415
};
418
 
416
 
419
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
417
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
420
	.dot = { .min = 25000, .max = 350000 },
418
	.dot = { .min = 25000, .max = 350000 },
421
	.vco = { .min = 1760000, .max = 3510000 },
419
	.vco = { .min = 1760000, .max = 3510000 },
422
	.n = { .min = 1, .max = 3 },
420
	.n = { .min = 1, .max = 3 },
423
	.m = { .min = 79, .max = 127 },
421
	.m = { .min = 79, .max = 127 },
424
	.m1 = { .min = 12, .max = 22 },
422
	.m1 = { .min = 12, .max = 22 },
425
	.m2 = { .min = 5, .max = 9 },
423
	.m2 = { .min = 5, .max = 9 },
426
	.p = { .min = 14, .max = 56 },
424
	.p = { .min = 14, .max = 56 },
427
	.p1 = { .min = 2, .max = 8 },
425
	.p1 = { .min = 2, .max = 8 },
428
	.p2 = { .dot_limit = 225000,
426
	.p2 = { .dot_limit = 225000,
429
		.p2_slow = 7, .p2_fast = 7 },
427
		.p2_slow = 7, .p2_fast = 7 },
430
};
428
};
431
 
429
 
432
/* LVDS 100mhz refclk limits. */
430
/* LVDS 100mhz refclk limits. */
433
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
431
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
434
	.dot = { .min = 25000, .max = 350000 },
432
	.dot = { .min = 25000, .max = 350000 },
435
	.vco = { .min = 1760000, .max = 3510000 },
433
	.vco = { .min = 1760000, .max = 3510000 },
436
	.n = { .min = 1, .max = 2 },
434
	.n = { .min = 1, .max = 2 },
437
	.m = { .min = 79, .max = 126 },
435
	.m = { .min = 79, .max = 126 },
438
	.m1 = { .min = 12, .max = 22 },
436
	.m1 = { .min = 12, .max = 22 },
439
	.m2 = { .min = 5, .max = 9 },
437
	.m2 = { .min = 5, .max = 9 },
440
	.p = { .min = 28, .max = 112 },
438
	.p = { .min = 28, .max = 112 },
441
	.p1 = { .min = 2, .max = 8 },
439
	.p1 = { .min = 2, .max = 8 },
442
	.p2 = { .dot_limit = 225000,
440
	.p2 = { .dot_limit = 225000,
443
		.p2_slow = 14, .p2_fast = 14 },
441
		.p2_slow = 14, .p2_fast = 14 },
444
};
442
};
445
 
443
 
446
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
444
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
447
	.dot = { .min = 25000, .max = 350000 },
445
	.dot = { .min = 25000, .max = 350000 },
448
	.vco = { .min = 1760000, .max = 3510000 },
446
	.vco = { .min = 1760000, .max = 3510000 },
449
	.n = { .min = 1, .max = 3 },
447
	.n = { .min = 1, .max = 3 },
450
	.m = { .min = 79, .max = 126 },
448
	.m = { .min = 79, .max = 126 },
451
	.m1 = { .min = 12, .max = 22 },
449
	.m1 = { .min = 12, .max = 22 },
452
	.m2 = { .min = 5, .max = 9 },
450
	.m2 = { .min = 5, .max = 9 },
453
	.p = { .min = 14, .max = 42 },
451
	.p = { .min = 14, .max = 42 },
454
	.p1 = { .min = 2, .max = 6 },
452
	.p1 = { .min = 2, .max = 6 },
455
	.p2 = { .dot_limit = 225000,
453
	.p2 = { .dot_limit = 225000,
456
		.p2_slow = 7, .p2_fast = 7 },
454
		.p2_slow = 7, .p2_fast = 7 },
457
};
455
};
458
 
456
 
459
static const intel_limit_t intel_limits_vlv = {
457
static const intel_limit_t intel_limits_vlv = {
460
	 /*
458
	 /*
461
	  * These are the data rate limits (measured in fast clocks)
459
	  * These are the data rate limits (measured in fast clocks)
462
	  * since those are the strictest limits we have. The fast
460
	  * since those are the strictest limits we have. The fast
463
	  * clock and actual rate limits are more relaxed, so checking
461
	  * clock and actual rate limits are more relaxed, so checking
464
	  * them would make no difference.
462
	  * them would make no difference.
465
	  */
463
	  */
466
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
464
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
467
	.vco = { .min = 4000000, .max = 6000000 },
465
	.vco = { .min = 4000000, .max = 6000000 },
468
	.n = { .min = 1, .max = 7 },
466
	.n = { .min = 1, .max = 7 },
469
	.m1 = { .min = 2, .max = 3 },
467
	.m1 = { .min = 2, .max = 3 },
470
	.m2 = { .min = 11, .max = 156 },
468
	.m2 = { .min = 11, .max = 156 },
471
	.p1 = { .min = 2, .max = 3 },
469
	.p1 = { .min = 2, .max = 3 },
472
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
470
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
473
};
471
};
474
 
472
 
475
static const intel_limit_t intel_limits_chv = {
473
static const intel_limit_t intel_limits_chv = {
476
	/*
474
	/*
477
	 * These are the data rate limits (measured in fast clocks)
475
	 * These are the data rate limits (measured in fast clocks)
478
	 * since those are the strictest limits we have.  The fast
476
	 * since those are the strictest limits we have.  The fast
479
	 * clock and actual rate limits are more relaxed, so checking
477
	 * clock and actual rate limits are more relaxed, so checking
480
	 * them would make no difference.
478
	 * them would make no difference.
481
	 */
479
	 */
482
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
480
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
483
	.vco = { .min = 4800000, .max = 6480000 },
481
	.vco = { .min = 4800000, .max = 6480000 },
484
	.n = { .min = 1, .max = 1 },
482
	.n = { .min = 1, .max = 1 },
485
	.m1 = { .min = 2, .max = 2 },
483
	.m1 = { .min = 2, .max = 2 },
486
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
484
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
487
	.p1 = { .min = 2, .max = 4 },
485
	.p1 = { .min = 2, .max = 4 },
488
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
486
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
489
};
487
};
490
 
488
 
491
static const intel_limit_t intel_limits_bxt = {
489
static const intel_limit_t intel_limits_bxt = {
492
	/* FIXME: find real dot limits */
490
	/* FIXME: find real dot limits */
493
	.dot = { .min = 0, .max = INT_MAX },
491
	.dot = { .min = 0, .max = INT_MAX },
494
	.vco = { .min = 4800000, .max = 6700000 },
492
	.vco = { .min = 4800000, .max = 6700000 },
495
	.n = { .min = 1, .max = 1 },
493
	.n = { .min = 1, .max = 1 },
496
	.m1 = { .min = 2, .max = 2 },
494
	.m1 = { .min = 2, .max = 2 },
497
	/* FIXME: find real m2 limits */
495
	/* FIXME: find real m2 limits */
498
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
496
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
499
	.p1 = { .min = 2, .max = 4 },
497
	.p1 = { .min = 2, .max = 4 },
500
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
498
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
501
};
499
};
502
 
500
 
503
static bool
501
static bool
504
needs_modeset(struct drm_crtc_state *state)
502
needs_modeset(struct drm_crtc_state *state)
505
{
503
{
506
	return drm_atomic_crtc_needs_modeset(state);
504
	return drm_atomic_crtc_needs_modeset(state);
507
}
505
}
508
 
506
 
509
/**
507
/**
510
 * Returns whether any output on the specified pipe is of the specified type
508
 * Returns whether any output on the specified pipe is of the specified type
511
 */
509
 */
512
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
510
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
513
{
511
{
514
	struct drm_device *dev = crtc->base.dev;
512
	struct drm_device *dev = crtc->base.dev;
515
	struct intel_encoder *encoder;
513
	struct intel_encoder *encoder;
516
 
514
 
517
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
515
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
518
		if (encoder->type == type)
516
		if (encoder->type == type)
519
			return true;
517
			return true;
520
 
518
 
521
	return false;
519
	return false;
522
}
520
}
523
 
521
 
524
/**
522
/**
525
 * Returns whether any output on the specified pipe will have the specified
523
 * Returns whether any output on the specified pipe will have the specified
526
 * type after a staged modeset is complete, i.e., the same as
524
 * type after a staged modeset is complete, i.e., the same as
527
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
525
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
528
 * encoder->crtc.
526
 * encoder->crtc.
529
 */
527
 */
530
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
528
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
531
				      int type)
529
				      int type)
532
{
530
{
533
	struct drm_atomic_state *state = crtc_state->base.state;
531
	struct drm_atomic_state *state = crtc_state->base.state;
534
	struct drm_connector *connector;
532
	struct drm_connector *connector;
535
	struct drm_connector_state *connector_state;
533
	struct drm_connector_state *connector_state;
536
	struct intel_encoder *encoder;
534
	struct intel_encoder *encoder;
537
	int i, num_connectors = 0;
535
	int i, num_connectors = 0;
538
 
536
 
539
	for_each_connector_in_state(state, connector, connector_state, i) {
537
	for_each_connector_in_state(state, connector, connector_state, i) {
540
		if (connector_state->crtc != crtc_state->base.crtc)
538
		if (connector_state->crtc != crtc_state->base.crtc)
541
			continue;
539
			continue;
542
 
540
 
543
		num_connectors++;
541
		num_connectors++;
544
 
542
 
545
		encoder = to_intel_encoder(connector_state->best_encoder);
543
		encoder = to_intel_encoder(connector_state->best_encoder);
546
		if (encoder->type == type)
544
		if (encoder->type == type)
547
			return true;
545
			return true;
548
	}
546
	}
549
 
547
 
550
	WARN_ON(num_connectors == 0);
548
	WARN_ON(num_connectors == 0);
551
 
549
 
552
	return false;
550
	return false;
553
}
551
}
554
 
552
 
555
static const intel_limit_t *
553
static const intel_limit_t *
556
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
554
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
557
{
555
{
558
	struct drm_device *dev = crtc_state->base.crtc->dev;
556
	struct drm_device *dev = crtc_state->base.crtc->dev;
559
	const intel_limit_t *limit;
557
	const intel_limit_t *limit;
560
 
558
 
561
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
559
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
562
		if (intel_is_dual_link_lvds(dev)) {
560
		if (intel_is_dual_link_lvds(dev)) {
563
			if (refclk == 100000)
561
			if (refclk == 100000)
564
				limit = &intel_limits_ironlake_dual_lvds_100m;
562
				limit = &intel_limits_ironlake_dual_lvds_100m;
565
			else
563
			else
566
				limit = &intel_limits_ironlake_dual_lvds;
564
				limit = &intel_limits_ironlake_dual_lvds;
567
		} else {
565
		} else {
568
			if (refclk == 100000)
566
			if (refclk == 100000)
569
				limit = &intel_limits_ironlake_single_lvds_100m;
567
				limit = &intel_limits_ironlake_single_lvds_100m;
570
			else
568
			else
571
				limit = &intel_limits_ironlake_single_lvds;
569
				limit = &intel_limits_ironlake_single_lvds;
572
		}
570
		}
573
	} else
571
	} else
574
		limit = &intel_limits_ironlake_dac;
572
		limit = &intel_limits_ironlake_dac;
575
 
573
 
576
	return limit;
574
	return limit;
577
}
575
}
578
 
576
 
579
static const intel_limit_t *
577
static const intel_limit_t *
580
intel_g4x_limit(struct intel_crtc_state *crtc_state)
578
intel_g4x_limit(struct intel_crtc_state *crtc_state)
581
{
579
{
582
	struct drm_device *dev = crtc_state->base.crtc->dev;
580
	struct drm_device *dev = crtc_state->base.crtc->dev;
583
	const intel_limit_t *limit;
581
	const intel_limit_t *limit;
584
 
582
 
585
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
583
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
586
		if (intel_is_dual_link_lvds(dev))
584
		if (intel_is_dual_link_lvds(dev))
587
			limit = &intel_limits_g4x_dual_channel_lvds;
585
			limit = &intel_limits_g4x_dual_channel_lvds;
588
		else
586
		else
589
			limit = &intel_limits_g4x_single_channel_lvds;
587
			limit = &intel_limits_g4x_single_channel_lvds;
590
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
588
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
591
		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
589
		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
592
		limit = &intel_limits_g4x_hdmi;
590
		limit = &intel_limits_g4x_hdmi;
593
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
591
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
594
		limit = &intel_limits_g4x_sdvo;
592
		limit = &intel_limits_g4x_sdvo;
595
	} else /* The option is for other outputs */
593
	} else /* The option is for other outputs */
596
		limit = &intel_limits_i9xx_sdvo;
594
		limit = &intel_limits_i9xx_sdvo;
597
 
595
 
598
	return limit;
596
	return limit;
599
}
597
}
600
 
598
 
601
static const intel_limit_t *
599
static const intel_limit_t *
602
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
600
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
603
{
601
{
604
	struct drm_device *dev = crtc_state->base.crtc->dev;
602
	struct drm_device *dev = crtc_state->base.crtc->dev;
605
	const intel_limit_t *limit;
603
	const intel_limit_t *limit;
606
 
604
 
607
	if (IS_BROXTON(dev))
605
	if (IS_BROXTON(dev))
608
		limit = &intel_limits_bxt;
606
		limit = &intel_limits_bxt;
609
	else if (HAS_PCH_SPLIT(dev))
607
	else if (HAS_PCH_SPLIT(dev))
610
		limit = intel_ironlake_limit(crtc_state, refclk);
608
		limit = intel_ironlake_limit(crtc_state, refclk);
611
	else if (IS_G4X(dev)) {
609
	else if (IS_G4X(dev)) {
612
		limit = intel_g4x_limit(crtc_state);
610
		limit = intel_g4x_limit(crtc_state);
613
	} else if (IS_PINEVIEW(dev)) {
611
	} else if (IS_PINEVIEW(dev)) {
614
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
612
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
615
			limit = &intel_limits_pineview_lvds;
613
			limit = &intel_limits_pineview_lvds;
616
		else
614
		else
617
			limit = &intel_limits_pineview_sdvo;
615
			limit = &intel_limits_pineview_sdvo;
618
	} else if (IS_CHERRYVIEW(dev)) {
616
	} else if (IS_CHERRYVIEW(dev)) {
619
		limit = &intel_limits_chv;
617
		limit = &intel_limits_chv;
620
	} else if (IS_VALLEYVIEW(dev)) {
618
	} else if (IS_VALLEYVIEW(dev)) {
621
		limit = &intel_limits_vlv;
619
		limit = &intel_limits_vlv;
622
	} else if (!IS_GEN2(dev)) {
620
	} else if (!IS_GEN2(dev)) {
623
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
621
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
624
			limit = &intel_limits_i9xx_lvds;
622
			limit = &intel_limits_i9xx_lvds;
625
		else
623
		else
626
			limit = &intel_limits_i9xx_sdvo;
624
			limit = &intel_limits_i9xx_sdvo;
627
	} else {
625
	} else {
628
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
626
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
629
			limit = &intel_limits_i8xx_lvds;
627
			limit = &intel_limits_i8xx_lvds;
630
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
628
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
631
			limit = &intel_limits_i8xx_dvo;
629
			limit = &intel_limits_i8xx_dvo;
632
		else
630
		else
633
			limit = &intel_limits_i8xx_dac;
631
			limit = &intel_limits_i8xx_dac;
634
	}
632
	}
635
	return limit;
633
	return limit;
636
}
634
}
637
 
635
 
638
/*
636
/*
639
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
637
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
640
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
638
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
641
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
639
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
642
 * The helpers' return value is the rate of the clock that is fed to the
640
 * The helpers' return value is the rate of the clock that is fed to the
643
 * display engine's pipe which can be the above fast dot clock rate or a
641
 * display engine's pipe which can be the above fast dot clock rate or a
644
 * divided-down version of it.
642
 * divided-down version of it.
645
 */
643
 */
646
/* m1 is reserved as 0 in Pineview, n is a ring counter */
644
/* m1 is reserved as 0 in Pineview, n is a ring counter */
647
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
645
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
648
{
646
{
649
	clock->m = clock->m2 + 2;
647
	clock->m = clock->m2 + 2;
650
	clock->p = clock->p1 * clock->p2;
648
	clock->p = clock->p1 * clock->p2;
651
	if (WARN_ON(clock->n == 0 || clock->p == 0))
649
	if (WARN_ON(clock->n == 0 || clock->p == 0))
652
		return 0;
650
		return 0;
653
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
651
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
654
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
652
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
655
 
653
 
656
	return clock->dot;
654
	return clock->dot;
657
}
655
}
658
 
656
 
659
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
657
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
660
{
658
{
661
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
659
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
662
}
660
}
663
 
661
 
664
static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
662
static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
665
{
663
{
666
	clock->m = i9xx_dpll_compute_m(clock);
664
	clock->m = i9xx_dpll_compute_m(clock);
667
	clock->p = clock->p1 * clock->p2;
665
	clock->p = clock->p1 * clock->p2;
668
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
666
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
669
		return 0;
667
		return 0;
670
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
668
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
671
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
669
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
672
 
670
 
673
	return clock->dot;
671
	return clock->dot;
674
}
672
}
675
 
673
 
676
static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
674
static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
677
{
675
{
678
	clock->m = clock->m1 * clock->m2;
676
	clock->m = clock->m1 * clock->m2;
679
	clock->p = clock->p1 * clock->p2;
677
	clock->p = clock->p1 * clock->p2;
680
	if (WARN_ON(clock->n == 0 || clock->p == 0))
678
	if (WARN_ON(clock->n == 0 || clock->p == 0))
681
		return 0;
679
		return 0;
682
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
680
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
683
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
681
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
684
 
682
 
685
	return clock->dot / 5;
683
	return clock->dot / 5;
686
}
684
}
687
 
685
 
688
int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
686
int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
689
{
687
{
690
	clock->m = clock->m1 * clock->m2;
688
	clock->m = clock->m1 * clock->m2;
691
	clock->p = clock->p1 * clock->p2;
689
	clock->p = clock->p1 * clock->p2;
692
	if (WARN_ON(clock->n == 0 || clock->p == 0))
690
	if (WARN_ON(clock->n == 0 || clock->p == 0))
693
		return 0;
691
		return 0;
694
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
692
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
695
			clock->n << 22);
693
			clock->n << 22);
696
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
694
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
697
 
695
 
698
	return clock->dot / 5;
696
	return clock->dot / 5;
699
}
697
}
700
 
698
 
701
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
699
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
702
/**
700
/**
703
 * Returns whether the given set of divisors are valid for a given refclk with
701
 * Returns whether the given set of divisors are valid for a given refclk with
704
 * the given connectors.
702
 * the given connectors.
705
 */
703
 */
706
 
704
 
707
static bool intel_PLL_is_valid(struct drm_device *dev,
705
static bool intel_PLL_is_valid(struct drm_device *dev,
708
			       const intel_limit_t *limit,
706
			       const intel_limit_t *limit,
709
			       const intel_clock_t *clock)
707
			       const intel_clock_t *clock)
710
{
708
{
711
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
709
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
712
		INTELPllInvalid("n out of range\n");
710
		INTELPllInvalid("n out of range\n");
713
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
711
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
714
		INTELPllInvalid("p1 out of range\n");
712
		INTELPllInvalid("p1 out of range\n");
715
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
713
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
716
		INTELPllInvalid("m2 out of range\n");
714
		INTELPllInvalid("m2 out of range\n");
717
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
715
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
718
		INTELPllInvalid("m1 out of range\n");
716
		INTELPllInvalid("m1 out of range\n");
719
 
717
 
720
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
718
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
721
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
719
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
722
		if (clock->m1 <= clock->m2)
720
		if (clock->m1 <= clock->m2)
723
			INTELPllInvalid("m1 <= m2\n");
721
			INTELPllInvalid("m1 <= m2\n");
724
 
722
 
725
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
723
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
726
		if (clock->p < limit->p.min || limit->p.max < clock->p)
724
		if (clock->p < limit->p.min || limit->p.max < clock->p)
727
			INTELPllInvalid("p out of range\n");
725
			INTELPllInvalid("p out of range\n");
728
		if (clock->m < limit->m.min || limit->m.max < clock->m)
726
		if (clock->m < limit->m.min || limit->m.max < clock->m)
729
			INTELPllInvalid("m out of range\n");
727
			INTELPllInvalid("m out of range\n");
730
	}
728
	}
731
 
729
 
732
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
730
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
733
		INTELPllInvalid("vco out of range\n");
731
		INTELPllInvalid("vco out of range\n");
734
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
732
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
735
	 * connector, etc., rather than just a single range.
733
	 * connector, etc., rather than just a single range.
736
	 */
734
	 */
737
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
735
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
738
		INTELPllInvalid("dot out of range\n");
736
		INTELPllInvalid("dot out of range\n");
739
 
737
 
740
	return true;
738
	return true;
741
}
739
}
742
 
740
 
743
static int
741
static int
744
i9xx_select_p2_div(const intel_limit_t *limit,
742
i9xx_select_p2_div(const intel_limit_t *limit,
745
		   const struct intel_crtc_state *crtc_state,
743
		   const struct intel_crtc_state *crtc_state,
746
		   int target)
744
		   int target)
747
{
745
{
748
	struct drm_device *dev = crtc_state->base.crtc->dev;
746
	struct drm_device *dev = crtc_state->base.crtc->dev;
749
 
747
 
750
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
748
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
751
		/*
749
		/*
752
		 * For LVDS just rely on its current settings for dual-channel.
750
		 * For LVDS just rely on its current settings for dual-channel.
753
		 * We haven't figured out how to reliably set up different
751
		 * We haven't figured out how to reliably set up different
754
		 * single/dual channel state, if we even can.
752
		 * single/dual channel state, if we even can.
755
		 */
753
		 */
756
		if (intel_is_dual_link_lvds(dev))
754
		if (intel_is_dual_link_lvds(dev))
757
			return limit->p2.p2_fast;
755
			return limit->p2.p2_fast;
758
		else
756
		else
759
			return limit->p2.p2_slow;
757
			return limit->p2.p2_slow;
760
	} else {
758
	} else {
761
		if (target < limit->p2.dot_limit)
759
		if (target < limit->p2.dot_limit)
762
			return limit->p2.p2_slow;
760
			return limit->p2.p2_slow;
763
		else
761
		else
764
			return limit->p2.p2_fast;
762
			return limit->p2.p2_fast;
765
	}
763
	}
766
}
764
}
767
 
765
 
768
static bool
766
static bool
769
i9xx_find_best_dpll(const intel_limit_t *limit,
767
i9xx_find_best_dpll(const intel_limit_t *limit,
770
		    struct intel_crtc_state *crtc_state,
768
		    struct intel_crtc_state *crtc_state,
771
		    int target, int refclk, intel_clock_t *match_clock,
769
		    int target, int refclk, intel_clock_t *match_clock,
772
		    intel_clock_t *best_clock)
770
		    intel_clock_t *best_clock)
773
{
771
{
774
	struct drm_device *dev = crtc_state->base.crtc->dev;
772
	struct drm_device *dev = crtc_state->base.crtc->dev;
775
	intel_clock_t clock;
773
	intel_clock_t clock;
776
	int err = target;
774
	int err = target;
777
 
775
 
778
	memset(best_clock, 0, sizeof(*best_clock));
776
	memset(best_clock, 0, sizeof(*best_clock));
779
 
777
 
780
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
778
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
781
 
779
 
782
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
780
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
783
	     clock.m1++) {
781
	     clock.m1++) {
784
		for (clock.m2 = limit->m2.min;
782
		for (clock.m2 = limit->m2.min;
785
		     clock.m2 <= limit->m2.max; clock.m2++) {
783
		     clock.m2 <= limit->m2.max; clock.m2++) {
786
			if (clock.m2 >= clock.m1)
784
			if (clock.m2 >= clock.m1)
787
				break;
785
				break;
788
			for (clock.n = limit->n.min;
786
			for (clock.n = limit->n.min;
789
			     clock.n <= limit->n.max; clock.n++) {
787
			     clock.n <= limit->n.max; clock.n++) {
790
				for (clock.p1 = limit->p1.min;
788
				for (clock.p1 = limit->p1.min;
791
					clock.p1 <= limit->p1.max; clock.p1++) {
789
					clock.p1 <= limit->p1.max; clock.p1++) {
792
					int this_err;
790
					int this_err;
793
 
791
 
794
					i9xx_calc_dpll_params(refclk, &clock);
792
					i9xx_calc_dpll_params(refclk, &clock);
795
					if (!intel_PLL_is_valid(dev, limit,
793
					if (!intel_PLL_is_valid(dev, limit,
796
								&clock))
794
								&clock))
797
						continue;
795
						continue;
798
					if (match_clock &&
796
					if (match_clock &&
799
					    clock.p != match_clock->p)
797
					    clock.p != match_clock->p)
800
						continue;
798
						continue;
801
 
799
 
802
					this_err = abs(clock.dot - target);
800
					this_err = abs(clock.dot - target);
803
					if (this_err < err) {
801
					if (this_err < err) {
804
						*best_clock = clock;
802
						*best_clock = clock;
805
						err = this_err;
803
						err = this_err;
806
					}
804
					}
807
				}
805
				}
808
			}
806
			}
809
		}
807
		}
810
	}
808
	}
811
 
809
 
812
	return (err != target);
810
	return (err != target);
813
}
811
}
814
 
812
 
815
static bool
813
static bool
816
pnv_find_best_dpll(const intel_limit_t *limit,
814
pnv_find_best_dpll(const intel_limit_t *limit,
817
		   struct intel_crtc_state *crtc_state,
815
		   struct intel_crtc_state *crtc_state,
818
		   int target, int refclk, intel_clock_t *match_clock,
816
		   int target, int refclk, intel_clock_t *match_clock,
819
		   intel_clock_t *best_clock)
817
		   intel_clock_t *best_clock)
820
{
818
{
821
	struct drm_device *dev = crtc_state->base.crtc->dev;
819
	struct drm_device *dev = crtc_state->base.crtc->dev;
822
	intel_clock_t clock;
820
	intel_clock_t clock;
823
	int err = target;
821
	int err = target;
824
 
822
 
825
	memset(best_clock, 0, sizeof(*best_clock));
823
	memset(best_clock, 0, sizeof(*best_clock));
826
 
824
 
827
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
825
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
828
 
826
 
829
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
827
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
830
	     clock.m1++) {
828
	     clock.m1++) {
831
		for (clock.m2 = limit->m2.min;
829
		for (clock.m2 = limit->m2.min;
832
		     clock.m2 <= limit->m2.max; clock.m2++) {
830
		     clock.m2 <= limit->m2.max; clock.m2++) {
833
			for (clock.n = limit->n.min;
831
			for (clock.n = limit->n.min;
834
			     clock.n <= limit->n.max; clock.n++) {
832
			     clock.n <= limit->n.max; clock.n++) {
835
				for (clock.p1 = limit->p1.min;
833
				for (clock.p1 = limit->p1.min;
836
					clock.p1 <= limit->p1.max; clock.p1++) {
834
					clock.p1 <= limit->p1.max; clock.p1++) {
837
					int this_err;
835
					int this_err;
838
 
836
 
839
					pnv_calc_dpll_params(refclk, &clock);
837
					pnv_calc_dpll_params(refclk, &clock);
840
					if (!intel_PLL_is_valid(dev, limit,
838
					if (!intel_PLL_is_valid(dev, limit,
841
								&clock))
839
								&clock))
842
						continue;
840
						continue;
843
					if (match_clock &&
841
					if (match_clock &&
844
					    clock.p != match_clock->p)
842
					    clock.p != match_clock->p)
845
						continue;
843
						continue;
846
 
844
 
847
					this_err = abs(clock.dot - target);
845
					this_err = abs(clock.dot - target);
848
					if (this_err < err) {
846
					if (this_err < err) {
849
						*best_clock = clock;
847
						*best_clock = clock;
850
						err = this_err;
848
						err = this_err;
851
					}
849
					}
852
				}
850
				}
853
			}
851
			}
854
		}
852
		}
855
	}
853
	}
856
 
854
 
857
	return (err != target);
855
	return (err != target);
858
}
856
}
859
 
857
 
860
static bool
858
static bool
861
g4x_find_best_dpll(const intel_limit_t *limit,
859
g4x_find_best_dpll(const intel_limit_t *limit,
862
		   struct intel_crtc_state *crtc_state,
860
		   struct intel_crtc_state *crtc_state,
863
		   int target, int refclk, intel_clock_t *match_clock,
861
		   int target, int refclk, intel_clock_t *match_clock,
864
		   intel_clock_t *best_clock)
862
		   intel_clock_t *best_clock)
865
{
863
{
866
	struct drm_device *dev = crtc_state->base.crtc->dev;
864
	struct drm_device *dev = crtc_state->base.crtc->dev;
867
	intel_clock_t clock;
865
	intel_clock_t clock;
868
	int max_n;
866
	int max_n;
869
	bool found = false;
867
	bool found = false;
870
	/* approximately equals target * 0.00585 */
868
	/* approximately equals target * 0.00585 */
871
	int err_most = (target >> 8) + (target >> 9);
869
	int err_most = (target >> 8) + (target >> 9);
872
 
870
 
873
	memset(best_clock, 0, sizeof(*best_clock));
871
	memset(best_clock, 0, sizeof(*best_clock));
874
 
872
 
875
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
873
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
876
 
874
 
877
	max_n = limit->n.max;
875
	max_n = limit->n.max;
878
	/* based on hardware requirement, prefer smaller n to precision */
876
	/* based on hardware requirement, prefer smaller n to precision */
879
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
877
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
880
		/* based on hardware requirement, prefere larger m1,m2 */
878
		/* based on hardware requirement, prefere larger m1,m2 */
881
		for (clock.m1 = limit->m1.max;
879
		for (clock.m1 = limit->m1.max;
882
		     clock.m1 >= limit->m1.min; clock.m1--) {
880
		     clock.m1 >= limit->m1.min; clock.m1--) {
883
			for (clock.m2 = limit->m2.max;
881
			for (clock.m2 = limit->m2.max;
884
			     clock.m2 >= limit->m2.min; clock.m2--) {
882
			     clock.m2 >= limit->m2.min; clock.m2--) {
885
				for (clock.p1 = limit->p1.max;
883
				for (clock.p1 = limit->p1.max;
886
				     clock.p1 >= limit->p1.min; clock.p1--) {
884
				     clock.p1 >= limit->p1.min; clock.p1--) {
887
					int this_err;
885
					int this_err;
888
 
886
 
889
					i9xx_calc_dpll_params(refclk, &clock);
887
					i9xx_calc_dpll_params(refclk, &clock);
890
					if (!intel_PLL_is_valid(dev, limit,
888
					if (!intel_PLL_is_valid(dev, limit,
891
								&clock))
889
								&clock))
892
						continue;
890
						continue;
893
 
891
 
894
					this_err = abs(clock.dot - target);
892
					this_err = abs(clock.dot - target);
895
					if (this_err < err_most) {
893
					if (this_err < err_most) {
896
						*best_clock = clock;
894
						*best_clock = clock;
897
						err_most = this_err;
895
						err_most = this_err;
898
						max_n = clock.n;
896
						max_n = clock.n;
899
						found = true;
897
						found = true;
900
					}
898
					}
901
				}
899
				}
902
			}
900
			}
903
		}
901
		}
904
	}
902
	}
905
	return found;
903
	return found;
906
}
904
}
907
 
905
 
908
/*
906
/*
909
 * Check if the calculated PLL configuration is more optimal compared to the
907
 * Check if the calculated PLL configuration is more optimal compared to the
910
 * best configuration and error found so far. Return the calculated error.
908
 * best configuration and error found so far. Return the calculated error.
911
 */
909
 */
912
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
910
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
913
			       const intel_clock_t *calculated_clock,
911
			       const intel_clock_t *calculated_clock,
914
			       const intel_clock_t *best_clock,
912
			       const intel_clock_t *best_clock,
915
			       unsigned int best_error_ppm,
913
			       unsigned int best_error_ppm,
916
			       unsigned int *error_ppm)
914
			       unsigned int *error_ppm)
917
{
915
{
918
	/*
916
	/*
919
	 * For CHV ignore the error and consider only the P value.
917
	 * For CHV ignore the error and consider only the P value.
920
	 * Prefer a bigger P value based on HW requirements.
918
	 * Prefer a bigger P value based on HW requirements.
921
	 */
919
	 */
922
	if (IS_CHERRYVIEW(dev)) {
920
	if (IS_CHERRYVIEW(dev)) {
923
		*error_ppm = 0;
921
		*error_ppm = 0;
924
 
922
 
925
		return calculated_clock->p > best_clock->p;
923
		return calculated_clock->p > best_clock->p;
926
	}
924
	}
927
 
925
 
928
	if (WARN_ON_ONCE(!target_freq))
926
	if (WARN_ON_ONCE(!target_freq))
929
		return false;
927
		return false;
930
 
928
 
931
	*error_ppm = div_u64(1000000ULL *
929
	*error_ppm = div_u64(1000000ULL *
932
				abs(target_freq - calculated_clock->dot),
930
				abs(target_freq - calculated_clock->dot),
933
			     target_freq);
931
			     target_freq);
934
	/*
932
	/*
935
	 * Prefer a better P value over a better (smaller) error if the error
933
	 * Prefer a better P value over a better (smaller) error if the error
936
	 * is small. Ensure this preference for future configurations too by
934
	 * is small. Ensure this preference for future configurations too by
937
	 * setting the error to 0.
935
	 * setting the error to 0.
938
	 */
936
	 */
939
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
937
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
940
		*error_ppm = 0;
938
		*error_ppm = 0;
941
 
939
 
942
		return true;
940
		return true;
943
	}
941
	}
944
 
942
 
945
	return *error_ppm + 10 < best_error_ppm;
943
	return *error_ppm + 10 < best_error_ppm;
946
}
944
}
947
 
945
 
948
static bool
946
static bool
949
vlv_find_best_dpll(const intel_limit_t *limit,
947
vlv_find_best_dpll(const intel_limit_t *limit,
950
		   struct intel_crtc_state *crtc_state,
948
		   struct intel_crtc_state *crtc_state,
951
		   int target, int refclk, intel_clock_t *match_clock,
949
		   int target, int refclk, intel_clock_t *match_clock,
952
		   intel_clock_t *best_clock)
950
		   intel_clock_t *best_clock)
953
{
951
{
954
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
952
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
955
	struct drm_device *dev = crtc->base.dev;
953
	struct drm_device *dev = crtc->base.dev;
956
	intel_clock_t clock;
954
	intel_clock_t clock;
957
	unsigned int bestppm = 1000000;
955
	unsigned int bestppm = 1000000;
958
	/* min update 19.2 MHz */
956
	/* min update 19.2 MHz */
959
	int max_n = min(limit->n.max, refclk / 19200);
957
	int max_n = min(limit->n.max, refclk / 19200);
960
	bool found = false;
958
	bool found = false;
961
 
959
 
962
	target *= 5; /* fast clock */
960
	target *= 5; /* fast clock */
963
 
961
 
964
	memset(best_clock, 0, sizeof(*best_clock));
962
	memset(best_clock, 0, sizeof(*best_clock));
965
 
963
 
966
	/* based on hardware requirement, prefer smaller n to precision */
964
	/* based on hardware requirement, prefer smaller n to precision */
967
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
965
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
968
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
966
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
969
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
967
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
970
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
968
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
971
				clock.p = clock.p1 * clock.p2;
969
				clock.p = clock.p1 * clock.p2;
972
				/* based on hardware requirement, prefer bigger m1,m2 values */
970
				/* based on hardware requirement, prefer bigger m1,m2 values */
973
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
971
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
974
					unsigned int ppm;
972
					unsigned int ppm;
975
 
973
 
976
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
974
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
977
								     refclk * clock.m1);
975
								     refclk * clock.m1);
978
 
976
 
979
					vlv_calc_dpll_params(refclk, &clock);
977
					vlv_calc_dpll_params(refclk, &clock);
980
 
978
 
981
					if (!intel_PLL_is_valid(dev, limit,
979
					if (!intel_PLL_is_valid(dev, limit,
982
								&clock))
980
								&clock))
983
						continue;
981
						continue;
984
 
982
 
985
					if (!vlv_PLL_is_optimal(dev, target,
983
					if (!vlv_PLL_is_optimal(dev, target,
986
								&clock,
984
								&clock,
987
								best_clock,
985
								best_clock,
988
								bestppm, &ppm))
986
								bestppm, &ppm))
989
						continue;
987
						continue;
990
 
988
 
991
					*best_clock = clock;
989
					*best_clock = clock;
992
					bestppm = ppm;
990
					bestppm = ppm;
993
					found = true;
991
					found = true;
994
				}
992
				}
995
			}
993
			}
996
		}
994
		}
997
	}
995
	}
998
 
996
 
999
	return found;
997
	return found;
1000
}
998
}
1001
 
999
 
1002
static bool
1000
static bool
1003
chv_find_best_dpll(const intel_limit_t *limit,
1001
chv_find_best_dpll(const intel_limit_t *limit,
1004
		   struct intel_crtc_state *crtc_state,
1002
		   struct intel_crtc_state *crtc_state,
1005
		   int target, int refclk, intel_clock_t *match_clock,
1003
		   int target, int refclk, intel_clock_t *match_clock,
1006
		   intel_clock_t *best_clock)
1004
		   intel_clock_t *best_clock)
1007
{
1005
{
1008
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1006
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1009
	struct drm_device *dev = crtc->base.dev;
1007
	struct drm_device *dev = crtc->base.dev;
1010
	unsigned int best_error_ppm;
1008
	unsigned int best_error_ppm;
1011
	intel_clock_t clock;
1009
	intel_clock_t clock;
1012
	uint64_t m2;
1010
	uint64_t m2;
1013
	int found = false;
1011
	int found = false;
1014
 
1012
 
1015
	memset(best_clock, 0, sizeof(*best_clock));
1013
	memset(best_clock, 0, sizeof(*best_clock));
1016
	best_error_ppm = 1000000;
1014
	best_error_ppm = 1000000;
1017
 
1015
 
1018
	/*
1016
	/*
1019
	 * Based on hardware doc, the n always set to 1, and m1 always
1017
	 * Based on hardware doc, the n always set to 1, and m1 always
1020
	 * set to 2.  If requires to support 200Mhz refclk, we need to
1018
	 * set to 2.  If requires to support 200Mhz refclk, we need to
1021
	 * revisit this because n may not 1 anymore.
1019
	 * revisit this because n may not 1 anymore.
1022
	 */
1020
	 */
1023
	clock.n = 1, clock.m1 = 2;
1021
	clock.n = 1, clock.m1 = 2;
1024
	target *= 5;	/* fast clock */
1022
	target *= 5;	/* fast clock */
1025
 
1023
 
1026
	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1024
	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1027
		for (clock.p2 = limit->p2.p2_fast;
1025
		for (clock.p2 = limit->p2.p2_fast;
1028
				clock.p2 >= limit->p2.p2_slow;
1026
				clock.p2 >= limit->p2.p2_slow;
1029
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1027
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1030
			unsigned int error_ppm;
1028
			unsigned int error_ppm;
1031
 
1029
 
1032
			clock.p = clock.p1 * clock.p2;
1030
			clock.p = clock.p1 * clock.p2;
1033
 
1031
 
1034
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1032
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1035
					clock.n) << 22, refclk * clock.m1);
1033
					clock.n) << 22, refclk * clock.m1);
1036
 
1034
 
1037
			if (m2 > INT_MAX/clock.m1)
1035
			if (m2 > INT_MAX/clock.m1)
1038
				continue;
1036
				continue;
1039
 
1037
 
1040
			clock.m2 = m2;
1038
			clock.m2 = m2;
1041
 
1039
 
1042
			chv_calc_dpll_params(refclk, &clock);
1040
			chv_calc_dpll_params(refclk, &clock);
1043
 
1041
 
1044
			if (!intel_PLL_is_valid(dev, limit, &clock))
1042
			if (!intel_PLL_is_valid(dev, limit, &clock))
1045
				continue;
1043
				continue;
1046
 
1044
 
1047
			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1045
			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1048
						best_error_ppm, &error_ppm))
1046
						best_error_ppm, &error_ppm))
1049
				continue;
1047
				continue;
1050
 
1048
 
1051
			*best_clock = clock;
1049
			*best_clock = clock;
1052
			best_error_ppm = error_ppm;
1050
			best_error_ppm = error_ppm;
1053
			found = true;
1051
			found = true;
1054
		}
1052
		}
1055
	}
1053
	}
1056
 
1054
 
1057
	return found;
1055
	return found;
1058
}
1056
}
1059
 
1057
 
1060
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1058
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1061
			intel_clock_t *best_clock)
1059
			intel_clock_t *best_clock)
1062
{
1060
{
1063
	int refclk = i9xx_get_refclk(crtc_state, 0);
1061
	int refclk = i9xx_get_refclk(crtc_state, 0);
1064
 
1062
 
1065
	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1063
	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1066
				  target_clock, refclk, NULL, best_clock);
1064
				  target_clock, refclk, NULL, best_clock);
1067
}
1065
}
1068
 
1066
 
1069
bool intel_crtc_active(struct drm_crtc *crtc)
1067
bool intel_crtc_active(struct drm_crtc *crtc)
1070
{
1068
{
1071
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1069
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1072
 
1070
 
1073
	/* Be paranoid as we can arrive here with only partial
1071
	/* Be paranoid as we can arrive here with only partial
1074
	 * state retrieved from the hardware during setup.
1072
	 * state retrieved from the hardware during setup.
1075
	 *
1073
	 *
1076
	 * We can ditch the adjusted_mode.crtc_clock check as soon
1074
	 * We can ditch the adjusted_mode.crtc_clock check as soon
1077
	 * as Haswell has gained clock readout/fastboot support.
1075
	 * as Haswell has gained clock readout/fastboot support.
1078
	 *
1076
	 *
1079
	 * We can ditch the crtc->primary->fb check as soon as we can
1077
	 * We can ditch the crtc->primary->fb check as soon as we can
1080
	 * properly reconstruct framebuffers.
1078
	 * properly reconstruct framebuffers.
1081
	 *
1079
	 *
1082
	 * FIXME: The intel_crtc->active here should be switched to
1080
	 * FIXME: The intel_crtc->active here should be switched to
1083
	 * crtc->state->active once we have proper CRTC states wired up
1081
	 * crtc->state->active once we have proper CRTC states wired up
1084
	 * for atomic.
1082
	 * for atomic.
1085
	 */
1083
	 */
1086
	return intel_crtc->active && crtc->primary->state->fb &&
1084
	return intel_crtc->active && crtc->primary->state->fb &&
1087
		intel_crtc->config->base.adjusted_mode.crtc_clock;
1085
		intel_crtc->config->base.adjusted_mode.crtc_clock;
1088
}
1086
}
1089
 
1087
 
1090
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1088
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1091
					     enum pipe pipe)
1089
					     enum pipe pipe)
1092
{
1090
{
1093
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1091
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1094
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1092
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1095
 
1093
 
1096
	return intel_crtc->config->cpu_transcoder;
1094
	return intel_crtc->config->cpu_transcoder;
1097
}
1095
}
1098
 
1096
 
1099
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1097
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1100
{
1098
{
1101
	struct drm_i915_private *dev_priv = dev->dev_private;
1099
	struct drm_i915_private *dev_priv = dev->dev_private;
1102
	i915_reg_t reg = PIPEDSL(pipe);
1100
	i915_reg_t reg = PIPEDSL(pipe);
1103
	u32 line1, line2;
1101
	u32 line1, line2;
1104
	u32 line_mask;
1102
	u32 line_mask;
1105
 
1103
 
1106
	if (IS_GEN2(dev))
1104
	if (IS_GEN2(dev))
1107
		line_mask = DSL_LINEMASK_GEN2;
1105
		line_mask = DSL_LINEMASK_GEN2;
1108
	else
1106
	else
1109
		line_mask = DSL_LINEMASK_GEN3;
1107
		line_mask = DSL_LINEMASK_GEN3;
1110
 
1108
 
1111
	line1 = I915_READ(reg) & line_mask;
1109
	line1 = I915_READ(reg) & line_mask;
1112
	msleep(5);
1110
	msleep(5);
1113
	line2 = I915_READ(reg) & line_mask;
1111
	line2 = I915_READ(reg) & line_mask;
1114
 
1112
 
1115
	return line1 == line2;
1113
	return line1 == line2;
1116
}
1114
}
1117
 
1115
 
1118
/*
1116
/*
1119
 * intel_wait_for_pipe_off - wait for pipe to turn off
1117
 * intel_wait_for_pipe_off - wait for pipe to turn off
1120
 * @crtc: crtc whose pipe to wait for
1118
 * @crtc: crtc whose pipe to wait for
1121
 *
1119
 *
1122
 * After disabling a pipe, we can't wait for vblank in the usual way,
1120
 * After disabling a pipe, we can't wait for vblank in the usual way,
1123
 * spinning on the vblank interrupt status bit, since we won't actually
1121
 * spinning on the vblank interrupt status bit, since we won't actually
1124
 * see an interrupt when the pipe is disabled.
1122
 * see an interrupt when the pipe is disabled.
1125
 *
1123
 *
1126
 * On Gen4 and above:
1124
 * On Gen4 and above:
1127
 *   wait for the pipe register state bit to turn off
1125
 *   wait for the pipe register state bit to turn off
1128
 *
1126
 *
1129
 * Otherwise:
1127
 * Otherwise:
1130
 *   wait for the display line value to settle (it usually
1128
 *   wait for the display line value to settle (it usually
1131
 *   ends up stopping at the start of the next frame).
1129
 *   ends up stopping at the start of the next frame).
1132
 *
1130
 *
1133
 */
1131
 */
1134
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1132
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1135
{
1133
{
1136
	struct drm_device *dev = crtc->base.dev;
1134
	struct drm_device *dev = crtc->base.dev;
1137
	struct drm_i915_private *dev_priv = dev->dev_private;
1135
	struct drm_i915_private *dev_priv = dev->dev_private;
1138
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1136
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1139
	enum pipe pipe = crtc->pipe;
1137
	enum pipe pipe = crtc->pipe;
1140
 
1138
 
1141
	if (INTEL_INFO(dev)->gen >= 4) {
1139
	if (INTEL_INFO(dev)->gen >= 4) {
1142
		i915_reg_t reg = PIPECONF(cpu_transcoder);
1140
		i915_reg_t reg = PIPECONF(cpu_transcoder);
1143
 
1141
 
1144
		/* Wait for the Pipe State to go off */
1142
		/* Wait for the Pipe State to go off */
1145
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1143
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1146
			     100))
1144
			     100))
1147
			WARN(1, "pipe_off wait timed out\n");
1145
			WARN(1, "pipe_off wait timed out\n");
1148
	} else {
1146
	} else {
1149
		/* Wait for the display line to settle */
1147
		/* Wait for the display line to settle */
1150
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1148
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1151
			WARN(1, "pipe_off wait timed out\n");
1149
			WARN(1, "pipe_off wait timed out\n");
1152
	}
1150
	}
1153
}
1151
}
1154
 
-
 
1155
static const char *state_string(bool enabled)
-
 
1156
{
-
 
1157
	return enabled ? "on" : "off";
-
 
1158
}
-
 
1159
 
1152
 
1160
/* Only for pre-ILK configs */
1153
/* Only for pre-ILK configs */
1161
void assert_pll(struct drm_i915_private *dev_priv,
1154
void assert_pll(struct drm_i915_private *dev_priv,
1162
		enum pipe pipe, bool state)
1155
		enum pipe pipe, bool state)
1163
{
1156
{
1164
	u32 val;
1157
	u32 val;
1165
	bool cur_state;
1158
	bool cur_state;
1166
 
1159
 
1167
	val = I915_READ(DPLL(pipe));
1160
	val = I915_READ(DPLL(pipe));
1168
	cur_state = !!(val & DPLL_VCO_ENABLE);
1161
	cur_state = !!(val & DPLL_VCO_ENABLE);
1169
	I915_STATE_WARN(cur_state != state,
1162
	I915_STATE_WARN(cur_state != state,
1170
	     "PLL state assertion failure (expected %s, current %s)\n",
1163
	     "PLL state assertion failure (expected %s, current %s)\n",
1171
	     state_string(state), state_string(cur_state));
1164
			onoff(state), onoff(cur_state));
1172
}
1165
}
1173
 
1166
 
1174
/* XXX: the dsi pll is shared between MIPI DSI ports */
1167
/* XXX: the dsi pll is shared between MIPI DSI ports */
1175
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1168
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1176
{
1169
{
1177
	u32 val;
1170
	u32 val;
1178
	bool cur_state;
1171
	bool cur_state;
1179
 
1172
 
1180
	mutex_lock(&dev_priv->sb_lock);
1173
	mutex_lock(&dev_priv->sb_lock);
1181
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1174
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1182
	mutex_unlock(&dev_priv->sb_lock);
1175
	mutex_unlock(&dev_priv->sb_lock);
1183
 
1176
 
1184
	cur_state = val & DSI_PLL_VCO_EN;
1177
	cur_state = val & DSI_PLL_VCO_EN;
1185
	I915_STATE_WARN(cur_state != state,
1178
	I915_STATE_WARN(cur_state != state,
1186
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1179
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1187
	     state_string(state), state_string(cur_state));
1180
			onoff(state), onoff(cur_state));
1188
}
1181
}
1189
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1182
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1190
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1183
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1191
 
1184
 
1192
struct intel_shared_dpll *
1185
struct intel_shared_dpll *
1193
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1186
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1194
{
1187
{
1195
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1188
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1196
 
1189
 
1197
	if (crtc->config->shared_dpll < 0)
1190
	if (crtc->config->shared_dpll < 0)
1198
		return NULL;
1191
		return NULL;
1199
 
1192
 
1200
	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1193
	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1201
}
1194
}
1202
 
1195
 
1203
/* For ILK+ */
1196
/* For ILK+ */
1204
void assert_shared_dpll(struct drm_i915_private *dev_priv,
1197
void assert_shared_dpll(struct drm_i915_private *dev_priv,
1205
			struct intel_shared_dpll *pll,
1198
			struct intel_shared_dpll *pll,
1206
			bool state)
1199
			bool state)
1207
{
1200
{
1208
	bool cur_state;
1201
	bool cur_state;
1209
	struct intel_dpll_hw_state hw_state;
1202
	struct intel_dpll_hw_state hw_state;
1210
 
-
 
1211
	if (WARN (!pll,
1203
 
1212
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
1204
	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
1213
		return;
1205
		return;
1214
 
1206
 
1215
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1207
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1216
	I915_STATE_WARN(cur_state != state,
1208
	I915_STATE_WARN(cur_state != state,
1217
	     "%s assertion failure (expected %s, current %s)\n",
1209
	     "%s assertion failure (expected %s, current %s)\n",
1218
	     pll->name, state_string(state), state_string(cur_state));
1210
			pll->name, onoff(state), onoff(cur_state));
1219
}
1211
}
1220
 
1212
 
1221
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1213
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1222
			  enum pipe pipe, bool state)
1214
			  enum pipe pipe, bool state)
1223
{
1215
{
1224
	bool cur_state;
1216
	bool cur_state;
1225
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1217
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1226
								      pipe);
1218
								      pipe);
1227
 
1219
 
1228
	if (HAS_DDI(dev_priv->dev)) {
1220
	if (HAS_DDI(dev_priv->dev)) {
1229
		/* DDI does not have a specific FDI_TX register */
1221
		/* DDI does not have a specific FDI_TX register */
1230
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1222
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1231
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1223
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1232
	} else {
1224
	} else {
1233
		u32 val = I915_READ(FDI_TX_CTL(pipe));
1225
		u32 val = I915_READ(FDI_TX_CTL(pipe));
1234
		cur_state = !!(val & FDI_TX_ENABLE);
1226
		cur_state = !!(val & FDI_TX_ENABLE);
1235
	}
1227
	}
1236
	I915_STATE_WARN(cur_state != state,
1228
	I915_STATE_WARN(cur_state != state,
1237
	     "FDI TX state assertion failure (expected %s, current %s)\n",
1229
	     "FDI TX state assertion failure (expected %s, current %s)\n",
1238
	     state_string(state), state_string(cur_state));
1230
			onoff(state), onoff(cur_state));
1239
}
1231
}
1240
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1232
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1241
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1233
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1242
 
1234
 
1243
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1235
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1244
			  enum pipe pipe, bool state)
1236
			  enum pipe pipe, bool state)
1245
{
1237
{
1246
	u32 val;
1238
	u32 val;
1247
	bool cur_state;
1239
	bool cur_state;
1248
 
1240
 
1249
	val = I915_READ(FDI_RX_CTL(pipe));
1241
	val = I915_READ(FDI_RX_CTL(pipe));
1250
	cur_state = !!(val & FDI_RX_ENABLE);
1242
	cur_state = !!(val & FDI_RX_ENABLE);
1251
	I915_STATE_WARN(cur_state != state,
1243
	I915_STATE_WARN(cur_state != state,
1252
	     "FDI RX state assertion failure (expected %s, current %s)\n",
1244
	     "FDI RX state assertion failure (expected %s, current %s)\n",
1253
	     state_string(state), state_string(cur_state));
1245
			onoff(state), onoff(cur_state));
1254
}
1246
}
1255
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1247
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1256
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1248
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1257
 
1249
 
1258
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1250
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1259
				      enum pipe pipe)
1251
				      enum pipe pipe)
1260
{
1252
{
1261
	u32 val;
1253
	u32 val;
1262
 
1254
 
1263
	/* ILK FDI PLL is always enabled */
1255
	/* ILK FDI PLL is always enabled */
1264
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
1256
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
1265
		return;
1257
		return;
1266
 
1258
 
1267
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1259
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1268
	if (HAS_DDI(dev_priv->dev))
1260
	if (HAS_DDI(dev_priv->dev))
1269
		return;
1261
		return;
1270
 
1262
 
1271
	val = I915_READ(FDI_TX_CTL(pipe));
1263
	val = I915_READ(FDI_TX_CTL(pipe));
1272
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1264
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1273
}
1265
}
1274
 
1266
 
1275
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1267
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1276
		       enum pipe pipe, bool state)
1268
		       enum pipe pipe, bool state)
1277
{
1269
{
1278
	u32 val;
1270
	u32 val;
1279
	bool cur_state;
1271
	bool cur_state;
1280
 
1272
 
1281
	val = I915_READ(FDI_RX_CTL(pipe));
1273
	val = I915_READ(FDI_RX_CTL(pipe));
1282
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1274
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1283
	I915_STATE_WARN(cur_state != state,
1275
	I915_STATE_WARN(cur_state != state,
1284
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1276
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1285
	     state_string(state), state_string(cur_state));
1277
			onoff(state), onoff(cur_state));
1286
}
1278
}
1287
 
1279
 
1288
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1280
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1289
			   enum pipe pipe)
1281
			   enum pipe pipe)
1290
{
1282
{
1291
	struct drm_device *dev = dev_priv->dev;
1283
	struct drm_device *dev = dev_priv->dev;
1292
	i915_reg_t pp_reg;
1284
	i915_reg_t pp_reg;
1293
	u32 val;
1285
	u32 val;
1294
	enum pipe panel_pipe = PIPE_A;
1286
	enum pipe panel_pipe = PIPE_A;
1295
	bool locked = true;
1287
	bool locked = true;
1296
 
1288
 
1297
	if (WARN_ON(HAS_DDI(dev)))
1289
	if (WARN_ON(HAS_DDI(dev)))
1298
		return;
1290
		return;
1299
 
1291
 
1300
	if (HAS_PCH_SPLIT(dev)) {
1292
	if (HAS_PCH_SPLIT(dev)) {
1301
		u32 port_sel;
1293
		u32 port_sel;
1302
 
1294
 
1303
		pp_reg = PCH_PP_CONTROL;
1295
		pp_reg = PCH_PP_CONTROL;
1304
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1296
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1305
 
1297
 
1306
		if (port_sel == PANEL_PORT_SELECT_LVDS &&
1298
		if (port_sel == PANEL_PORT_SELECT_LVDS &&
1307
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1299
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1308
			panel_pipe = PIPE_B;
1300
			panel_pipe = PIPE_B;
1309
		/* XXX: else fix for eDP */
1301
		/* XXX: else fix for eDP */
1310
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1302
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1311
		/* presumably write lock depends on pipe, not port select */
1303
		/* presumably write lock depends on pipe, not port select */
1312
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1304
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1313
		panel_pipe = pipe;
1305
		panel_pipe = pipe;
1314
	} else {
1306
	} else {
1315
		pp_reg = PP_CONTROL;
1307
		pp_reg = PP_CONTROL;
1316
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1308
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1317
			panel_pipe = PIPE_B;
1309
			panel_pipe = PIPE_B;
1318
	}
1310
	}
1319
 
1311
 
1320
	val = I915_READ(pp_reg);
1312
	val = I915_READ(pp_reg);
1321
	if (!(val & PANEL_POWER_ON) ||
1313
	if (!(val & PANEL_POWER_ON) ||
1322
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1314
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1323
		locked = false;
1315
		locked = false;
1324
 
1316
 
1325
	I915_STATE_WARN(panel_pipe == pipe && locked,
1317
	I915_STATE_WARN(panel_pipe == pipe && locked,
1326
	     "panel assertion failure, pipe %c regs locked\n",
1318
	     "panel assertion failure, pipe %c regs locked\n",
1327
	     pipe_name(pipe));
1319
	     pipe_name(pipe));
1328
}
1320
}
1329
 
1321
 
1330
static void assert_cursor(struct drm_i915_private *dev_priv,
1322
static void assert_cursor(struct drm_i915_private *dev_priv,
1331
			  enum pipe pipe, bool state)
1323
			  enum pipe pipe, bool state)
1332
{
1324
{
1333
	struct drm_device *dev = dev_priv->dev;
1325
	struct drm_device *dev = dev_priv->dev;
1334
	bool cur_state;
1326
	bool cur_state;
1335
 
1327
 
1336
	if (IS_845G(dev) || IS_I865G(dev))
1328
	if (IS_845G(dev) || IS_I865G(dev))
1337
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1329
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1338
	else
1330
	else
1339
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1331
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1340
 
1332
 
1341
	I915_STATE_WARN(cur_state != state,
1333
	I915_STATE_WARN(cur_state != state,
1342
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1334
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1343
	     pipe_name(pipe), state_string(state), state_string(cur_state));
1335
			pipe_name(pipe), onoff(state), onoff(cur_state));
1344
}
1336
}
1345
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1337
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1346
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1338
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1347
 
1339
 
1348
void assert_pipe(struct drm_i915_private *dev_priv,
1340
void assert_pipe(struct drm_i915_private *dev_priv,
1349
		 enum pipe pipe, bool state)
1341
		 enum pipe pipe, bool state)
1350
{
1342
{
1351
	bool cur_state;
1343
	bool cur_state;
1352
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1344
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1353
								      pipe);
1345
								      pipe);
1354
	enum intel_display_power_domain power_domain;
1346
	enum intel_display_power_domain power_domain;
1355
 
1347
 
1356
	/* if we need the pipe quirk it must be always on */
1348
	/* if we need the pipe quirk it must be always on */
1357
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1349
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1358
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1350
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1359
		state = true;
1351
		state = true;
1360
 
1352
 
1361
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1353
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1362
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
1354
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
1363
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
1355
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
1364
		cur_state = !!(val & PIPECONF_ENABLE);
1356
		cur_state = !!(val & PIPECONF_ENABLE);
1365
 
1357
 
1366
		intel_display_power_put(dev_priv, power_domain);
1358
		intel_display_power_put(dev_priv, power_domain);
1367
	} else {
1359
	} else {
1368
		cur_state = false;
1360
		cur_state = false;
1369
	}
1361
	}
1370
 
1362
 
1371
	I915_STATE_WARN(cur_state != state,
1363
	I915_STATE_WARN(cur_state != state,
1372
	     "pipe %c assertion failure (expected %s, current %s)\n",
1364
	     "pipe %c assertion failure (expected %s, current %s)\n",
1373
	     pipe_name(pipe), state_string(state), state_string(cur_state));
1365
			pipe_name(pipe), onoff(state), onoff(cur_state));
1374
}
1366
}
1375
 
1367
 
1376
static void assert_plane(struct drm_i915_private *dev_priv,
1368
static void assert_plane(struct drm_i915_private *dev_priv,
1377
			 enum plane plane, bool state)
1369
			 enum plane plane, bool state)
1378
{
1370
{
1379
	u32 val;
1371
	u32 val;
1380
	bool cur_state;
1372
	bool cur_state;
1381
 
1373
 
1382
	val = I915_READ(DSPCNTR(plane));
1374
	val = I915_READ(DSPCNTR(plane));
1383
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1375
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1384
	I915_STATE_WARN(cur_state != state,
1376
	I915_STATE_WARN(cur_state != state,
1385
	     "plane %c assertion failure (expected %s, current %s)\n",
1377
	     "plane %c assertion failure (expected %s, current %s)\n",
1386
	     plane_name(plane), state_string(state), state_string(cur_state));
1378
			plane_name(plane), onoff(state), onoff(cur_state));
1387
}
1379
}
1388
 
1380
 
1389
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1381
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1390
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1382
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1391
 
1383
 
1392
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1384
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1393
				   enum pipe pipe)
1385
				   enum pipe pipe)
1394
{
1386
{
1395
	struct drm_device *dev = dev_priv->dev;
1387
	struct drm_device *dev = dev_priv->dev;
1396
	int i;
1388
	int i;
1397
 
1389
 
1398
	/* Primary planes are fixed to pipes on gen4+ */
1390
	/* Primary planes are fixed to pipes on gen4+ */
1399
	if (INTEL_INFO(dev)->gen >= 4) {
1391
	if (INTEL_INFO(dev)->gen >= 4) {
1400
		u32 val = I915_READ(DSPCNTR(pipe));
1392
		u32 val = I915_READ(DSPCNTR(pipe));
1401
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1393
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1402
		     "plane %c assertion failure, should be disabled but not\n",
1394
		     "plane %c assertion failure, should be disabled but not\n",
1403
		     plane_name(pipe));
1395
		     plane_name(pipe));
1404
		return;
1396
		return;
1405
	}
1397
	}
1406
 
1398
 
1407
	/* Need to check both planes against the pipe */
1399
	/* Need to check both planes against the pipe */
1408
	for_each_pipe(dev_priv, i) {
1400
	for_each_pipe(dev_priv, i) {
1409
		u32 val = I915_READ(DSPCNTR(i));
1401
		u32 val = I915_READ(DSPCNTR(i));
1410
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1402
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1411
			DISPPLANE_SEL_PIPE_SHIFT;
1403
			DISPPLANE_SEL_PIPE_SHIFT;
1412
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1404
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1413
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1405
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1414
		     plane_name(i), pipe_name(pipe));
1406
		     plane_name(i), pipe_name(pipe));
1415
	}
1407
	}
1416
}
1408
}
1417
 
1409
 
1418
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1410
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1419
				    enum pipe pipe)
1411
				    enum pipe pipe)
1420
{
1412
{
1421
	struct drm_device *dev = dev_priv->dev;
1413
	struct drm_device *dev = dev_priv->dev;
1422
	int sprite;
1414
	int sprite;
1423
 
1415
 
1424
	if (INTEL_INFO(dev)->gen >= 9) {
1416
	if (INTEL_INFO(dev)->gen >= 9) {
1425
		for_each_sprite(dev_priv, pipe, sprite) {
1417
		for_each_sprite(dev_priv, pipe, sprite) {
1426
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
1418
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
1427
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1419
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1428
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
1420
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
1429
			     sprite, pipe_name(pipe));
1421
			     sprite, pipe_name(pipe));
1430
		}
1422
		}
1431
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1423
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1432
		for_each_sprite(dev_priv, pipe, sprite) {
1424
		for_each_sprite(dev_priv, pipe, sprite) {
1433
			u32 val = I915_READ(SPCNTR(pipe, sprite));
1425
			u32 val = I915_READ(SPCNTR(pipe, sprite));
1434
			I915_STATE_WARN(val & SP_ENABLE,
1426
			I915_STATE_WARN(val & SP_ENABLE,
1435
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1427
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1436
			     sprite_name(pipe, sprite), pipe_name(pipe));
1428
			     sprite_name(pipe, sprite), pipe_name(pipe));
1437
		}
1429
		}
1438
	} else if (INTEL_INFO(dev)->gen >= 7) {
1430
	} else if (INTEL_INFO(dev)->gen >= 7) {
1439
		u32 val = I915_READ(SPRCTL(pipe));
1431
		u32 val = I915_READ(SPRCTL(pipe));
1440
		I915_STATE_WARN(val & SPRITE_ENABLE,
1432
		I915_STATE_WARN(val & SPRITE_ENABLE,
1441
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1433
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1442
		     plane_name(pipe), pipe_name(pipe));
1434
		     plane_name(pipe), pipe_name(pipe));
1443
	} else if (INTEL_INFO(dev)->gen >= 5) {
1435
	} else if (INTEL_INFO(dev)->gen >= 5) {
1444
		u32 val = I915_READ(DVSCNTR(pipe));
1436
		u32 val = I915_READ(DVSCNTR(pipe));
1445
		I915_STATE_WARN(val & DVS_ENABLE,
1437
		I915_STATE_WARN(val & DVS_ENABLE,
1446
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1438
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1447
		     plane_name(pipe), pipe_name(pipe));
1439
		     plane_name(pipe), pipe_name(pipe));
1448
	}
1440
	}
1449
}
1441
}
1450
 
1442
 
1451
static void assert_vblank_disabled(struct drm_crtc *crtc)
1443
static void assert_vblank_disabled(struct drm_crtc *crtc)
1452
{
1444
{
1453
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1445
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1454
		drm_crtc_vblank_put(crtc);
1446
		drm_crtc_vblank_put(crtc);
1455
}
1447
}
1456
 
1448
 
1457
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1449
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1458
{
1450
{
1459
	u32 val;
1451
	u32 val;
1460
	bool enabled;
1452
	bool enabled;
1461
 
1453
 
1462
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1454
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1463
 
1455
 
1464
	val = I915_READ(PCH_DREF_CONTROL);
1456
	val = I915_READ(PCH_DREF_CONTROL);
1465
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1457
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1466
			    DREF_SUPERSPREAD_SOURCE_MASK));
1458
			    DREF_SUPERSPREAD_SOURCE_MASK));
1467
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1459
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1468
}
1460
}
1469
 
1461
 
1470
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1462
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1471
					   enum pipe pipe)
1463
					   enum pipe pipe)
1472
{
1464
{
1473
	u32 val;
1465
	u32 val;
1474
	bool enabled;
1466
	bool enabled;
1475
 
1467
 
1476
	val = I915_READ(PCH_TRANSCONF(pipe));
1468
	val = I915_READ(PCH_TRANSCONF(pipe));
1477
	enabled = !!(val & TRANS_ENABLE);
1469
	enabled = !!(val & TRANS_ENABLE);
1478
	I915_STATE_WARN(enabled,
1470
	I915_STATE_WARN(enabled,
1479
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1471
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1480
	     pipe_name(pipe));
1472
	     pipe_name(pipe));
1481
}
1473
}
1482
 
1474
 
1483
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1475
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1484
			    enum pipe pipe, u32 port_sel, u32 val)
1476
			    enum pipe pipe, u32 port_sel, u32 val)
1485
{
1477
{
1486
	if ((val & DP_PORT_EN) == 0)
1478
	if ((val & DP_PORT_EN) == 0)
1487
		return false;
1479
		return false;
1488
 
1480
 
1489
	if (HAS_PCH_CPT(dev_priv->dev)) {
1481
	if (HAS_PCH_CPT(dev_priv->dev)) {
1490
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1482
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1491
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1483
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1492
			return false;
1484
			return false;
1493
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1485
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1494
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1486
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1495
			return false;
1487
			return false;
1496
	} else {
1488
	} else {
1497
		if ((val & DP_PIPE_MASK) != (pipe << 30))
1489
		if ((val & DP_PIPE_MASK) != (pipe << 30))
1498
			return false;
1490
			return false;
1499
	}
1491
	}
1500
	return true;
1492
	return true;
1501
}
1493
}
1502
 
1494
 
1503
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1495
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1504
			      enum pipe pipe, u32 val)
1496
			      enum pipe pipe, u32 val)
1505
{
1497
{
1506
	if ((val & SDVO_ENABLE) == 0)
1498
	if ((val & SDVO_ENABLE) == 0)
1507
		return false;
1499
		return false;
1508
 
1500
 
1509
	if (HAS_PCH_CPT(dev_priv->dev)) {
1501
	if (HAS_PCH_CPT(dev_priv->dev)) {
1510
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1502
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1511
			return false;
1503
			return false;
1512
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1504
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1513
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1505
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1514
			return false;
1506
			return false;
1515
	} else {
1507
	} else {
1516
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1508
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1517
			return false;
1509
			return false;
1518
	}
1510
	}
1519
	return true;
1511
	return true;
1520
}
1512
}
1521
 
1513
 
1522
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1514
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1523
			      enum pipe pipe, u32 val)
1515
			      enum pipe pipe, u32 val)
1524
{
1516
{
1525
	if ((val & LVDS_PORT_EN) == 0)
1517
	if ((val & LVDS_PORT_EN) == 0)
1526
		return false;
1518
		return false;
1527
 
1519
 
1528
	if (HAS_PCH_CPT(dev_priv->dev)) {
1520
	if (HAS_PCH_CPT(dev_priv->dev)) {
1529
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1521
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1530
			return false;
1522
			return false;
1531
	} else {
1523
	} else {
1532
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1524
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1533
			return false;
1525
			return false;
1534
	}
1526
	}
1535
	return true;
1527
	return true;
1536
}
1528
}
1537
 
1529
 
1538
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1530
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1539
			      enum pipe pipe, u32 val)
1531
			      enum pipe pipe, u32 val)
1540
{
1532
{
1541
	if ((val & ADPA_DAC_ENABLE) == 0)
1533
	if ((val & ADPA_DAC_ENABLE) == 0)
1542
		return false;
1534
		return false;
1543
	if (HAS_PCH_CPT(dev_priv->dev)) {
1535
	if (HAS_PCH_CPT(dev_priv->dev)) {
1544
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1536
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1545
			return false;
1537
			return false;
1546
	} else {
1538
	} else {
1547
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1539
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1548
			return false;
1540
			return false;
1549
	}
1541
	}
1550
	return true;
1542
	return true;
1551
}
1543
}
1552
 
1544
 
1553
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1545
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1554
				   enum pipe pipe, i915_reg_t reg,
1546
				   enum pipe pipe, i915_reg_t reg,
1555
				   u32 port_sel)
1547
				   u32 port_sel)
1556
{
1548
{
1557
	u32 val = I915_READ(reg);
1549
	u32 val = I915_READ(reg);
1558
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1550
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1559
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1551
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1560
	     i915_mmio_reg_offset(reg), pipe_name(pipe));
1552
	     i915_mmio_reg_offset(reg), pipe_name(pipe));
1561
 
1553
 
1562
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1554
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1563
	     && (val & DP_PIPEB_SELECT),
1555
	     && (val & DP_PIPEB_SELECT),
1564
	     "IBX PCH dp port still using transcoder B\n");
1556
	     "IBX PCH dp port still using transcoder B\n");
1565
}
1557
}
1566
 
1558
 
1567
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1559
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1568
				     enum pipe pipe, i915_reg_t reg)
1560
				     enum pipe pipe, i915_reg_t reg)
1569
{
1561
{
1570
	u32 val = I915_READ(reg);
1562
	u32 val = I915_READ(reg);
1571
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1563
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1572
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1564
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1573
	     i915_mmio_reg_offset(reg), pipe_name(pipe));
1565
	     i915_mmio_reg_offset(reg), pipe_name(pipe));
1574
 
1566
 
1575
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1567
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1576
	     && (val & SDVO_PIPE_B_SELECT),
1568
	     && (val & SDVO_PIPE_B_SELECT),
1577
	     "IBX PCH hdmi port still using transcoder B\n");
1569
	     "IBX PCH hdmi port still using transcoder B\n");
1578
}
1570
}
1579
 
1571
 
1580
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1572
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1581
				      enum pipe pipe)
1573
				      enum pipe pipe)
1582
{
1574
{
1583
	u32 val;
1575
	u32 val;
1584
 
1576
 
1585
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1577
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1586
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1578
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1587
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1579
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1588
 
1580
 
1589
	val = I915_READ(PCH_ADPA);
1581
	val = I915_READ(PCH_ADPA);
1590
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1582
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1591
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1583
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1592
	     pipe_name(pipe));
1584
	     pipe_name(pipe));
1593
 
1585
 
1594
	val = I915_READ(PCH_LVDS);
1586
	val = I915_READ(PCH_LVDS);
1595
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1587
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1596
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1588
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1597
	     pipe_name(pipe));
1589
	     pipe_name(pipe));
1598
 
1590
 
1599
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1591
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1600
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1592
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1601
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1593
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1602
}
1594
}
1603
 
1595
 
1604
static void vlv_enable_pll(struct intel_crtc *crtc,
1596
static void vlv_enable_pll(struct intel_crtc *crtc,
1605
			   const struct intel_crtc_state *pipe_config)
1597
			   const struct intel_crtc_state *pipe_config)
1606
{
1598
{
1607
	struct drm_device *dev = crtc->base.dev;
1599
	struct drm_device *dev = crtc->base.dev;
1608
	struct drm_i915_private *dev_priv = dev->dev_private;
1600
	struct drm_i915_private *dev_priv = dev->dev_private;
1609
	i915_reg_t reg = DPLL(crtc->pipe);
1601
	i915_reg_t reg = DPLL(crtc->pipe);
1610
	u32 dpll = pipe_config->dpll_hw_state.dpll;
1602
	u32 dpll = pipe_config->dpll_hw_state.dpll;
1611
 
1603
 
1612
	assert_pipe_disabled(dev_priv, crtc->pipe);
1604
	assert_pipe_disabled(dev_priv, crtc->pipe);
1613
 
1605
 
1614
	/* PLL is protected by panel, make sure we can write it */
1606
	/* PLL is protected by panel, make sure we can write it */
1615
	if (IS_MOBILE(dev_priv->dev))
1607
	if (IS_MOBILE(dev_priv->dev))
1616
		assert_panel_unlocked(dev_priv, crtc->pipe);
1608
		assert_panel_unlocked(dev_priv, crtc->pipe);
1617
 
1609
 
1618
	I915_WRITE(reg, dpll);
1610
	I915_WRITE(reg, dpll);
1619
	POSTING_READ(reg);
1611
	POSTING_READ(reg);
1620
	udelay(150);
1612
	udelay(150);
1621
 
1613
 
1622
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1614
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1623
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1615
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1624
 
1616
 
1625
	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
1617
	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
1626
	POSTING_READ(DPLL_MD(crtc->pipe));
1618
	POSTING_READ(DPLL_MD(crtc->pipe));
1627
 
1619
 
1628
	/* We do this three times for luck */
1620
	/* We do this three times for luck */
1629
	I915_WRITE(reg, dpll);
1621
	I915_WRITE(reg, dpll);
1630
	POSTING_READ(reg);
1622
	POSTING_READ(reg);
1631
	udelay(150); /* wait for warmup */
1623
	udelay(150); /* wait for warmup */
1632
	I915_WRITE(reg, dpll);
1624
	I915_WRITE(reg, dpll);
1633
	POSTING_READ(reg);
1625
	POSTING_READ(reg);
1634
	udelay(150); /* wait for warmup */
1626
	udelay(150); /* wait for warmup */
1635
	I915_WRITE(reg, dpll);
1627
	I915_WRITE(reg, dpll);
1636
	POSTING_READ(reg);
1628
	POSTING_READ(reg);
1637
	udelay(150); /* wait for warmup */
1629
	udelay(150); /* wait for warmup */
1638
}
1630
}
1639
 
1631
 
1640
static void chv_enable_pll(struct intel_crtc *crtc,
1632
static void chv_enable_pll(struct intel_crtc *crtc,
1641
			   const struct intel_crtc_state *pipe_config)
1633
			   const struct intel_crtc_state *pipe_config)
1642
{
1634
{
1643
	struct drm_device *dev = crtc->base.dev;
1635
	struct drm_device *dev = crtc->base.dev;
1644
	struct drm_i915_private *dev_priv = dev->dev_private;
1636
	struct drm_i915_private *dev_priv = dev->dev_private;
1645
	int pipe = crtc->pipe;
1637
	int pipe = crtc->pipe;
1646
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1638
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1647
	u32 tmp;
1639
	u32 tmp;
1648
 
1640
 
1649
	assert_pipe_disabled(dev_priv, crtc->pipe);
1641
	assert_pipe_disabled(dev_priv, crtc->pipe);
1650
 
1642
 
1651
	mutex_lock(&dev_priv->sb_lock);
1643
	mutex_lock(&dev_priv->sb_lock);
1652
 
1644
 
1653
	/* Enable back the 10bit clock to display controller */
1645
	/* Enable back the 10bit clock to display controller */
1654
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1646
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1655
	tmp |= DPIO_DCLKP_EN;
1647
	tmp |= DPIO_DCLKP_EN;
1656
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1648
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1657
 
1649
 
1658
	mutex_unlock(&dev_priv->sb_lock);
1650
	mutex_unlock(&dev_priv->sb_lock);
1659
 
1651
 
1660
	/*
1652
	/*
1661
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1653
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1662
	 */
1654
	 */
1663
	udelay(1);
1655
	udelay(1);
1664
 
1656
 
1665
	/* Enable PLL */
1657
	/* Enable PLL */
1666
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1658
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1667
 
1659
 
1668
	/* Check PLL is locked */
1660
	/* Check PLL is locked */
1669
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1661
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1670
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1662
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1671
 
1663
 
1672
	/* not sure when this should be written */
1664
	/* not sure when this should be written */
1673
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1665
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1674
	POSTING_READ(DPLL_MD(pipe));
1666
	POSTING_READ(DPLL_MD(pipe));
1675
}
1667
}
1676
 
1668
 
1677
static int intel_num_dvo_pipes(struct drm_device *dev)
1669
static int intel_num_dvo_pipes(struct drm_device *dev)
1678
{
1670
{
1679
	struct intel_crtc *crtc;
1671
	struct intel_crtc *crtc;
1680
	int count = 0;
1672
	int count = 0;
1681
 
1673
 
1682
	for_each_intel_crtc(dev, crtc)
1674
	for_each_intel_crtc(dev, crtc)
1683
		count += crtc->base.state->active &&
1675
		count += crtc->base.state->active &&
1684
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1676
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1685
 
1677
 
1686
	return count;
1678
	return count;
1687
}
1679
}
1688
 
1680
 
1689
static void i9xx_enable_pll(struct intel_crtc *crtc)
1681
static void i9xx_enable_pll(struct intel_crtc *crtc)
1690
{
1682
{
1691
	struct drm_device *dev = crtc->base.dev;
1683
	struct drm_device *dev = crtc->base.dev;
1692
	struct drm_i915_private *dev_priv = dev->dev_private;
1684
	struct drm_i915_private *dev_priv = dev->dev_private;
1693
	i915_reg_t reg = DPLL(crtc->pipe);
1685
	i915_reg_t reg = DPLL(crtc->pipe);
1694
	u32 dpll = crtc->config->dpll_hw_state.dpll;
1686
	u32 dpll = crtc->config->dpll_hw_state.dpll;
1695
 
1687
 
1696
	assert_pipe_disabled(dev_priv, crtc->pipe);
1688
	assert_pipe_disabled(dev_priv, crtc->pipe);
1697
 
1689
 
1698
	/* No really, not for ILK+ */
1690
	/* No really, not for ILK+ */
1699
	BUG_ON(INTEL_INFO(dev)->gen >= 5);
1691
	BUG_ON(INTEL_INFO(dev)->gen >= 5);
1700
 
1692
 
1701
	/* PLL is protected by panel, make sure we can write it */
1693
	/* PLL is protected by panel, make sure we can write it */
1702
	if (IS_MOBILE(dev) && !IS_I830(dev))
1694
	if (IS_MOBILE(dev) && !IS_I830(dev))
1703
		assert_panel_unlocked(dev_priv, crtc->pipe);
1695
		assert_panel_unlocked(dev_priv, crtc->pipe);
1704
 
1696
 
1705
	/* Enable DVO 2x clock on both PLLs if necessary */
1697
	/* Enable DVO 2x clock on both PLLs if necessary */
1706
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1698
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1707
		/*
1699
		/*
1708
		 * It appears to be important that we don't enable this
1700
		 * It appears to be important that we don't enable this
1709
		 * for the current pipe before otherwise configuring the
1701
		 * for the current pipe before otherwise configuring the
1710
		 * PLL. No idea how this should be handled if multiple
1702
		 * PLL. No idea how this should be handled if multiple
1711
		 * DVO outputs are enabled simultaneosly.
1703
		 * DVO outputs are enabled simultaneosly.
1712
		 */
1704
		 */
1713
		dpll |= DPLL_DVO_2X_MODE;
1705
		dpll |= DPLL_DVO_2X_MODE;
1714
		I915_WRITE(DPLL(!crtc->pipe),
1706
		I915_WRITE(DPLL(!crtc->pipe),
1715
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1707
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1716
	}
1708
	}
1717
 
1709
 
1718
	/*
1710
	/*
1719
	 * Apparently we need to have VGA mode enabled prior to changing
1711
	 * Apparently we need to have VGA mode enabled prior to changing
1720
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1712
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1721
	 * dividers, even though the register value does change.
1713
	 * dividers, even though the register value does change.
1722
	 */
1714
	 */
1723
	I915_WRITE(reg, 0);
1715
	I915_WRITE(reg, 0);
1724
 
1716
 
1725
	I915_WRITE(reg, dpll);
1717
	I915_WRITE(reg, dpll);
1726
 
1718
 
1727
	/* Wait for the clocks to stabilize. */
1719
	/* Wait for the clocks to stabilize. */
1728
	POSTING_READ(reg);
1720
	POSTING_READ(reg);
1729
	udelay(150);
1721
	udelay(150);
1730
 
1722
 
1731
	if (INTEL_INFO(dev)->gen >= 4) {
1723
	if (INTEL_INFO(dev)->gen >= 4) {
1732
		I915_WRITE(DPLL_MD(crtc->pipe),
1724
		I915_WRITE(DPLL_MD(crtc->pipe),
1733
			   crtc->config->dpll_hw_state.dpll_md);
1725
			   crtc->config->dpll_hw_state.dpll_md);
1734
	} else {
1726
	} else {
1735
		/* The pixel multiplier can only be updated once the
1727
		/* The pixel multiplier can only be updated once the
1736
		 * DPLL is enabled and the clocks are stable.
1728
		 * DPLL is enabled and the clocks are stable.
1737
		 *
1729
		 *
1738
		 * So write it again.
1730
		 * So write it again.
1739
		 */
1731
		 */
1740
		I915_WRITE(reg, dpll);
1732
		I915_WRITE(reg, dpll);
1741
	}
1733
	}
1742
 
1734
 
1743
	/* We do this three times for luck */
1735
	/* We do this three times for luck */
1744
	I915_WRITE(reg, dpll);
1736
	I915_WRITE(reg, dpll);
1745
	POSTING_READ(reg);
1737
	POSTING_READ(reg);
1746
	udelay(150); /* wait for warmup */
1738
	udelay(150); /* wait for warmup */
1747
	I915_WRITE(reg, dpll);
1739
	I915_WRITE(reg, dpll);
1748
	POSTING_READ(reg);
1740
	POSTING_READ(reg);
1749
	udelay(150); /* wait for warmup */
1741
	udelay(150); /* wait for warmup */
1750
	I915_WRITE(reg, dpll);
1742
	I915_WRITE(reg, dpll);
1751
	POSTING_READ(reg);
1743
	POSTING_READ(reg);
1752
	udelay(150); /* wait for warmup */
1744
	udelay(150); /* wait for warmup */
1753
}
1745
}
1754
 
1746
 
1755
/**
1747
/**
1756
 * i9xx_disable_pll - disable a PLL
1748
 * i9xx_disable_pll - disable a PLL
1757
 * @dev_priv: i915 private structure
1749
 * @dev_priv: i915 private structure
1758
 * @pipe: pipe PLL to disable
1750
 * @pipe: pipe PLL to disable
1759
 *
1751
 *
1760
 * Disable the PLL for @pipe, making sure the pipe is off first.
1752
 * Disable the PLL for @pipe, making sure the pipe is off first.
1761
 *
1753
 *
1762
 * Note!  This is for pre-ILK only.
1754
 * Note!  This is for pre-ILK only.
1763
 */
1755
 */
1764
static void i9xx_disable_pll(struct intel_crtc *crtc)
1756
static void i9xx_disable_pll(struct intel_crtc *crtc)
1765
{
1757
{
1766
	struct drm_device *dev = crtc->base.dev;
1758
	struct drm_device *dev = crtc->base.dev;
1767
	struct drm_i915_private *dev_priv = dev->dev_private;
1759
	struct drm_i915_private *dev_priv = dev->dev_private;
1768
	enum pipe pipe = crtc->pipe;
1760
	enum pipe pipe = crtc->pipe;
1769
 
1761
 
1770
	/* Disable DVO 2x clock on both PLLs if necessary */
1762
	/* Disable DVO 2x clock on both PLLs if necessary */
1771
	if (IS_I830(dev) &&
1763
	if (IS_I830(dev) &&
1772
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1764
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1773
	    !intel_num_dvo_pipes(dev)) {
1765
	    !intel_num_dvo_pipes(dev)) {
1774
		I915_WRITE(DPLL(PIPE_B),
1766
		I915_WRITE(DPLL(PIPE_B),
1775
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1767
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1776
		I915_WRITE(DPLL(PIPE_A),
1768
		I915_WRITE(DPLL(PIPE_A),
1777
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1769
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1778
	}
1770
	}
1779
 
1771
 
1780
	/* Don't disable pipe or pipe PLLs if needed */
1772
	/* Don't disable pipe or pipe PLLs if needed */
1781
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1773
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1782
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1774
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1783
		return;
1775
		return;
1784
 
1776
 
1785
	/* Make sure the pipe isn't still relying on us */
1777
	/* Make sure the pipe isn't still relying on us */
1786
	assert_pipe_disabled(dev_priv, pipe);
1778
	assert_pipe_disabled(dev_priv, pipe);
1787
 
1779
 
1788
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1780
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1789
	POSTING_READ(DPLL(pipe));
1781
	POSTING_READ(DPLL(pipe));
1790
}
1782
}
1791
 
1783
 
1792
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1784
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1793
{
1785
{
1794
	u32 val;
1786
	u32 val;
1795
 
1787
 
1796
	/* Make sure the pipe isn't still relying on us */
1788
	/* Make sure the pipe isn't still relying on us */
1797
	assert_pipe_disabled(dev_priv, pipe);
1789
	assert_pipe_disabled(dev_priv, pipe);
1798
 
1790
 
1799
	/*
1791
	/*
1800
	 * Leave integrated clock source and reference clock enabled for pipe B.
1792
	 * Leave integrated clock source and reference clock enabled for pipe B.
1801
	 * The latter is needed for VGA hotplug / manual detection.
1793
	 * The latter is needed for VGA hotplug / manual detection.
1802
	 */
1794
	 */
1803
	val = DPLL_VGA_MODE_DIS;
1795
	val = DPLL_VGA_MODE_DIS;
1804
	if (pipe == PIPE_B)
1796
	if (pipe == PIPE_B)
1805
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
1797
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
1806
	I915_WRITE(DPLL(pipe), val);
1798
	I915_WRITE(DPLL(pipe), val);
1807
	POSTING_READ(DPLL(pipe));
1799
	POSTING_READ(DPLL(pipe));
1808
 
1800
 
1809
}
1801
}
1810
 
1802
 
1811
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1803
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1812
{
1804
{
1813
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1805
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1814
	u32 val;
1806
	u32 val;
1815
 
1807
 
1816
	/* Make sure the pipe isn't still relying on us */
1808
	/* Make sure the pipe isn't still relying on us */
1817
	assert_pipe_disabled(dev_priv, pipe);
1809
	assert_pipe_disabled(dev_priv, pipe);
1818
 
1810
 
1819
	/* Set PLL en = 0 */
1811
	/* Set PLL en = 0 */
1820
	val = DPLL_SSC_REF_CLK_CHV |
1812
	val = DPLL_SSC_REF_CLK_CHV |
1821
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1813
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1822
	if (pipe != PIPE_A)
1814
	if (pipe != PIPE_A)
1823
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1815
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1824
	I915_WRITE(DPLL(pipe), val);
1816
	I915_WRITE(DPLL(pipe), val);
1825
	POSTING_READ(DPLL(pipe));
1817
	POSTING_READ(DPLL(pipe));
1826
 
1818
 
1827
	mutex_lock(&dev_priv->sb_lock);
1819
	mutex_lock(&dev_priv->sb_lock);
1828
 
1820
 
1829
	/* Disable 10bit clock to display controller */
1821
	/* Disable 10bit clock to display controller */
1830
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1822
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1831
	val &= ~DPIO_DCLKP_EN;
1823
	val &= ~DPIO_DCLKP_EN;
1832
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1824
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1833
 
1825
 
1834
	mutex_unlock(&dev_priv->sb_lock);
1826
	mutex_unlock(&dev_priv->sb_lock);
1835
}
1827
}
1836
 
1828
 
1837
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1829
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1838
			 struct intel_digital_port *dport,
1830
			 struct intel_digital_port *dport,
1839
			 unsigned int expected_mask)
1831
			 unsigned int expected_mask)
1840
{
1832
{
1841
	u32 port_mask;
1833
	u32 port_mask;
1842
	i915_reg_t dpll_reg;
1834
	i915_reg_t dpll_reg;
1843
 
1835
 
1844
	switch (dport->port) {
1836
	switch (dport->port) {
1845
	case PORT_B:
1837
	case PORT_B:
1846
		port_mask = DPLL_PORTB_READY_MASK;
1838
		port_mask = DPLL_PORTB_READY_MASK;
1847
		dpll_reg = DPLL(0);
1839
		dpll_reg = DPLL(0);
1848
		break;
1840
		break;
1849
	case PORT_C:
1841
	case PORT_C:
1850
		port_mask = DPLL_PORTC_READY_MASK;
1842
		port_mask = DPLL_PORTC_READY_MASK;
1851
		dpll_reg = DPLL(0);
1843
		dpll_reg = DPLL(0);
1852
		expected_mask <<= 4;
1844
		expected_mask <<= 4;
1853
		break;
1845
		break;
1854
	case PORT_D:
1846
	case PORT_D:
1855
		port_mask = DPLL_PORTD_READY_MASK;
1847
		port_mask = DPLL_PORTD_READY_MASK;
1856
		dpll_reg = DPIO_PHY_STATUS;
1848
		dpll_reg = DPIO_PHY_STATUS;
1857
		break;
1849
		break;
1858
	default:
1850
	default:
1859
		BUG();
1851
		BUG();
1860
	}
1852
	}
1861
 
1853
 
1862
	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1854
	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1863
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1855
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1864
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1856
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1865
}
1857
}
1866
 
1858
 
1867
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1859
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1868
{
1860
{
1869
	struct drm_device *dev = crtc->base.dev;
1861
	struct drm_device *dev = crtc->base.dev;
1870
	struct drm_i915_private *dev_priv = dev->dev_private;
1862
	struct drm_i915_private *dev_priv = dev->dev_private;
1871
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1863
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1872
 
1864
 
1873
	if (WARN_ON(pll == NULL))
1865
	if (WARN_ON(pll == NULL))
1874
		return;
1866
		return;
1875
 
1867
 
1876
	WARN_ON(!pll->config.crtc_mask);
1868
	WARN_ON(!pll->config.crtc_mask);
1877
	if (pll->active == 0) {
1869
	if (pll->active == 0) {
1878
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1870
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1879
		WARN_ON(pll->on);
1871
		WARN_ON(pll->on);
1880
		assert_shared_dpll_disabled(dev_priv, pll);
1872
		assert_shared_dpll_disabled(dev_priv, pll);
1881
 
1873
 
1882
		pll->mode_set(dev_priv, pll);
1874
		pll->mode_set(dev_priv, pll);
1883
	}
1875
	}
1884
}
1876
}
1885
 
1877
 
1886
/**
1878
/**
1887
 * intel_enable_shared_dpll - enable PCH PLL
1879
 * intel_enable_shared_dpll - enable PCH PLL
1888
 * @dev_priv: i915 private structure
1880
 * @dev_priv: i915 private structure
1889
 * @pipe: pipe PLL to enable
1881
 * @pipe: pipe PLL to enable
1890
 *
1882
 *
1891
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1883
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1892
 * drives the transcoder clock.
1884
 * drives the transcoder clock.
1893
 */
1885
 */
1894
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1886
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1895
{
1887
{
1896
	struct drm_device *dev = crtc->base.dev;
1888
	struct drm_device *dev = crtc->base.dev;
1897
	struct drm_i915_private *dev_priv = dev->dev_private;
1889
	struct drm_i915_private *dev_priv = dev->dev_private;
1898
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1890
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1899
 
1891
 
1900
	if (WARN_ON(pll == NULL))
1892
	if (WARN_ON(pll == NULL))
1901
		return;
1893
		return;
1902
 
1894
 
1903
	if (WARN_ON(pll->config.crtc_mask == 0))
1895
	if (WARN_ON(pll->config.crtc_mask == 0))
1904
		return;
1896
		return;
1905
 
1897
 
1906
	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1898
	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1907
		      pll->name, pll->active, pll->on,
1899
		      pll->name, pll->active, pll->on,
1908
		      crtc->base.base.id);
1900
		      crtc->base.base.id);
1909
 
1901
 
1910
	if (pll->active++) {
1902
	if (pll->active++) {
1911
		WARN_ON(!pll->on);
1903
		WARN_ON(!pll->on);
1912
		assert_shared_dpll_enabled(dev_priv, pll);
1904
		assert_shared_dpll_enabled(dev_priv, pll);
1913
		return;
1905
		return;
1914
	}
1906
	}
1915
	WARN_ON(pll->on);
1907
	WARN_ON(pll->on);
1916
 
1908
 
1917
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1909
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1918
 
1910
 
1919
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1911
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1920
	pll->enable(dev_priv, pll);
1912
	pll->enable(dev_priv, pll);
1921
	pll->on = true;
1913
	pll->on = true;
1922
}
1914
}
1923
 
1915
 
1924
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1916
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1925
{
1917
{
1926
	struct drm_device *dev = crtc->base.dev;
1918
	struct drm_device *dev = crtc->base.dev;
1927
	struct drm_i915_private *dev_priv = dev->dev_private;
1919
	struct drm_i915_private *dev_priv = dev->dev_private;
1928
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1920
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1929
 
1921
 
1930
	/* PCH only available on ILK+ */
1922
	/* PCH only available on ILK+ */
1931
	if (INTEL_INFO(dev)->gen < 5)
1923
	if (INTEL_INFO(dev)->gen < 5)
1932
		return;
1924
		return;
1933
 
1925
 
1934
	if (pll == NULL)
1926
	if (pll == NULL)
1935
		return;
1927
		return;
1936
 
1928
 
1937
	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
1929
	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
1938
		return;
1930
		return;
1939
 
1931
 
1940
	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1932
	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1941
		      pll->name, pll->active, pll->on,
1933
		      pll->name, pll->active, pll->on,
1942
		      crtc->base.base.id);
1934
		      crtc->base.base.id);
1943
 
1935
 
1944
	if (WARN_ON(pll->active == 0)) {
1936
	if (WARN_ON(pll->active == 0)) {
1945
		assert_shared_dpll_disabled(dev_priv, pll);
1937
		assert_shared_dpll_disabled(dev_priv, pll);
1946
		return;
1938
		return;
1947
	}
1939
	}
1948
 
1940
 
1949
	assert_shared_dpll_enabled(dev_priv, pll);
1941
	assert_shared_dpll_enabled(dev_priv, pll);
1950
	WARN_ON(!pll->on);
1942
	WARN_ON(!pll->on);
1951
	if (--pll->active)
1943
	if (--pll->active)
1952
		return;
1944
		return;
1953
 
1945
 
1954
	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1946
	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1955
	pll->disable(dev_priv, pll);
1947
	pll->disable(dev_priv, pll);
1956
	pll->on = false;
1948
	pll->on = false;
1957
 
1949
 
1958
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1950
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1959
}
1951
}
1960
 
1952
 
1961
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1953
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1962
					   enum pipe pipe)
1954
					   enum pipe pipe)
1963
{
1955
{
1964
	struct drm_device *dev = dev_priv->dev;
1956
	struct drm_device *dev = dev_priv->dev;
1965
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1957
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1966
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1958
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1967
	i915_reg_t reg;
1959
	i915_reg_t reg;
1968
	uint32_t val, pipeconf_val;
1960
	uint32_t val, pipeconf_val;
1969
 
1961
 
1970
	/* PCH only available on ILK+ */
1962
	/* PCH only available on ILK+ */
1971
	BUG_ON(!HAS_PCH_SPLIT(dev));
1963
	BUG_ON(!HAS_PCH_SPLIT(dev));
1972
 
1964
 
1973
	/* Make sure PCH DPLL is enabled */
1965
	/* Make sure PCH DPLL is enabled */
1974
	assert_shared_dpll_enabled(dev_priv,
1966
	assert_shared_dpll_enabled(dev_priv,
1975
				   intel_crtc_to_shared_dpll(intel_crtc));
1967
				   intel_crtc_to_shared_dpll(intel_crtc));
1976
 
1968
 
1977
	/* FDI must be feeding us bits for PCH ports */
1969
	/* FDI must be feeding us bits for PCH ports */
1978
	assert_fdi_tx_enabled(dev_priv, pipe);
1970
	assert_fdi_tx_enabled(dev_priv, pipe);
1979
	assert_fdi_rx_enabled(dev_priv, pipe);
1971
	assert_fdi_rx_enabled(dev_priv, pipe);
1980
 
1972
 
1981
	if (HAS_PCH_CPT(dev)) {
1973
	if (HAS_PCH_CPT(dev)) {
1982
		/* Workaround: Set the timing override bit before enabling the
1974
		/* Workaround: Set the timing override bit before enabling the
1983
		 * pch transcoder. */
1975
		 * pch transcoder. */
1984
		reg = TRANS_CHICKEN2(pipe);
1976
		reg = TRANS_CHICKEN2(pipe);
1985
		val = I915_READ(reg);
1977
		val = I915_READ(reg);
1986
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1978
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1987
		I915_WRITE(reg, val);
1979
		I915_WRITE(reg, val);
1988
	}
1980
	}
1989
 
1981
 
1990
	reg = PCH_TRANSCONF(pipe);
1982
	reg = PCH_TRANSCONF(pipe);
1991
	val = I915_READ(reg);
1983
	val = I915_READ(reg);
1992
	pipeconf_val = I915_READ(PIPECONF(pipe));
1984
	pipeconf_val = I915_READ(PIPECONF(pipe));
1993
 
1985
 
1994
	if (HAS_PCH_IBX(dev_priv->dev)) {
1986
	if (HAS_PCH_IBX(dev_priv->dev)) {
1995
		/*
1987
		/*
1996
		 * Make the BPC in transcoder be consistent with
1988
		 * Make the BPC in transcoder be consistent with
1997
		 * that in pipeconf reg. For HDMI we must use 8bpc
1989
		 * that in pipeconf reg. For HDMI we must use 8bpc
1998
		 * here for both 8bpc and 12bpc.
1990
		 * here for both 8bpc and 12bpc.
1999
		 */
1991
		 */
2000
		val &= ~PIPECONF_BPC_MASK;
1992
		val &= ~PIPECONF_BPC_MASK;
2001
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1993
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
2002
			val |= PIPECONF_8BPC;
1994
			val |= PIPECONF_8BPC;
2003
		else
1995
		else
2004
			val |= pipeconf_val & PIPECONF_BPC_MASK;
1996
			val |= pipeconf_val & PIPECONF_BPC_MASK;
2005
	}
1997
	}
2006
 
1998
 
2007
	val &= ~TRANS_INTERLACE_MASK;
1999
	val &= ~TRANS_INTERLACE_MASK;
2008
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
2000
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
2009
		if (HAS_PCH_IBX(dev_priv->dev) &&
2001
		if (HAS_PCH_IBX(dev_priv->dev) &&
2010
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
2002
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
2011
			val |= TRANS_LEGACY_INTERLACED_ILK;
2003
			val |= TRANS_LEGACY_INTERLACED_ILK;
2012
		else
2004
		else
2013
			val |= TRANS_INTERLACED;
2005
			val |= TRANS_INTERLACED;
2014
	else
2006
	else
2015
		val |= TRANS_PROGRESSIVE;
2007
		val |= TRANS_PROGRESSIVE;
2016
 
2008
 
2017
	I915_WRITE(reg, val | TRANS_ENABLE);
2009
	I915_WRITE(reg, val | TRANS_ENABLE);
2018
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2010
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2019
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2011
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2020
}
2012
}
2021
 
2013
 
2022
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2014
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2023
				      enum transcoder cpu_transcoder)
2015
				      enum transcoder cpu_transcoder)
2024
{
2016
{
2025
	u32 val, pipeconf_val;
2017
	u32 val, pipeconf_val;
2026
 
2018
 
2027
	/* PCH only available on ILK+ */
2019
	/* PCH only available on ILK+ */
2028
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
2020
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
2029
 
2021
 
2030
	/* FDI must be feeding us bits for PCH ports */
2022
	/* FDI must be feeding us bits for PCH ports */
2031
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
2023
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
2032
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
2024
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
2033
 
2025
 
2034
	/* Workaround: set timing override bit. */
2026
	/* Workaround: set timing override bit. */
2035
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2027
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2036
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
2028
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
2037
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2029
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2038
 
2030
 
2039
	val = TRANS_ENABLE;
2031
	val = TRANS_ENABLE;
2040
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
2032
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
2041
 
2033
 
2042
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
2034
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
2043
	    PIPECONF_INTERLACED_ILK)
2035
	    PIPECONF_INTERLACED_ILK)
2044
		val |= TRANS_INTERLACED;
2036
		val |= TRANS_INTERLACED;
2045
	else
2037
	else
2046
		val |= TRANS_PROGRESSIVE;
2038
		val |= TRANS_PROGRESSIVE;
2047
 
2039
 
2048
	I915_WRITE(LPT_TRANSCONF, val);
2040
	I915_WRITE(LPT_TRANSCONF, val);
2049
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
2041
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
2050
		DRM_ERROR("Failed to enable PCH transcoder\n");
2042
		DRM_ERROR("Failed to enable PCH transcoder\n");
2051
}
2043
}
2052
 
2044
 
2053
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2045
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2054
					    enum pipe pipe)
2046
					    enum pipe pipe)
2055
{
2047
{
2056
	struct drm_device *dev = dev_priv->dev;
2048
	struct drm_device *dev = dev_priv->dev;
2057
	i915_reg_t reg;
2049
	i915_reg_t reg;
2058
	uint32_t val;
2050
	uint32_t val;
2059
 
2051
 
2060
	/* FDI relies on the transcoder */
2052
	/* FDI relies on the transcoder */
2061
	assert_fdi_tx_disabled(dev_priv, pipe);
2053
	assert_fdi_tx_disabled(dev_priv, pipe);
2062
	assert_fdi_rx_disabled(dev_priv, pipe);
2054
	assert_fdi_rx_disabled(dev_priv, pipe);
2063
 
2055
 
2064
	/* Ports must be off as well */
2056
	/* Ports must be off as well */
2065
	assert_pch_ports_disabled(dev_priv, pipe);
2057
	assert_pch_ports_disabled(dev_priv, pipe);
2066
 
2058
 
2067
	reg = PCH_TRANSCONF(pipe);
2059
	reg = PCH_TRANSCONF(pipe);
2068
	val = I915_READ(reg);
2060
	val = I915_READ(reg);
2069
	val &= ~TRANS_ENABLE;
2061
	val &= ~TRANS_ENABLE;
2070
	I915_WRITE(reg, val);
2062
	I915_WRITE(reg, val);
2071
	/* wait for PCH transcoder off, transcoder state */
2063
	/* wait for PCH transcoder off, transcoder state */
2072
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2064
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2073
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
2065
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
2074
 
2066
 
2075
	if (HAS_PCH_CPT(dev)) {
2067
	if (HAS_PCH_CPT(dev)) {
2076
		/* Workaround: Clear the timing override chicken bit again. */
2068
		/* Workaround: Clear the timing override chicken bit again. */
2077
		reg = TRANS_CHICKEN2(pipe);
2069
		reg = TRANS_CHICKEN2(pipe);
2078
		val = I915_READ(reg);
2070
		val = I915_READ(reg);
2079
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2071
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2080
		I915_WRITE(reg, val);
2072
		I915_WRITE(reg, val);
2081
	}
2073
	}
2082
}
2074
}
2083
 
2075
 
2084
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
2076
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
2085
{
2077
{
2086
	u32 val;
2078
	u32 val;
2087
 
2079
 
2088
	val = I915_READ(LPT_TRANSCONF);
2080
	val = I915_READ(LPT_TRANSCONF);
2089
	val &= ~TRANS_ENABLE;
2081
	val &= ~TRANS_ENABLE;
2090
	I915_WRITE(LPT_TRANSCONF, val);
2082
	I915_WRITE(LPT_TRANSCONF, val);
2091
	/* wait for PCH transcoder off, transcoder state */
2083
	/* wait for PCH transcoder off, transcoder state */
2092
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
2084
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
2093
		DRM_ERROR("Failed to disable PCH transcoder\n");
2085
		DRM_ERROR("Failed to disable PCH transcoder\n");
2094
 
2086
 
2095
	/* Workaround: clear timing override bit. */
2087
	/* Workaround: clear timing override bit. */
2096
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2088
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2097
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2089
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2098
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2090
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2099
}
2091
}
2100
 
2092
 
2101
/**
2093
/**
2102
 * intel_enable_pipe - enable a pipe, asserting requirements
2094
 * intel_enable_pipe - enable a pipe, asserting requirements
2103
 * @crtc: crtc responsible for the pipe
2095
 * @crtc: crtc responsible for the pipe
2104
 *
2096
 *
2105
 * Enable @crtc's pipe, making sure that various hardware specific requirements
2097
 * Enable @crtc's pipe, making sure that various hardware specific requirements
2106
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2098
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2107
 */
2099
 */
2108
static void intel_enable_pipe(struct intel_crtc *crtc)
2100
static void intel_enable_pipe(struct intel_crtc *crtc)
2109
{
2101
{
2110
	struct drm_device *dev = crtc->base.dev;
2102
	struct drm_device *dev = crtc->base.dev;
2111
	struct drm_i915_private *dev_priv = dev->dev_private;
2103
	struct drm_i915_private *dev_priv = dev->dev_private;
2112
	enum pipe pipe = crtc->pipe;
2104
	enum pipe pipe = crtc->pipe;
2113
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2105
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2114
	enum pipe pch_transcoder;
2106
	enum pipe pch_transcoder;
2115
	i915_reg_t reg;
2107
	i915_reg_t reg;
2116
	u32 val;
2108
	u32 val;
2117
 
2109
 
2118
	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2110
	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2119
 
2111
 
2120
	assert_planes_disabled(dev_priv, pipe);
2112
	assert_planes_disabled(dev_priv, pipe);
2121
	assert_cursor_disabled(dev_priv, pipe);
2113
	assert_cursor_disabled(dev_priv, pipe);
2122
	assert_sprites_disabled(dev_priv, pipe);
2114
	assert_sprites_disabled(dev_priv, pipe);
2123
 
2115
 
2124
	if (HAS_PCH_LPT(dev_priv->dev))
2116
	if (HAS_PCH_LPT(dev_priv->dev))
2125
		pch_transcoder = TRANSCODER_A;
2117
		pch_transcoder = TRANSCODER_A;
2126
	else
2118
	else
2127
		pch_transcoder = pipe;
2119
		pch_transcoder = pipe;
2128
 
2120
 
2129
	/*
2121
	/*
2130
	 * A pipe without a PLL won't actually be able to drive bits from
2122
	 * A pipe without a PLL won't actually be able to drive bits from
2131
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2123
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2132
	 * need the check.
2124
	 * need the check.
2133
	 */
2125
	 */
2134
	if (HAS_GMCH_DISPLAY(dev_priv->dev))
2126
	if (HAS_GMCH_DISPLAY(dev_priv->dev))
2135
		if (crtc->config->has_dsi_encoder)
2127
		if (crtc->config->has_dsi_encoder)
2136
			assert_dsi_pll_enabled(dev_priv);
2128
			assert_dsi_pll_enabled(dev_priv);
2137
		else
2129
		else
2138
			assert_pll_enabled(dev_priv, pipe);
2130
			assert_pll_enabled(dev_priv, pipe);
2139
	else {
2131
	else {
2140
		if (crtc->config->has_pch_encoder) {
2132
		if (crtc->config->has_pch_encoder) {
2141
			/* if driving the PCH, we need FDI enabled */
2133
			/* if driving the PCH, we need FDI enabled */
2142
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2134
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2143
			assert_fdi_tx_pll_enabled(dev_priv,
2135
			assert_fdi_tx_pll_enabled(dev_priv,
2144
						  (enum pipe) cpu_transcoder);
2136
						  (enum pipe) cpu_transcoder);
2145
		}
2137
		}
2146
		/* FIXME: assert CPU port conditions for SNB+ */
2138
		/* FIXME: assert CPU port conditions for SNB+ */
2147
	}
2139
	}
2148
 
2140
 
2149
	reg = PIPECONF(cpu_transcoder);
2141
	reg = PIPECONF(cpu_transcoder);
2150
	val = I915_READ(reg);
2142
	val = I915_READ(reg);
2151
	if (val & PIPECONF_ENABLE) {
2143
	if (val & PIPECONF_ENABLE) {
2152
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2144
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2153
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2145
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2154
		return;
2146
		return;
2155
	}
2147
	}
2156
 
2148
 
2157
	I915_WRITE(reg, val | PIPECONF_ENABLE);
2149
	I915_WRITE(reg, val | PIPECONF_ENABLE);
2158
	POSTING_READ(reg);
2150
	POSTING_READ(reg);
-
 
2151
 
-
 
2152
	/*
-
 
2153
	 * Until the pipe starts DSL will read as 0, which would cause
-
 
2154
	 * an apparent vblank timestamp jump, which messes up also the
-
 
2155
	 * frame count when it's derived from the timestamps. So let's
-
 
2156
	 * wait for the pipe to start properly before we call
-
 
2157
	 * drm_crtc_vblank_on()
-
 
2158
	 */
-
 
2159
	if (dev->max_vblank_count == 0 &&
-
 
2160
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
-
 
2161
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
2159
}
2162
}
2160
 
2163
 
2161
/**
2164
/**
2162
 * intel_disable_pipe - disable a pipe, asserting requirements
2165
 * intel_disable_pipe - disable a pipe, asserting requirements
2163
 * @crtc: crtc whose pipes is to be disabled
2166
 * @crtc: crtc whose pipes is to be disabled
2164
 *
2167
 *
2165
 * Disable the pipe of @crtc, making sure that various hardware
2168
 * Disable the pipe of @crtc, making sure that various hardware
2166
 * specific requirements are met, if applicable, e.g. plane
2169
 * specific requirements are met, if applicable, e.g. plane
2167
 * disabled, panel fitter off, etc.
2170
 * disabled, panel fitter off, etc.
2168
 *
2171
 *
2169
 * Will wait until the pipe has shut down before returning.
2172
 * Will wait until the pipe has shut down before returning.
2170
 */
2173
 */
2171
static void intel_disable_pipe(struct intel_crtc *crtc)
2174
static void intel_disable_pipe(struct intel_crtc *crtc)
2172
{
2175
{
2173
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2176
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2174
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2177
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2175
	enum pipe pipe = crtc->pipe;
2178
	enum pipe pipe = crtc->pipe;
2176
	i915_reg_t reg;
2179
	i915_reg_t reg;
2177
	u32 val;
2180
	u32 val;
2178
 
2181
 
2179
	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2182
	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2180
 
2183
 
2181
	/*
2184
	/*
2182
	 * Make sure planes won't keep trying to pump pixels to us,
2185
	 * Make sure planes won't keep trying to pump pixels to us,
2183
	 * or we might hang the display.
2186
	 * or we might hang the display.
2184
	 */
2187
	 */
2185
	assert_planes_disabled(dev_priv, pipe);
2188
	assert_planes_disabled(dev_priv, pipe);
2186
	assert_cursor_disabled(dev_priv, pipe);
2189
	assert_cursor_disabled(dev_priv, pipe);
2187
	assert_sprites_disabled(dev_priv, pipe);
2190
	assert_sprites_disabled(dev_priv, pipe);
2188
 
2191
 
2189
	reg = PIPECONF(cpu_transcoder);
2192
	reg = PIPECONF(cpu_transcoder);
2190
	val = I915_READ(reg);
2193
	val = I915_READ(reg);
2191
	if ((val & PIPECONF_ENABLE) == 0)
2194
	if ((val & PIPECONF_ENABLE) == 0)
2192
		return;
2195
		return;
2193
 
2196
 
2194
	/*
2197
	/*
2195
	 * Double wide has implications for planes
2198
	 * Double wide has implications for planes
2196
	 * so best keep it disabled when not needed.
2199
	 * so best keep it disabled when not needed.
2197
	 */
2200
	 */
2198
	if (crtc->config->double_wide)
2201
	if (crtc->config->double_wide)
2199
		val &= ~PIPECONF_DOUBLE_WIDE;
2202
		val &= ~PIPECONF_DOUBLE_WIDE;
2200
 
2203
 
2201
	/* Don't disable pipe or pipe PLLs if needed */
2204
	/* Don't disable pipe or pipe PLLs if needed */
2202
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2205
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2203
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2206
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2204
		val &= ~PIPECONF_ENABLE;
2207
		val &= ~PIPECONF_ENABLE;
2205
 
2208
 
2206
	I915_WRITE(reg, val);
2209
	I915_WRITE(reg, val);
2207
	if ((val & PIPECONF_ENABLE) == 0)
2210
	if ((val & PIPECONF_ENABLE) == 0)
2208
		intel_wait_for_pipe_off(crtc);
2211
		intel_wait_for_pipe_off(crtc);
2209
}
2212
}
2210
 
2213
 
2211
static bool need_vtd_wa(struct drm_device *dev)
2214
static bool need_vtd_wa(struct drm_device *dev)
2212
{
2215
{
2213
#ifdef CONFIG_INTEL_IOMMU
2216
#ifdef CONFIG_INTEL_IOMMU
2214
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2217
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2215
		return true;
2218
		return true;
2216
#endif
2219
#endif
2217
	return false;
2220
	return false;
2218
}
2221
}
2219
 
-
 
2220
unsigned int
2222
 
2221
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
-
 
2222
		  uint64_t fb_format_modifier, unsigned int plane)
2223
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
2223
{
2224
{
2224
	unsigned int tile_height;
-
 
-
 
2225
	return IS_GEN2(dev_priv) ? 2048 : 4096;
-
 
2226
}
-
 
2227
 
-
 
2228
static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
2225
	uint32_t pixel_bytes;
2229
				     uint64_t fb_modifier, unsigned int cpp)
2226
 
2230
{
2227
	switch (fb_format_modifier) {
-
 
2228
	case DRM_FORMAT_MOD_NONE:
2231
	switch (fb_modifier) {
2229
		tile_height = 1;
2232
	case DRM_FORMAT_MOD_NONE:
2230
		break;
2233
		return cpp;
-
 
2234
	case I915_FORMAT_MOD_X_TILED:
2231
	case I915_FORMAT_MOD_X_TILED:
2235
		if (IS_GEN2(dev_priv))
-
 
2236
			return 128;
2232
		tile_height = IS_GEN2(dev) ? 16 : 8;
2237
		else
-
 
2238
			return 512;
2233
		break;
2239
	case I915_FORMAT_MOD_Y_TILED:
2234
	case I915_FORMAT_MOD_Y_TILED:
2240
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
-
 
2241
			return 128;
2235
		tile_height = 32;
2242
		else
2236
		break;
-
 
2237
	case I915_FORMAT_MOD_Yf_TILED:
2243
			return 512;
2238
		pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
-
 
2239
		switch (pixel_bytes) {
2244
	case I915_FORMAT_MOD_Yf_TILED:
2240
		default:
-
 
2241
		case 1:
2245
		switch (cpp) {
2242
			tile_height = 64;
2246
		case 1:
2243
			break;
2247
			return 64;
2244
		case 2:
-
 
2245
		case 4:
2248
		case 2:
2246
			tile_height = 32;
2249
		case 4:
2247
			break;
-
 
2248
		case 8:
-
 
2249
			tile_height = 16;
2250
			return 128;
2250
			break;
2251
		case 8:
2251
		case 16:
2252
		case 16:
2252
			WARN_ONCE(1,
2253
			return 256;
2253
				  "128-bit pixels are not supported for display!");
2254
		default:
2254
			tile_height = 16;
2255
			MISSING_CASE(cpp);
2255
			break;
2256
			return cpp;
2256
		}
2257
		}
2257
		break;
2258
		break;
2258
	default:
2259
	default:
2259
		MISSING_CASE(fb_format_modifier);
2260
		MISSING_CASE(fb_modifier);
2260
		tile_height = 1;
2261
		return cpp;
2261
		break;
2262
	}
2262
	}
2263
}
-
 
2264
 
-
 
2265
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
-
 
2266
			       uint64_t fb_modifier, unsigned int cpp)
-
 
2267
{
2263
 
2268
	if (fb_modifier == DRM_FORMAT_MOD_NONE)
-
 
2269
		return 1;
-
 
2270
	else
-
 
2271
		return intel_tile_size(dev_priv) /
2264
	return tile_height;
2272
			intel_tile_width(dev_priv, fb_modifier, cpp);
2265
}
2273
}
2266
 
2274
 
2267
unsigned int
2275
unsigned int
2268
intel_fb_align_height(struct drm_device *dev, unsigned int height,
2276
intel_fb_align_height(struct drm_device *dev, unsigned int height,
2269
		      uint32_t pixel_format, uint64_t fb_format_modifier)
2277
		      uint32_t pixel_format, uint64_t fb_modifier)
2270
{
2278
{
-
 
2279
	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2271
	return ALIGN(height, intel_tile_height(dev, pixel_format,
2280
	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
-
 
2281
 
2272
					       fb_format_modifier, 0));
2282
	return ALIGN(height, tile_height);
2273
}
2283
}
2274
 
2284
 
2275
static void
2285
static void
2276
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2286
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2277
			const struct drm_plane_state *plane_state)
2287
			const struct drm_plane_state *plane_state)
2278
{
2288
{
-
 
2289
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2279
	struct intel_rotation_info *info = &view->params.rotation_info;
2290
	struct intel_rotation_info *info = &view->params.rotated;
2280
	unsigned int tile_height, tile_pitch;
2291
	unsigned int tile_size, tile_width, tile_height, cpp;
2281
 
2292
 
2282
	*view = i915_ggtt_view_normal;
2293
	*view = i915_ggtt_view_normal;
2283
 
2294
 
2284
	if (!plane_state)
2295
	if (!plane_state)
2285
		return;
2296
		return;
2286
 
2297
 
2287
	if (!intel_rotation_90_or_270(plane_state->rotation))
2298
	if (!intel_rotation_90_or_270(plane_state->rotation))
2288
		return;
2299
		return;
2289
 
2300
 
2290
	*view = i915_ggtt_view_rotated;
2301
	*view = i915_ggtt_view_rotated;
2291
 
2302
 
2292
	info->height = fb->height;
2303
	info->height = fb->height;
2293
	info->pixel_format = fb->pixel_format;
2304
	info->pixel_format = fb->pixel_format;
2294
	info->pitch = fb->pitches[0];
2305
	info->pitch = fb->pitches[0];
2295
	info->uv_offset = fb->offsets[1];
2306
	info->uv_offset = fb->offsets[1];
2296
	info->fb_modifier = fb->modifier[0];
2307
	info->fb_modifier = fb->modifier[0];
2297
 
2308
 
-
 
2309
	tile_size = intel_tile_size(dev_priv);
-
 
2310
 
2298
	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
2311
	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2299
					fb->modifier[0], 0);
2312
	tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
-
 
2313
	tile_height = tile_size / tile_width;
2300
	tile_pitch = PAGE_SIZE / tile_height;
2314
 
2301
	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
2315
	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
2302
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
2316
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
2303
	info->size = info->width_pages * info->height_pages * PAGE_SIZE;
2317
	info->size = info->width_pages * info->height_pages * tile_size;
2304
 
2318
 
2305
	if (info->pixel_format == DRM_FORMAT_NV12) {
2319
	if (info->pixel_format == DRM_FORMAT_NV12) {
2306
		tile_height = intel_tile_height(fb->dev, fb->pixel_format,
2320
		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
-
 
2321
		tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
2307
						fb->modifier[0], 1);
2322
		tile_height = tile_size / tile_width;
2308
		tile_pitch = PAGE_SIZE / tile_height;
2323
 
2309
		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
-
 
2310
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
2324
		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
2311
						     tile_height);
-
 
2312
		info->size_uv = info->width_pages_uv * info->height_pages_uv *
2325
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
2313
				PAGE_SIZE;
2326
		info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
2314
	}
2327
	}
2315
}
2328
}
2316
 
2329
 
2317
static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2330
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2318
{
2331
{
2319
	if (INTEL_INFO(dev_priv)->gen >= 9)
2332
	if (INTEL_INFO(dev_priv)->gen >= 9)
2320
		return 256 * 1024;
2333
		return 256 * 1024;
2321
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2334
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2322
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2335
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2323
		return 128 * 1024;
2336
		return 128 * 1024;
2324
	else if (INTEL_INFO(dev_priv)->gen >= 4)
2337
	else if (INTEL_INFO(dev_priv)->gen >= 4)
2325
		return 4 * 1024;
2338
		return 4 * 1024;
2326
	else
2339
	else
2327
		return 0;
2340
		return 0;
2328
}
2341
}
-
 
2342
 
-
 
2343
static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
-
 
2344
					 uint64_t fb_modifier)
-
 
2345
{
-
 
2346
	switch (fb_modifier) {
-
 
2347
	case DRM_FORMAT_MOD_NONE:
-
 
2348
		return intel_linear_alignment(dev_priv);
-
 
2349
	case I915_FORMAT_MOD_X_TILED:
-
 
2350
		if (INTEL_INFO(dev_priv)->gen >= 9)
-
 
2351
			return 256 * 1024;
-
 
2352
		return 0;
-
 
2353
	case I915_FORMAT_MOD_Y_TILED:
-
 
2354
	case I915_FORMAT_MOD_Yf_TILED:
-
 
2355
		return 1 * 1024 * 1024;
-
 
2356
	default:
-
 
2357
		MISSING_CASE(fb_modifier);
-
 
2358
		return 0;
-
 
2359
	}
-
 
2360
}
2329
 
2361
 
2330
int
2362
int
2331
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2363
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2332
			   struct drm_framebuffer *fb,
2364
			   struct drm_framebuffer *fb,
2333
			   const struct drm_plane_state *plane_state)
2365
			   const struct drm_plane_state *plane_state)
2334
{
2366
{
2335
	struct drm_device *dev = fb->dev;
2367
	struct drm_device *dev = fb->dev;
2336
	struct drm_i915_private *dev_priv = dev->dev_private;
2368
	struct drm_i915_private *dev_priv = dev->dev_private;
2337
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2369
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2338
	struct i915_ggtt_view view;
2370
	struct i915_ggtt_view view;
2339
	u32 alignment;
2371
	u32 alignment;
2340
	int ret;
2372
	int ret;
2341
 
2373
 
2342
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2374
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2343
 
-
 
2344
	switch (fb->modifier[0]) {
-
 
2345
	case DRM_FORMAT_MOD_NONE:
2375
 
2346
		alignment = intel_linear_alignment(dev_priv);
-
 
2347
		break;
-
 
2348
	case I915_FORMAT_MOD_X_TILED:
-
 
2349
		if (INTEL_INFO(dev)->gen >= 9)
-
 
2350
			alignment = 256 * 1024;
-
 
2351
		else {
-
 
2352
			/* pin() will align the object as required by fence */
-
 
2353
			alignment = 0;
-
 
2354
		}
-
 
2355
		break;
-
 
2356
	case I915_FORMAT_MOD_Y_TILED:
-
 
2357
	case I915_FORMAT_MOD_Yf_TILED:
-
 
2358
		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
-
 
2359
			  "Y tiling bo slipped through, driver bug!\n"))
-
 
2360
			return -EINVAL;
-
 
2361
		alignment = 1 * 1024 * 1024;
-
 
2362
		break;
-
 
2363
	default:
-
 
2364
		MISSING_CASE(fb->modifier[0]);
-
 
2365
		return -EINVAL;
-
 
2366
	}
2376
	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
2367
 
2377
 
2368
	intel_fill_fb_ggtt_view(&view, fb, plane_state);
2378
	intel_fill_fb_ggtt_view(&view, fb, plane_state);
2369
 
2379
 
2370
	/* Note that the w/a also requires 64 PTE of padding following the
2380
	/* Note that the w/a also requires 64 PTE of padding following the
2371
	 * bo. We currently fill all unused PTE with the shadow page and so
2381
	 * bo. We currently fill all unused PTE with the shadow page and so
2372
	 * we should always have valid PTE following the scanout preventing
2382
	 * we should always have valid PTE following the scanout preventing
2373
	 * the VT-d warning.
2383
	 * the VT-d warning.
2374
	 */
2384
	 */
2375
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2385
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2376
		alignment = 256 * 1024;
2386
		alignment = 256 * 1024;
2377
 
2387
 
2378
	/*
2388
	/*
2379
	 * Global gtt pte registers are special registers which actually forward
2389
	 * Global gtt pte registers are special registers which actually forward
2380
	 * writes to a chunk of system memory. Which means that there is no risk
2390
	 * writes to a chunk of system memory. Which means that there is no risk
2381
	 * that the register values disappear as soon as we call
2391
	 * that the register values disappear as soon as we call
2382
	 * intel_runtime_pm_put(), so it is correct to wrap only the
2392
	 * intel_runtime_pm_put(), so it is correct to wrap only the
2383
	 * pin/unpin/fence and not more.
2393
	 * pin/unpin/fence and not more.
2384
	 */
2394
	 */
2385
	intel_runtime_pm_get(dev_priv);
2395
	intel_runtime_pm_get(dev_priv);
2386
 
2396
 
2387
	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2397
	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2388
						   &view);
2398
						   &view);
2389
	if (ret)
2399
	if (ret)
2390
		goto err_pm;
2400
		goto err_pm;
2391
 
2401
 
2392
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2402
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2393
	 * fence, whereas 965+ only requires a fence if using
2403
	 * fence, whereas 965+ only requires a fence if using
2394
	 * framebuffer compression.  For simplicity, we always install
2404
	 * framebuffer compression.  For simplicity, we always install
2395
	 * a fence as the cost is not that onerous.
2405
	 * a fence as the cost is not that onerous.
2396
	 */
2406
	 */
2397
	if (view.type == I915_GGTT_VIEW_NORMAL) {
2407
	if (view.type == I915_GGTT_VIEW_NORMAL) {
2398
		ret = i915_gem_object_get_fence(obj);
2408
		ret = i915_gem_object_get_fence(obj);
2399
		if (ret == -EDEADLK) {
2409
		if (ret == -EDEADLK) {
2400
			/*
2410
			/*
2401
			 * -EDEADLK means there are no free fences
2411
			 * -EDEADLK means there are no free fences
2402
			 * no pending flips.
2412
			 * no pending flips.
2403
			 *
2413
			 *
2404
			 * This is propagated to atomic, but it uses
2414
			 * This is propagated to atomic, but it uses
2405
			 * -EDEADLK to force a locking recovery, so
2415
			 * -EDEADLK to force a locking recovery, so
2406
			 * change the returned error to -EBUSY.
2416
			 * change the returned error to -EBUSY.
2407
			 */
2417
			 */
2408
			ret = -EBUSY;
2418
			ret = -EBUSY;
2409
			goto err_unpin;
2419
			goto err_unpin;
2410
		} else if (ret)
2420
		} else if (ret)
2411
			goto err_unpin;
2421
			goto err_unpin;
2412
 
2422
 
2413
		i915_gem_object_pin_fence(obj);
2423
		i915_gem_object_pin_fence(obj);
2414
	}
2424
	}
2415
 
2425
 
2416
	intel_runtime_pm_put(dev_priv);
2426
	intel_runtime_pm_put(dev_priv);
2417
	return 0;
2427
	return 0;
2418
 
2428
 
2419
err_unpin:
2429
err_unpin:
2420
	i915_gem_object_unpin_from_display_plane(obj, &view);
2430
	i915_gem_object_unpin_from_display_plane(obj, &view);
2421
err_pm:
2431
err_pm:
2422
	intel_runtime_pm_put(dev_priv);
2432
	intel_runtime_pm_put(dev_priv);
2423
	return ret;
2433
	return ret;
2424
}
2434
}
2425
 
2435
 
2426
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2436
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2427
			       const struct drm_plane_state *plane_state)
2437
			       const struct drm_plane_state *plane_state)
2428
{
2438
{
2429
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2439
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2430
	struct i915_ggtt_view view;
2440
	struct i915_ggtt_view view;
2431
 
2441
 
2432
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2442
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2433
 
2443
 
2434
	intel_fill_fb_ggtt_view(&view, fb, plane_state);
2444
	intel_fill_fb_ggtt_view(&view, fb, plane_state);
2435
 
2445
 
2436
	if (view.type == I915_GGTT_VIEW_NORMAL)
2446
	if (view.type == I915_GGTT_VIEW_NORMAL)
2437
		i915_gem_object_unpin_fence(obj);
2447
		i915_gem_object_unpin_fence(obj);
2438
 
2448
 
2439
	i915_gem_object_unpin_from_display_plane(obj, &view);
2449
	i915_gem_object_unpin_from_display_plane(obj, &view);
2440
}
2450
}
2441
 
2451
 
2442
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2452
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2443
 * is assumed to be a power-of-two. */
2453
 * is assumed to be a power-of-two. */
2444
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
2454
u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
2445
					     int *x, int *y,
2455
			      int *x, int *y,
2446
					     unsigned int tiling_mode,
2456
			      uint64_t fb_modifier,
2447
					     unsigned int cpp,
2457
			      unsigned int cpp,
2448
					     unsigned int pitch)
2458
			      unsigned int pitch)
2449
{
2459
{
2450
	if (tiling_mode != I915_TILING_NONE) {
2460
	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
-
 
2461
		unsigned int tile_size, tile_width, tile_height;
2451
		unsigned int tile_rows, tiles;
2462
		unsigned int tile_rows, tiles;
2452
 
2463
 
-
 
2464
		tile_size = intel_tile_size(dev_priv);
2453
		tile_rows = *y / 8;
2465
		tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
2454
		*y %= 8;
2466
		tile_height = tile_size / tile_width;
2455
 
2467
 
-
 
2468
		tile_rows = *y / tile_height;
-
 
2469
		*y %= tile_height;
-
 
2470
 
2456
		tiles = *x / (512/cpp);
2471
		tiles = *x / (tile_width/cpp);
2457
		*x %= 512/cpp;
2472
		*x %= tile_width/cpp;
2458
 
2473
 
2459
		return tile_rows * pitch * 8 + tiles * 4096;
2474
		return tile_rows * pitch * tile_height + tiles * tile_size;
2460
	} else {
2475
	} else {
2461
		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
2476
		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
2462
		unsigned int offset;
2477
		unsigned int offset;
2463
 
2478
 
2464
		offset = *y * pitch + *x * cpp;
2479
		offset = *y * pitch + *x * cpp;
2465
		*y = (offset & alignment) / pitch;
2480
		*y = (offset & alignment) / pitch;
2466
		*x = ((offset & alignment) - *y * pitch) / cpp;
2481
		*x = ((offset & alignment) - *y * pitch) / cpp;
2467
		return offset & ~alignment;
2482
		return offset & ~alignment;
2468
	}
2483
	}
2469
}
2484
}
2470
 
2485
 
2471
static int i9xx_format_to_fourcc(int format)
2486
static int i9xx_format_to_fourcc(int format)
2472
{
2487
{
2473
	switch (format) {
2488
	switch (format) {
2474
	case DISPPLANE_8BPP:
2489
	case DISPPLANE_8BPP:
2475
		return DRM_FORMAT_C8;
2490
		return DRM_FORMAT_C8;
2476
	case DISPPLANE_BGRX555:
2491
	case DISPPLANE_BGRX555:
2477
		return DRM_FORMAT_XRGB1555;
2492
		return DRM_FORMAT_XRGB1555;
2478
	case DISPPLANE_BGRX565:
2493
	case DISPPLANE_BGRX565:
2479
		return DRM_FORMAT_RGB565;
2494
		return DRM_FORMAT_RGB565;
2480
	default:
2495
	default:
2481
	case DISPPLANE_BGRX888:
2496
	case DISPPLANE_BGRX888:
2482
		return DRM_FORMAT_XRGB8888;
2497
		return DRM_FORMAT_XRGB8888;
2483
	case DISPPLANE_RGBX888:
2498
	case DISPPLANE_RGBX888:
2484
		return DRM_FORMAT_XBGR8888;
2499
		return DRM_FORMAT_XBGR8888;
2485
	case DISPPLANE_BGRX101010:
2500
	case DISPPLANE_BGRX101010:
2486
		return DRM_FORMAT_XRGB2101010;
2501
		return DRM_FORMAT_XRGB2101010;
2487
	case DISPPLANE_RGBX101010:
2502
	case DISPPLANE_RGBX101010:
2488
		return DRM_FORMAT_XBGR2101010;
2503
		return DRM_FORMAT_XBGR2101010;
2489
	}
2504
	}
2490
}
2505
}
2491
 
2506
 
2492
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2507
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2493
{
2508
{
2494
	switch (format) {
2509
	switch (format) {
2495
	case PLANE_CTL_FORMAT_RGB_565:
2510
	case PLANE_CTL_FORMAT_RGB_565:
2496
		return DRM_FORMAT_RGB565;
2511
		return DRM_FORMAT_RGB565;
2497
	default:
2512
	default:
2498
	case PLANE_CTL_FORMAT_XRGB_8888:
2513
	case PLANE_CTL_FORMAT_XRGB_8888:
2499
		if (rgb_order) {
2514
		if (rgb_order) {
2500
			if (alpha)
2515
			if (alpha)
2501
				return DRM_FORMAT_ABGR8888;
2516
				return DRM_FORMAT_ABGR8888;
2502
			else
2517
			else
2503
				return DRM_FORMAT_XBGR8888;
2518
				return DRM_FORMAT_XBGR8888;
2504
		} else {
2519
		} else {
2505
			if (alpha)
2520
			if (alpha)
2506
				return DRM_FORMAT_ARGB8888;
2521
				return DRM_FORMAT_ARGB8888;
2507
			else
2522
			else
2508
				return DRM_FORMAT_XRGB8888;
2523
				return DRM_FORMAT_XRGB8888;
2509
		}
2524
		}
2510
	case PLANE_CTL_FORMAT_XRGB_2101010:
2525
	case PLANE_CTL_FORMAT_XRGB_2101010:
2511
		if (rgb_order)
2526
		if (rgb_order)
2512
			return DRM_FORMAT_XBGR2101010;
2527
			return DRM_FORMAT_XBGR2101010;
2513
		else
2528
		else
2514
			return DRM_FORMAT_XRGB2101010;
2529
			return DRM_FORMAT_XRGB2101010;
2515
	}
2530
	}
2516
}
2531
}
2517
 
2532
 
2518
static bool
2533
static bool
2519
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2534
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2520
			      struct intel_initial_plane_config *plane_config)
2535
			      struct intel_initial_plane_config *plane_config)
2521
{
2536
{
2522
	struct drm_device *dev = crtc->base.dev;
2537
	struct drm_device *dev = crtc->base.dev;
2523
	struct drm_i915_private *dev_priv = to_i915(dev);
2538
	struct drm_i915_private *dev_priv = to_i915(dev);
2524
	struct drm_i915_gem_object *obj = NULL;
2539
	struct drm_i915_gem_object *obj = NULL;
2525
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2540
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2526
	struct drm_framebuffer *fb = &plane_config->fb->base;
2541
	struct drm_framebuffer *fb = &plane_config->fb->base;
2527
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2542
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2528
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
2543
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
2529
				    PAGE_SIZE);
2544
				    PAGE_SIZE);
2530
 
2545
 
2531
	size_aligned -= base_aligned;
2546
	size_aligned -= base_aligned;
2532
 
2547
 
2533
	if (plane_config->size == 0)
2548
	if (plane_config->size == 0)
2534
		return false;
2549
		return false;
2535
 
2550
 
2536
	/* If the FB is too big, just don't use it since fbdev is not very
2551
	/* If the FB is too big, just don't use it since fbdev is not very
2537
	 * important and we should probably use that space with FBC or other
2552
	 * important and we should probably use that space with FBC or other
2538
	 * features. */
2553
	 * features. */
2539
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
2554
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
2540
		return false;
2555
		return false;
-
 
2556
 
-
 
2557
	mutex_lock(&dev->struct_mutex);
2541
 
2558
 
2542
	obj = i915_gem_object_create_stolen_for_preallocated(dev,
2559
	obj = i915_gem_object_create_stolen_for_preallocated(dev,
2543
							     base_aligned,
2560
							     base_aligned,
2544
							     base_aligned,
2561
							     base_aligned,
2545
							     size_aligned);
2562
							     size_aligned);
2546
	if (!obj)
2563
	if (!obj) {
-
 
2564
		mutex_unlock(&dev->struct_mutex);
2547
		return false;
2565
		return false;
-
 
2566
	}
2548
 
2567
 
2549
	obj->tiling_mode = plane_config->tiling;
2568
	obj->tiling_mode = plane_config->tiling;
2550
	if (obj->tiling_mode == I915_TILING_X)
2569
	if (obj->tiling_mode == I915_TILING_X)
2551
		obj->stride = fb->pitches[0];
2570
		obj->stride = fb->pitches[0];
2552
 
2571
 
2553
	mode_cmd.pixel_format = fb->pixel_format;
2572
	mode_cmd.pixel_format = fb->pixel_format;
2554
	mode_cmd.width = fb->width;
2573
	mode_cmd.width = fb->width;
2555
	mode_cmd.height = fb->height;
2574
	mode_cmd.height = fb->height;
2556
	mode_cmd.pitches[0] = fb->pitches[0];
2575
	mode_cmd.pitches[0] = fb->pitches[0];
2557
	mode_cmd.modifier[0] = fb->modifier[0];
2576
	mode_cmd.modifier[0] = fb->modifier[0];
2558
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2577
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2559
 
-
 
2560
	mutex_lock(&dev->struct_mutex);
2578
 
2561
	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2579
	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2562
				   &mode_cmd, obj)) {
2580
				   &mode_cmd, obj)) {
2563
		DRM_DEBUG_KMS("intel fb init failed\n");
2581
		DRM_DEBUG_KMS("intel fb init failed\n");
2564
		goto out_unref_obj;
2582
		goto out_unref_obj;
2565
	}
2583
	}
-
 
2584
 
2566
	mutex_unlock(&dev->struct_mutex);
2585
	mutex_unlock(&dev->struct_mutex);
2567
 
2586
 
2568
	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2587
	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2569
	return true;
2588
	return true;
2570
 
2589
 
2571
out_unref_obj:
2590
out_unref_obj:
2572
	drm_gem_object_unreference(&obj->base);
2591
	drm_gem_object_unreference(&obj->base);
2573
	mutex_unlock(&dev->struct_mutex);
2592
	mutex_unlock(&dev->struct_mutex);
2574
	return false;
2593
	return false;
2575
}
2594
}
2576
 
2595
 
2577
/* Update plane->state->fb to match plane->fb after driver-internal updates */
2596
/* Update plane->state->fb to match plane->fb after driver-internal updates */
2578
static void
2597
static void
2579
update_state_fb(struct drm_plane *plane)
2598
update_state_fb(struct drm_plane *plane)
2580
{
2599
{
2581
	if (plane->fb == plane->state->fb)
2600
	if (plane->fb == plane->state->fb)
2582
		return;
2601
		return;
2583
 
2602
 
2584
	if (plane->state->fb)
2603
	if (plane->state->fb)
2585
		drm_framebuffer_unreference(plane->state->fb);
2604
		drm_framebuffer_unreference(plane->state->fb);
2586
	plane->state->fb = plane->fb;
2605
	plane->state->fb = plane->fb;
2587
	if (plane->state->fb)
2606
	if (plane->state->fb)
2588
		drm_framebuffer_reference(plane->state->fb);
2607
		drm_framebuffer_reference(plane->state->fb);
2589
}
2608
}
2590
 
2609
 
2591
static void
2610
static void
2592
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2611
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2593
			     struct intel_initial_plane_config *plane_config)
2612
			     struct intel_initial_plane_config *plane_config)
2594
{
2613
{
2595
	struct drm_device *dev = intel_crtc->base.dev;
2614
	struct drm_device *dev = intel_crtc->base.dev;
2596
	struct drm_i915_private *dev_priv = dev->dev_private;
2615
	struct drm_i915_private *dev_priv = dev->dev_private;
2597
	struct drm_crtc *c;
2616
	struct drm_crtc *c;
2598
	struct intel_crtc *i;
2617
	struct intel_crtc *i;
2599
	struct drm_i915_gem_object *obj;
2618
	struct drm_i915_gem_object *obj;
2600
	struct drm_plane *primary = intel_crtc->base.primary;
2619
	struct drm_plane *primary = intel_crtc->base.primary;
2601
	struct drm_plane_state *plane_state = primary->state;
2620
	struct drm_plane_state *plane_state = primary->state;
2602
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2621
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2603
	struct intel_plane *intel_plane = to_intel_plane(primary);
2622
	struct intel_plane *intel_plane = to_intel_plane(primary);
-
 
2623
	struct intel_plane_state *intel_state =
-
 
2624
		to_intel_plane_state(plane_state);
2604
	struct drm_framebuffer *fb;
2625
	struct drm_framebuffer *fb;
2605
 
2626
 
2606
	if (!plane_config->fb)
2627
	if (!plane_config->fb)
2607
		return;
2628
		return;
2608
 
2629
 
2609
	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2630
	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2610
		fb = &plane_config->fb->base;
2631
		fb = &plane_config->fb->base;
2611
		goto valid_fb;
2632
		goto valid_fb;
2612
	}
2633
	}
2613
 
2634
 
2614
	kfree(plane_config->fb);
2635
	kfree(plane_config->fb);
2615
 
2636
 
2616
	/*
2637
	/*
2617
	 * Failed to alloc the obj, check to see if we should share
2638
	 * Failed to alloc the obj, check to see if we should share
2618
	 * an fb with another CRTC instead
2639
	 * an fb with another CRTC instead
2619
	 */
2640
	 */
2620
	for_each_crtc(dev, c) {
2641
	for_each_crtc(dev, c) {
2621
		i = to_intel_crtc(c);
2642
		i = to_intel_crtc(c);
2622
 
2643
 
2623
		if (c == &intel_crtc->base)
2644
		if (c == &intel_crtc->base)
2624
			continue;
2645
			continue;
2625
 
2646
 
2626
		if (!i->active)
2647
		if (!i->active)
2627
			continue;
2648
			continue;
2628
 
2649
 
2629
		fb = c->primary->fb;
2650
		fb = c->primary->fb;
2630
		if (!fb)
2651
		if (!fb)
2631
			continue;
2652
			continue;
2632
 
2653
 
2633
		obj = intel_fb_obj(fb);
2654
		obj = intel_fb_obj(fb);
2634
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2655
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2635
			drm_framebuffer_reference(fb);
2656
			drm_framebuffer_reference(fb);
2636
			goto valid_fb;
2657
			goto valid_fb;
2637
		}
2658
		}
2638
	}
2659
	}
2639
 
2660
 
2640
	/*
2661
	/*
2641
	 * We've failed to reconstruct the BIOS FB.  Current display state
2662
	 * We've failed to reconstruct the BIOS FB.  Current display state
2642
	 * indicates that the primary plane is visible, but has a NULL FB,
2663
	 * indicates that the primary plane is visible, but has a NULL FB,
2643
	 * which will lead to problems later if we don't fix it up.  The
2664
	 * which will lead to problems later if we don't fix it up.  The
2644
	 * simplest solution is to just disable the primary plane now and
2665
	 * simplest solution is to just disable the primary plane now and
2645
	 * pretend the BIOS never had it enabled.
2666
	 * pretend the BIOS never had it enabled.
2646
	 */
2667
	 */
2647
	to_intel_plane_state(plane_state)->visible = false;
2668
	to_intel_plane_state(plane_state)->visible = false;
2648
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2669
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2649
	intel_pre_disable_primary(&intel_crtc->base);
2670
	intel_pre_disable_primary(&intel_crtc->base);
2650
	intel_plane->disable_plane(primary, &intel_crtc->base);
2671
	intel_plane->disable_plane(primary, &intel_crtc->base);
2651
 
2672
 
2652
	return;
2673
	return;
2653
 
2674
 
2654
valid_fb:
2675
valid_fb:
2655
	plane_state->src_x = 0;
2676
	plane_state->src_x = 0;
2656
	plane_state->src_y = 0;
2677
	plane_state->src_y = 0;
2657
	plane_state->src_w = fb->width << 16;
2678
	plane_state->src_w = fb->width << 16;
2658
	plane_state->src_h = fb->height << 16;
2679
	plane_state->src_h = fb->height << 16;
2659
 
2680
 
2660
	plane_state->crtc_x = 0;
2681
	plane_state->crtc_x = 0;
2661
	plane_state->crtc_y = 0;
2682
	plane_state->crtc_y = 0;
2662
	plane_state->crtc_w = fb->width;
2683
	plane_state->crtc_w = fb->width;
2663
	plane_state->crtc_h = fb->height;
2684
	plane_state->crtc_h = fb->height;
-
 
2685
 
-
 
2686
	intel_state->src.x1 = plane_state->src_x;
-
 
2687
	intel_state->src.y1 = plane_state->src_y;
-
 
2688
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
-
 
2689
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
-
 
2690
	intel_state->dst.x1 = plane_state->crtc_x;
-
 
2691
	intel_state->dst.y1 = plane_state->crtc_y;
-
 
2692
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
-
 
2693
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
2664
 
2694
 
2665
	obj = intel_fb_obj(fb);
2695
	obj = intel_fb_obj(fb);
2666
	if (obj->tiling_mode != I915_TILING_NONE)
2696
	if (obj->tiling_mode != I915_TILING_NONE)
2667
		dev_priv->preserve_bios_swizzle = true;
2697
		dev_priv->preserve_bios_swizzle = true;
2668
 
2698
 
2669
	drm_framebuffer_reference(fb);
2699
	drm_framebuffer_reference(fb);
2670
	primary->fb = primary->state->fb = fb;
2700
	primary->fb = primary->state->fb = fb;
2671
	primary->crtc = primary->state->crtc = &intel_crtc->base;
2701
	primary->crtc = primary->state->crtc = &intel_crtc->base;
2672
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
2702
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
2673
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
2703
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
2674
}
2704
}
2675
 
2705
 
2676
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2706
static void i9xx_update_primary_plane(struct drm_plane *primary,
2677
				      struct drm_framebuffer *fb,
2707
				      const struct intel_crtc_state *crtc_state,
2678
				      int x, int y)
2708
				      const struct intel_plane_state *plane_state)
2679
{
2709
{
2680
	struct drm_device *dev = crtc->dev;
2710
	struct drm_device *dev = primary->dev;
2681
	struct drm_i915_private *dev_priv = dev->dev_private;
2711
	struct drm_i915_private *dev_priv = dev->dev_private;
2682
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2712
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2683
	struct drm_plane *primary = crtc->primary;
-
 
2684
	bool visible = to_intel_plane_state(primary->state)->visible;
2713
	struct drm_framebuffer *fb = plane_state->base.fb;
2685
	struct drm_i915_gem_object *obj;
2714
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2686
	int plane = intel_crtc->plane;
2715
	int plane = intel_crtc->plane;
2687
	unsigned long linear_offset;
2716
	u32 linear_offset;
2688
	u32 dspcntr;
2717
	u32 dspcntr;
2689
	i915_reg_t reg = DSPCNTR(plane);
-
 
2690
	int pixel_size;
-
 
2691
 
-
 
2692
	if (!visible || !fb) {
-
 
2693
		I915_WRITE(reg, 0);
-
 
2694
		if (INTEL_INFO(dev)->gen >= 4)
-
 
2695
			I915_WRITE(DSPSURF(plane), 0);
-
 
2696
		else
2718
	i915_reg_t reg = DSPCNTR(plane);
2697
			I915_WRITE(DSPADDR(plane), 0);
-
 
2698
		POSTING_READ(reg);
-
 
2699
		return;
-
 
2700
	}
-
 
2701
 
2719
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2702
	obj = intel_fb_obj(fb);
-
 
2703
	if (WARN_ON(obj == NULL))
-
 
2704
		return;
-
 
2705
 
2720
	int x = plane_state->src.x1 >> 16;
2706
	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2721
	int y = plane_state->src.y1 >> 16;
2707
 
2722
 
2708
	dspcntr = DISPPLANE_GAMMA_ENABLE;
2723
	dspcntr = DISPPLANE_GAMMA_ENABLE;
2709
 
2724
 
2710
	dspcntr |= DISPLAY_PLANE_ENABLE;
2725
	dspcntr |= DISPLAY_PLANE_ENABLE;
2711
 
2726
 
2712
	if (INTEL_INFO(dev)->gen < 4) {
2727
	if (INTEL_INFO(dev)->gen < 4) {
2713
		if (intel_crtc->pipe == PIPE_B)
2728
		if (intel_crtc->pipe == PIPE_B)
2714
			dspcntr |= DISPPLANE_SEL_PIPE_B;
2729
			dspcntr |= DISPPLANE_SEL_PIPE_B;
2715
 
2730
 
2716
		/* pipesrc and dspsize control the size that is scaled from,
2731
		/* pipesrc and dspsize control the size that is scaled from,
2717
		 * which should always be the user's requested size.
2732
		 * which should always be the user's requested size.
2718
		 */
2733
		 */
2719
		I915_WRITE(DSPSIZE(plane),
2734
		I915_WRITE(DSPSIZE(plane),
2720
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
2735
			   ((crtc_state->pipe_src_h - 1) << 16) |
2721
			   (intel_crtc->config->pipe_src_w - 1));
2736
			   (crtc_state->pipe_src_w - 1));
2722
		I915_WRITE(DSPPOS(plane), 0);
2737
		I915_WRITE(DSPPOS(plane), 0);
2723
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2738
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2724
		I915_WRITE(PRIMSIZE(plane),
2739
		I915_WRITE(PRIMSIZE(plane),
2725
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
2740
			   ((crtc_state->pipe_src_h - 1) << 16) |
2726
			   (intel_crtc->config->pipe_src_w - 1));
2741
			   (crtc_state->pipe_src_w - 1));
2727
		I915_WRITE(PRIMPOS(plane), 0);
2742
		I915_WRITE(PRIMPOS(plane), 0);
2728
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
2743
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
2729
	}
2744
	}
2730
 
2745
 
2731
	switch (fb->pixel_format) {
2746
	switch (fb->pixel_format) {
2732
	case DRM_FORMAT_C8:
2747
	case DRM_FORMAT_C8:
2733
		dspcntr |= DISPPLANE_8BPP;
2748
		dspcntr |= DISPPLANE_8BPP;
2734
		break;
2749
		break;
2735
	case DRM_FORMAT_XRGB1555:
2750
	case DRM_FORMAT_XRGB1555:
2736
		dspcntr |= DISPPLANE_BGRX555;
2751
		dspcntr |= DISPPLANE_BGRX555;
2737
		break;
2752
		break;
2738
	case DRM_FORMAT_RGB565:
2753
	case DRM_FORMAT_RGB565:
2739
		dspcntr |= DISPPLANE_BGRX565;
2754
		dspcntr |= DISPPLANE_BGRX565;
2740
		break;
2755
		break;
2741
	case DRM_FORMAT_XRGB8888:
2756
	case DRM_FORMAT_XRGB8888:
2742
		dspcntr |= DISPPLANE_BGRX888;
2757
		dspcntr |= DISPPLANE_BGRX888;
2743
		break;
2758
		break;
2744
	case DRM_FORMAT_XBGR8888:
2759
	case DRM_FORMAT_XBGR8888:
2745
		dspcntr |= DISPPLANE_RGBX888;
2760
		dspcntr |= DISPPLANE_RGBX888;
2746
		break;
2761
		break;
2747
	case DRM_FORMAT_XRGB2101010:
2762
	case DRM_FORMAT_XRGB2101010:
2748
		dspcntr |= DISPPLANE_BGRX101010;
2763
		dspcntr |= DISPPLANE_BGRX101010;
2749
		break;
2764
		break;
2750
	case DRM_FORMAT_XBGR2101010:
2765
	case DRM_FORMAT_XBGR2101010:
2751
		dspcntr |= DISPPLANE_RGBX101010;
2766
		dspcntr |= DISPPLANE_RGBX101010;
2752
		break;
2767
		break;
2753
	default:
2768
	default:
2754
		BUG();
2769
		BUG();
2755
	}
2770
	}
2756
 
2771
 
2757
	if (INTEL_INFO(dev)->gen >= 4 &&
2772
	if (INTEL_INFO(dev)->gen >= 4 &&
2758
	    obj->tiling_mode != I915_TILING_NONE)
2773
	    obj->tiling_mode != I915_TILING_NONE)
2759
		dspcntr |= DISPPLANE_TILED;
2774
		dspcntr |= DISPPLANE_TILED;
2760
 
2775
 
2761
	if (IS_G4X(dev))
2776
	if (IS_G4X(dev))
2762
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2777
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2763
 
2778
 
2764
	linear_offset = y * fb->pitches[0] + x * pixel_size;
2779
	linear_offset = y * fb->pitches[0] + x * cpp;
2765
 
2780
 
2766
	if (INTEL_INFO(dev)->gen >= 4) {
2781
	if (INTEL_INFO(dev)->gen >= 4) {
2767
		intel_crtc->dspaddr_offset =
2782
		intel_crtc->dspaddr_offset =
2768
			intel_gen4_compute_page_offset(dev_priv,
2783
			intel_compute_tile_offset(dev_priv, &x, &y,
2769
						       &x, &y, obj->tiling_mode,
-
 
2770
						       pixel_size,
2784
						  fb->modifier[0], cpp,
2771
						       fb->pitches[0]);
2785
						  fb->pitches[0]);
2772
		linear_offset -= intel_crtc->dspaddr_offset;
2786
		linear_offset -= intel_crtc->dspaddr_offset;
2773
	} else {
2787
	} else {
2774
		intel_crtc->dspaddr_offset = linear_offset;
2788
		intel_crtc->dspaddr_offset = linear_offset;
2775
	}
2789
	}
2776
 
2790
 
2777
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
2791
	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
2778
		dspcntr |= DISPPLANE_ROTATE_180;
2792
		dspcntr |= DISPPLANE_ROTATE_180;
2779
 
2793
 
2780
		x += (intel_crtc->config->pipe_src_w - 1);
2794
		x += (crtc_state->pipe_src_w - 1);
2781
		y += (intel_crtc->config->pipe_src_h - 1);
2795
		y += (crtc_state->pipe_src_h - 1);
2782
 
2796
 
2783
		/* Finding the last pixel of the last line of the display
2797
		/* Finding the last pixel of the last line of the display
2784
		data and adding to linear_offset*/
2798
		data and adding to linear_offset*/
2785
		linear_offset +=
2799
		linear_offset +=
2786
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2800
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2787
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
2801
			(crtc_state->pipe_src_w - 1) * cpp;
2788
	}
2802
	}
2789
 
2803
 
2790
	intel_crtc->adjusted_x = x;
2804
	intel_crtc->adjusted_x = x;
2791
	intel_crtc->adjusted_y = y;
2805
	intel_crtc->adjusted_y = y;
2792
 
2806
 
2793
	I915_WRITE(reg, dspcntr);
2807
	I915_WRITE(reg, dspcntr);
2794
 
2808
 
2795
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2809
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2796
	if (INTEL_INFO(dev)->gen >= 4) {
2810
	if (INTEL_INFO(dev)->gen >= 4) {
2797
		I915_WRITE(DSPSURF(plane),
2811
		I915_WRITE(DSPSURF(plane),
2798
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2812
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2799
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2813
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2800
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2814
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2801
	} else
2815
	} else
2802
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2816
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2803
	POSTING_READ(reg);
2817
	POSTING_READ(reg);
2804
}
2818
}
2805
 
2819
 
2806
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2820
static void i9xx_disable_primary_plane(struct drm_plane *primary,
2807
					  struct drm_framebuffer *fb,
-
 
2808
					  int x, int y)
2821
				       struct drm_crtc *crtc)
2809
{
2822
{
2810
	struct drm_device *dev = crtc->dev;
2823
	struct drm_device *dev = crtc->dev;
2811
	struct drm_i915_private *dev_priv = dev->dev_private;
2824
	struct drm_i915_private *dev_priv = dev->dev_private;
2812
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2825
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2813
	struct drm_plane *primary = crtc->primary;
-
 
2814
	bool visible = to_intel_plane_state(primary->state)->visible;
-
 
2815
	struct drm_i915_gem_object *obj;
-
 
2816
	int plane = intel_crtc->plane;
2826
	int plane = intel_crtc->plane;
2817
	unsigned long linear_offset;
-
 
2818
	u32 dspcntr;
-
 
2819
	i915_reg_t reg = DSPCNTR(plane);
-
 
2820
	int pixel_size;
-
 
2821
 
2827
 
2822
	if (!visible || !fb) {
2828
	I915_WRITE(DSPCNTR(plane), 0);
2823
		I915_WRITE(reg, 0);
2829
	if (INTEL_INFO(dev_priv)->gen >= 4)
-
 
2830
		I915_WRITE(DSPSURF(plane), 0);
2824
		I915_WRITE(DSPSURF(plane), 0);
2831
	else
2825
		POSTING_READ(reg);
2832
		I915_WRITE(DSPADDR(plane), 0);
2826
		return;
2833
	POSTING_READ(DSPCNTR(plane));
-
 
2834
}
-
 
2835
 
-
 
2836
static void ironlake_update_primary_plane(struct drm_plane *primary,
-
 
2837
					  const struct intel_crtc_state *crtc_state,
-
 
2838
					  const struct intel_plane_state *plane_state)
-
 
2839
{
-
 
2840
	struct drm_device *dev = primary->dev;
-
 
2841
	struct drm_i915_private *dev_priv = dev->dev_private;
2827
	}
2842
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2828
 
2843
	struct drm_framebuffer *fb = plane_state->base.fb;
-
 
2844
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2829
	obj = intel_fb_obj(fb);
2845
	int plane = intel_crtc->plane;
2830
	if (WARN_ON(obj == NULL))
-
 
-
 
2846
	u32 linear_offset;
2831
		return;
2847
	u32 dspcntr;
-
 
2848
	i915_reg_t reg = DSPCNTR(plane);
-
 
2849
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2832
 
2850
	int x = plane_state->src.x1 >> 16;
2833
	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-
 
2834
 
2851
	int y = plane_state->src.y1 >> 16;
2835
	dspcntr = DISPPLANE_GAMMA_ENABLE;
2852
 
2836
 
2853
	dspcntr = DISPPLANE_GAMMA_ENABLE;
2837
	dspcntr |= DISPLAY_PLANE_ENABLE;
2854
	dspcntr |= DISPLAY_PLANE_ENABLE;
2838
 
2855
 
2839
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2856
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2840
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2857
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2841
 
2858
 
2842
	switch (fb->pixel_format) {
2859
	switch (fb->pixel_format) {
2843
	case DRM_FORMAT_C8:
2860
	case DRM_FORMAT_C8:
2844
		dspcntr |= DISPPLANE_8BPP;
2861
		dspcntr |= DISPPLANE_8BPP;
2845
		break;
2862
		break;
2846
	case DRM_FORMAT_RGB565:
2863
	case DRM_FORMAT_RGB565:
2847
		dspcntr |= DISPPLANE_BGRX565;
2864
		dspcntr |= DISPPLANE_BGRX565;
2848
		break;
2865
		break;
2849
	case DRM_FORMAT_XRGB8888:
2866
	case DRM_FORMAT_XRGB8888:
2850
		dspcntr |= DISPPLANE_BGRX888;
2867
		dspcntr |= DISPPLANE_BGRX888;
2851
		break;
2868
		break;
2852
	case DRM_FORMAT_XBGR8888:
2869
	case DRM_FORMAT_XBGR8888:
2853
		dspcntr |= DISPPLANE_RGBX888;
2870
		dspcntr |= DISPPLANE_RGBX888;
2854
		break;
2871
		break;
2855
	case DRM_FORMAT_XRGB2101010:
2872
	case DRM_FORMAT_XRGB2101010:
2856
		dspcntr |= DISPPLANE_BGRX101010;
2873
		dspcntr |= DISPPLANE_BGRX101010;
2857
		break;
2874
		break;
2858
	case DRM_FORMAT_XBGR2101010:
2875
	case DRM_FORMAT_XBGR2101010:
2859
		dspcntr |= DISPPLANE_RGBX101010;
2876
		dspcntr |= DISPPLANE_RGBX101010;
2860
		break;
2877
		break;
2861
	default:
2878
	default:
2862
		BUG();
2879
		BUG();
2863
	}
2880
	}
2864
 
2881
 
2865
	if (obj->tiling_mode != I915_TILING_NONE)
2882
	if (obj->tiling_mode != I915_TILING_NONE)
2866
		dspcntr |= DISPPLANE_TILED;
2883
		dspcntr |= DISPPLANE_TILED;
2867
 
2884
 
2868
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2885
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2869
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2886
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2870
 
2887
 
2871
	linear_offset = y * fb->pitches[0] + x * pixel_size;
2888
	linear_offset = y * fb->pitches[0] + x * cpp;
2872
	intel_crtc->dspaddr_offset =
2889
	intel_crtc->dspaddr_offset =
2873
		intel_gen4_compute_page_offset(dev_priv,
-
 
2874
					       &x, &y, obj->tiling_mode,
2890
		intel_compute_tile_offset(dev_priv, &x, &y,
2875
					       pixel_size,
2891
					  fb->modifier[0], cpp,
2876
					       fb->pitches[0]);
2892
					  fb->pitches[0]);
2877
	linear_offset -= intel_crtc->dspaddr_offset;
2893
	linear_offset -= intel_crtc->dspaddr_offset;
2878
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
2894
	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
2879
		dspcntr |= DISPPLANE_ROTATE_180;
2895
		dspcntr |= DISPPLANE_ROTATE_180;
2880
 
2896
 
2881
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2897
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2882
			x += (intel_crtc->config->pipe_src_w - 1);
2898
			x += (crtc_state->pipe_src_w - 1);
2883
			y += (intel_crtc->config->pipe_src_h - 1);
2899
			y += (crtc_state->pipe_src_h - 1);
2884
 
2900
 
2885
			/* Finding the last pixel of the last line of the display
2901
			/* Finding the last pixel of the last line of the display
2886
			data and adding to linear_offset*/
2902
			data and adding to linear_offset*/
2887
			linear_offset +=
2903
			linear_offset +=
2888
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2904
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2889
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
2905
				(crtc_state->pipe_src_w - 1) * cpp;
2890
		}
2906
		}
2891
	}
2907
	}
2892
 
2908
 
2893
	intel_crtc->adjusted_x = x;
2909
	intel_crtc->adjusted_x = x;
2894
	intel_crtc->adjusted_y = y;
2910
	intel_crtc->adjusted_y = y;
2895
 
2911
 
2896
	I915_WRITE(reg, dspcntr);
2912
	I915_WRITE(reg, dspcntr);
2897
 
2913
 
2898
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2914
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2899
	I915_WRITE(DSPSURF(plane),
2915
	I915_WRITE(DSPSURF(plane),
2900
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2916
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2901
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2917
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2902
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2918
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2903
	} else {
2919
	} else {
2904
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2920
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2905
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2921
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2906
	}
2922
	}
2907
	POSTING_READ(reg);
2923
	POSTING_READ(reg);
2908
}
2924
}
2909
 
2925
 
2910
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2926
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2911
			      uint32_t pixel_format)
2927
			      uint64_t fb_modifier, uint32_t pixel_format)
2912
{
-
 
2913
	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
-
 
2914
 
-
 
2915
	/*
-
 
2916
	 * The stride is either expressed as a multiple of 64 bytes
-
 
2917
	 * chunks for linear buffers or in number of tiles for tiled
-
 
2918
	 * buffers.
-
 
2919
	 */
-
 
2920
	switch (fb_modifier) {
2928
{
2921
	case DRM_FORMAT_MOD_NONE:
-
 
2922
		return 64;
-
 
2923
	case I915_FORMAT_MOD_X_TILED:
-
 
2924
		if (INTEL_INFO(dev)->gen == 2)
-
 
2925
			return 128;
-
 
2926
		return 512;
-
 
2927
	case I915_FORMAT_MOD_Y_TILED:
-
 
2928
		/* No need to check for old gens and Y tiling since this is
-
 
2929
		 * about the display engine and those will be blocked before
-
 
2930
		 * we get here.
-
 
2931
		 */
-
 
2932
		return 128;
-
 
2933
	case I915_FORMAT_MOD_Yf_TILED:
-
 
2934
		if (bits_per_pixel == 8)
-
 
2935
			return 64;
-
 
2936
		else
-
 
2937
			return 128;
-
 
2938
	default:
-
 
2939
		MISSING_CASE(fb_modifier);
2929
	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
-
 
2930
		return 64;
-
 
2931
	} else {
-
 
2932
		int cpp = drm_format_plane_cpp(pixel_format, 0);
-
 
2933
 
2940
		return 64;
2934
		return intel_tile_width(dev_priv, fb_modifier, cpp);
2941
	}
2935
	}
2942
}
2936
}
2943
 
2937
 
2944
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2938
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2945
				     struct drm_i915_gem_object *obj,
2939
			   struct drm_i915_gem_object *obj,
2946
				     unsigned int plane)
2940
			   unsigned int plane)
2947
{
2941
{
2948
	struct i915_ggtt_view view;
2942
	struct i915_ggtt_view view;
2949
	struct i915_vma *vma;
2943
	struct i915_vma *vma;
2950
	u64 offset;
2944
	u64 offset;
2951
 
2945
 
2952
	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2946
	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2953
				intel_plane->base.state);
2947
				intel_plane->base.state);
2954
 
2948
 
2955
	vma = i915_gem_obj_to_ggtt_view(obj, &view);
2949
	vma = i915_gem_obj_to_ggtt_view(obj, &view);
2956
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2950
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2957
		view.type))
2951
		view.type))
2958
		return -1;
2952
		return -1;
2959
 
2953
 
2960
	offset = vma->node.start;
2954
	offset = vma->node.start;
2961
 
2955
 
2962
	if (plane == 1) {
2956
	if (plane == 1) {
2963
		offset += vma->ggtt_view.params.rotation_info.uv_start_page *
2957
		offset += vma->ggtt_view.params.rotated.uv_start_page *
2964
			  PAGE_SIZE;
2958
			  PAGE_SIZE;
2965
	}
2959
	}
2966
 
2960
 
2967
	WARN_ON(upper_32_bits(offset));
2961
	WARN_ON(upper_32_bits(offset));
2968
 
2962
 
2969
	return lower_32_bits(offset);
2963
	return lower_32_bits(offset);
2970
}
2964
}
2971
 
2965
 
2972
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2966
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2973
{
2967
{
2974
	struct drm_device *dev = intel_crtc->base.dev;
2968
	struct drm_device *dev = intel_crtc->base.dev;
2975
	struct drm_i915_private *dev_priv = dev->dev_private;
2969
	struct drm_i915_private *dev_priv = dev->dev_private;
2976
 
2970
 
2977
	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2971
	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2978
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2972
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2979
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2973
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2980
}
2974
}
2981
 
2975
 
2982
/*
2976
/*
2983
 * This function detaches (aka. unbinds) unused scalers in hardware
2977
 * This function detaches (aka. unbinds) unused scalers in hardware
2984
 */
2978
 */
2985
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2979
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2986
{
2980
{
2987
	struct intel_crtc_scaler_state *scaler_state;
2981
	struct intel_crtc_scaler_state *scaler_state;
2988
	int i;
2982
	int i;
2989
 
2983
 
2990
	scaler_state = &intel_crtc->config->scaler_state;
2984
	scaler_state = &intel_crtc->config->scaler_state;
2991
 
2985
 
2992
	/* loop through and disable scalers that aren't in use */
2986
	/* loop through and disable scalers that aren't in use */
2993
	for (i = 0; i < intel_crtc->num_scalers; i++) {
2987
	for (i = 0; i < intel_crtc->num_scalers; i++) {
2994
		if (!scaler_state->scalers[i].in_use)
2988
		if (!scaler_state->scalers[i].in_use)
2995
			skl_detach_scaler(intel_crtc, i);
2989
			skl_detach_scaler(intel_crtc, i);
2996
	}
2990
	}
2997
}
2991
}
2998
 
2992
 
2999
u32 skl_plane_ctl_format(uint32_t pixel_format)
2993
u32 skl_plane_ctl_format(uint32_t pixel_format)
3000
{
2994
{
3001
	switch (pixel_format) {
2995
	switch (pixel_format) {
3002
	case DRM_FORMAT_C8:
2996
	case DRM_FORMAT_C8:
3003
		return PLANE_CTL_FORMAT_INDEXED;
2997
		return PLANE_CTL_FORMAT_INDEXED;
3004
	case DRM_FORMAT_RGB565:
2998
	case DRM_FORMAT_RGB565:
3005
		return PLANE_CTL_FORMAT_RGB_565;
2999
		return PLANE_CTL_FORMAT_RGB_565;
3006
	case DRM_FORMAT_XBGR8888:
3000
	case DRM_FORMAT_XBGR8888:
3007
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3001
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3008
	case DRM_FORMAT_XRGB8888:
3002
	case DRM_FORMAT_XRGB8888:
3009
		return PLANE_CTL_FORMAT_XRGB_8888;
3003
		return PLANE_CTL_FORMAT_XRGB_8888;
3010
	/*
3004
	/*
3011
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3005
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3012
	 * to be already pre-multiplied. We need to add a knob (or a different
3006
	 * to be already pre-multiplied. We need to add a knob (or a different
3013
	 * DRM_FORMAT) for user-space to configure that.
3007
	 * DRM_FORMAT) for user-space to configure that.
3014
	 */
3008
	 */
3015
	case DRM_FORMAT_ABGR8888:
3009
	case DRM_FORMAT_ABGR8888:
3016
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
3010
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
3017
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3011
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3018
	case DRM_FORMAT_ARGB8888:
3012
	case DRM_FORMAT_ARGB8888:
3019
		return PLANE_CTL_FORMAT_XRGB_8888 |
3013
		return PLANE_CTL_FORMAT_XRGB_8888 |
3020
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3014
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3021
	case DRM_FORMAT_XRGB2101010:
3015
	case DRM_FORMAT_XRGB2101010:
3022
		return PLANE_CTL_FORMAT_XRGB_2101010;
3016
		return PLANE_CTL_FORMAT_XRGB_2101010;
3023
	case DRM_FORMAT_XBGR2101010:
3017
	case DRM_FORMAT_XBGR2101010:
3024
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3018
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3025
	case DRM_FORMAT_YUYV:
3019
	case DRM_FORMAT_YUYV:
3026
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3020
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3027
	case DRM_FORMAT_YVYU:
3021
	case DRM_FORMAT_YVYU:
3028
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3022
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3029
	case DRM_FORMAT_UYVY:
3023
	case DRM_FORMAT_UYVY:
3030
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3024
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3031
	case DRM_FORMAT_VYUY:
3025
	case DRM_FORMAT_VYUY:
3032
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3026
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3033
	default:
3027
	default:
3034
		MISSING_CASE(pixel_format);
3028
		MISSING_CASE(pixel_format);
3035
	}
3029
	}
3036
 
3030
 
3037
	return 0;
3031
	return 0;
3038
}
3032
}
3039
 
3033
 
3040
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3034
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3041
{
3035
{
3042
	switch (fb_modifier) {
3036
	switch (fb_modifier) {
3043
	case DRM_FORMAT_MOD_NONE:
3037
	case DRM_FORMAT_MOD_NONE:
3044
		break;
3038
		break;
3045
	case I915_FORMAT_MOD_X_TILED:
3039
	case I915_FORMAT_MOD_X_TILED:
3046
		return PLANE_CTL_TILED_X;
3040
		return PLANE_CTL_TILED_X;
3047
	case I915_FORMAT_MOD_Y_TILED:
3041
	case I915_FORMAT_MOD_Y_TILED:
3048
		return PLANE_CTL_TILED_Y;
3042
		return PLANE_CTL_TILED_Y;
3049
	case I915_FORMAT_MOD_Yf_TILED:
3043
	case I915_FORMAT_MOD_Yf_TILED:
3050
		return PLANE_CTL_TILED_YF;
3044
		return PLANE_CTL_TILED_YF;
3051
	default:
3045
	default:
3052
		MISSING_CASE(fb_modifier);
3046
		MISSING_CASE(fb_modifier);
3053
	}
3047
	}
3054
 
3048
 
3055
	return 0;
3049
	return 0;
3056
}
3050
}
3057
 
3051
 
3058
u32 skl_plane_ctl_rotation(unsigned int rotation)
3052
u32 skl_plane_ctl_rotation(unsigned int rotation)
3059
{
3053
{
3060
	switch (rotation) {
3054
	switch (rotation) {
3061
	case BIT(DRM_ROTATE_0):
3055
	case BIT(DRM_ROTATE_0):
3062
		break;
3056
		break;
3063
	/*
3057
	/*
3064
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
3058
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
3065
	 * while i915 HW rotation is clockwise, thats why this swapping.
3059
	 * while i915 HW rotation is clockwise, thats why this swapping.
3066
	 */
3060
	 */
3067
	case BIT(DRM_ROTATE_90):
3061
	case BIT(DRM_ROTATE_90):
3068
		return PLANE_CTL_ROTATE_270;
3062
		return PLANE_CTL_ROTATE_270;
3069
	case BIT(DRM_ROTATE_180):
3063
	case BIT(DRM_ROTATE_180):
3070
		return PLANE_CTL_ROTATE_180;
3064
		return PLANE_CTL_ROTATE_180;
3071
	case BIT(DRM_ROTATE_270):
3065
	case BIT(DRM_ROTATE_270):
3072
		return PLANE_CTL_ROTATE_90;
3066
		return PLANE_CTL_ROTATE_90;
3073
	default:
3067
	default:
3074
		MISSING_CASE(rotation);
3068
		MISSING_CASE(rotation);
3075
	}
3069
	}
3076
 
3070
 
3077
	return 0;
3071
	return 0;
3078
}
3072
}
3079
 
3073
 
3080
static void skylake_update_primary_plane(struct drm_crtc *crtc,
3074
static void skylake_update_primary_plane(struct drm_plane *plane,
3081
					 struct drm_framebuffer *fb,
3075
					 const struct intel_crtc_state *crtc_state,
3082
					 int x, int y)
3076
					 const struct intel_plane_state *plane_state)
3083
{
3077
{
3084
	struct drm_device *dev = crtc->dev;
3078
	struct drm_device *dev = plane->dev;
3085
	struct drm_i915_private *dev_priv = dev->dev_private;
3079
	struct drm_i915_private *dev_priv = dev->dev_private;
3086
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3080
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3087
	struct drm_plane *plane = crtc->primary;
-
 
3088
	bool visible = to_intel_plane_state(plane->state)->visible;
3081
	struct drm_framebuffer *fb = plane_state->base.fb;
3089
	struct drm_i915_gem_object *obj;
3082
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3090
	int pipe = intel_crtc->pipe;
3083
	int pipe = intel_crtc->pipe;
3091
	u32 plane_ctl, stride_div, stride;
3084
	u32 plane_ctl, stride_div, stride;
3092
	u32 tile_height, plane_offset, plane_size;
3085
	u32 tile_height, plane_offset, plane_size;
3093
	unsigned int rotation;
3086
	unsigned int rotation = plane_state->base.rotation;
3094
	int x_offset, y_offset;
3087
	int x_offset, y_offset;
3095
	u32 surf_addr;
3088
	u32 surf_addr;
3096
	struct intel_crtc_state *crtc_state = intel_crtc->config;
3089
	int scaler_id = plane_state->scaler_id;
3097
	struct intel_plane_state *plane_state;
3090
	int src_x = plane_state->src.x1 >> 16;
3098
	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
3091
	int src_y = plane_state->src.y1 >> 16;
3099
	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
3092
	int src_w = drm_rect_width(&plane_state->src) >> 16;
3100
	int scaler_id = -1;
-
 
3101
 
-
 
3102
	plane_state = to_intel_plane_state(plane->state);
3093
	int src_h = drm_rect_height(&plane_state->src) >> 16;
3103
 
-
 
3104
	if (!visible || !fb) {
3094
	int dst_x = plane_state->dst.x1;
3105
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
3095
	int dst_y = plane_state->dst.y1;
3106
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
3096
	int dst_w = drm_rect_width(&plane_state->dst);
3107
		POSTING_READ(PLANE_CTL(pipe, 0));
3097
	int dst_h = drm_rect_height(&plane_state->dst);
3108
		return;
-
 
3109
	}
-
 
3110
 
3098
 
3111
	plane_ctl = PLANE_CTL_ENABLE |
3099
	plane_ctl = PLANE_CTL_ENABLE |
3112
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
3100
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
3113
		    PLANE_CTL_PIPE_CSC_ENABLE;
3101
		    PLANE_CTL_PIPE_CSC_ENABLE;
3114
 
3102
 
3115
	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
3103
	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
3116
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
3104
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
3117
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3105
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3118
 
-
 
3119
	rotation = plane->state->rotation;
-
 
3120
	plane_ctl |= skl_plane_ctl_rotation(rotation);
3106
	plane_ctl |= skl_plane_ctl_rotation(rotation);
3121
 
-
 
3122
	obj = intel_fb_obj(fb);
3107
 
3123
	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
3108
	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
3124
					       fb->pixel_format);
3109
					       fb->pixel_format);
3125
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3110
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3126
 
3111
 
3127
	WARN_ON(drm_rect_width(&plane_state->src) == 0);
3112
	WARN_ON(drm_rect_width(&plane_state->src) == 0);
3128
 
-
 
3129
	scaler_id = plane_state->scaler_id;
-
 
3130
	src_x = plane_state->src.x1 >> 16;
-
 
3131
	src_y = plane_state->src.y1 >> 16;
-
 
3132
	src_w = drm_rect_width(&plane_state->src) >> 16;
-
 
3133
	src_h = drm_rect_height(&plane_state->src) >> 16;
-
 
3134
	dst_x = plane_state->dst.x1;
-
 
3135
	dst_y = plane_state->dst.y1;
-
 
3136
	dst_w = drm_rect_width(&plane_state->dst);
-
 
3137
	dst_h = drm_rect_height(&plane_state->dst);
-
 
3138
 
-
 
3139
	WARN_ON(x != src_x || y != src_y);
-
 
3140
 
3113
 
-
 
3114
	if (intel_rotation_90_or_270(rotation)) {
-
 
3115
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3141
	if (intel_rotation_90_or_270(rotation)) {
3116
 
3142
		/* stride = Surface height in tiles */
3117
		/* stride = Surface height in tiles */
3143
		tile_height = intel_tile_height(dev, fb->pixel_format,
-
 
3144
						fb->modifier[0], 0);
3118
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
3145
		stride = DIV_ROUND_UP(fb->height, tile_height);
3119
		stride = DIV_ROUND_UP(fb->height, tile_height);
3146
		x_offset = stride * tile_height - y - src_h;
3120
		x_offset = stride * tile_height - src_y - src_h;
3147
		y_offset = x;
3121
		y_offset = src_x;
3148
		plane_size = (src_w - 1) << 16 | (src_h - 1);
3122
		plane_size = (src_w - 1) << 16 | (src_h - 1);
3149
	} else {
3123
	} else {
3150
		stride = fb->pitches[0] / stride_div;
3124
		stride = fb->pitches[0] / stride_div;
3151
		x_offset = x;
3125
		x_offset = src_x;
3152
		y_offset = y;
3126
		y_offset = src_y;
3153
		plane_size = (src_h - 1) << 16 | (src_w - 1);
3127
		plane_size = (src_h - 1) << 16 | (src_w - 1);
3154
	}
3128
	}
3155
	plane_offset = y_offset << 16 | x_offset;
3129
	plane_offset = y_offset << 16 | x_offset;
3156
 
3130
 
3157
	intel_crtc->adjusted_x = x_offset;
3131
	intel_crtc->adjusted_x = x_offset;
3158
	intel_crtc->adjusted_y = y_offset;
3132
	intel_crtc->adjusted_y = y_offset;
3159
 
3133
 
3160
	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3134
	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3161
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3135
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3162
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
3136
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
3163
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
3137
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
3164
 
3138
 
3165
	if (scaler_id >= 0) {
3139
	if (scaler_id >= 0) {
3166
		uint32_t ps_ctrl = 0;
3140
		uint32_t ps_ctrl = 0;
3167
 
3141
 
3168
		WARN_ON(!dst_w || !dst_h);
3142
		WARN_ON(!dst_w || !dst_h);
3169
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
3143
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
3170
			crtc_state->scaler_state.scalers[scaler_id].mode;
3144
			crtc_state->scaler_state.scalers[scaler_id].mode;
3171
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
3145
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
3172
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
3146
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
3173
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
3147
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
3174
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
3148
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
3175
		I915_WRITE(PLANE_POS(pipe, 0), 0);
3149
		I915_WRITE(PLANE_POS(pipe, 0), 0);
3176
	} else {
3150
	} else {
3177
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
3151
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
3178
	}
3152
	}
3179
 
3153
 
3180
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3154
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3181
 
3155
 
3182
	POSTING_READ(PLANE_SURF(pipe, 0));
3156
	POSTING_READ(PLANE_SURF(pipe, 0));
3183
}
3157
}
3184
 
-
 
3185
/* Assume fb object is pinned & idle & fenced and just update base pointers */
-
 
3186
static int
3158
 
3187
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3159
static void skylake_disable_primary_plane(struct drm_plane *primary,
3188
			   int x, int y, enum mode_set_atomic state)
3160
					  struct drm_crtc *crtc)
3189
{
3161
{
3190
	struct drm_device *dev = crtc->dev;
3162
	struct drm_device *dev = crtc->dev;
3191
	struct drm_i915_private *dev_priv = dev->dev_private;
3163
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3164
	int pipe = to_intel_crtc(crtc)->pipe;
-
 
3165
 
3192
 
3166
	I915_WRITE(PLANE_CTL(pipe, 0), 0);
3193
	if (dev_priv->fbc.deactivate)
3167
	I915_WRITE(PLANE_SURF(pipe, 0), 0);
-
 
3168
	POSTING_READ(PLANE_SURF(pipe, 0));
-
 
3169
}
-
 
3170
 
-
 
3171
/* Assume fb object is pinned & idle & fenced and just update base pointers */
-
 
3172
static int
-
 
3173
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-
 
3174
			   int x, int y, enum mode_set_atomic state)
3194
		dev_priv->fbc.deactivate(dev_priv);
3175
{
3195
 
3176
	/* Support for kgdboc is disabled, this needs a major rework. */
3196
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
3177
	DRM_ERROR("legacy panic handler not supported any more.\n");
3197
 
3178
 
3198
	return 0;
3179
	return -ENODEV;
3199
}
3180
}
3200
 
3181
 
3201
static void intel_complete_page_flips(struct drm_device *dev)
3182
static void intel_complete_page_flips(struct drm_device *dev)
3202
{
3183
{
3203
	struct drm_crtc *crtc;
3184
	struct drm_crtc *crtc;
3204
 
3185
 
3205
	for_each_crtc(dev, crtc) {
3186
	for_each_crtc(dev, crtc) {
3206
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3187
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3207
		enum plane plane = intel_crtc->plane;
3188
		enum plane plane = intel_crtc->plane;
3208
 
3189
 
3209
		intel_prepare_page_flip(dev, plane);
3190
		intel_prepare_page_flip(dev, plane);
3210
		intel_finish_page_flip_plane(dev, plane);
3191
		intel_finish_page_flip_plane(dev, plane);
3211
	}
3192
	}
3212
}
3193
}
3213
 
3194
 
3214
static void intel_update_primary_planes(struct drm_device *dev)
3195
static void intel_update_primary_planes(struct drm_device *dev)
3215
{
3196
{
3216
	struct drm_crtc *crtc;
3197
	struct drm_crtc *crtc;
3217
 
3198
 
3218
	for_each_crtc(dev, crtc) {
3199
	for_each_crtc(dev, crtc) {
3219
		struct intel_plane *plane = to_intel_plane(crtc->primary);
3200
		struct intel_plane *plane = to_intel_plane(crtc->primary);
3220
		struct intel_plane_state *plane_state;
3201
		struct intel_plane_state *plane_state;
3221
 
3202
 
3222
		drm_modeset_lock_crtc(crtc, &plane->base);
3203
		drm_modeset_lock_crtc(crtc, &plane->base);
3223
		plane_state = to_intel_plane_state(plane->base.state);
3204
		plane_state = to_intel_plane_state(plane->base.state);
3224
 
3205
 
3225
		if (crtc->state->active && plane_state->base.fb)
3206
		if (plane_state->visible)
-
 
3207
			plane->update_plane(&plane->base,
-
 
3208
					    to_intel_crtc_state(crtc->state),
3226
			plane->commit_plane(&plane->base, plane_state);
3209
					    plane_state);
3227
 
3210
 
3228
		drm_modeset_unlock_crtc(crtc);
3211
		drm_modeset_unlock_crtc(crtc);
3229
	}
3212
	}
3230
}
3213
}
3231
 
3214
 
3232
void intel_prepare_reset(struct drm_device *dev)
3215
void intel_prepare_reset(struct drm_device *dev)
3233
{
3216
{
3234
	/* no reset support for gen2 */
3217
	/* no reset support for gen2 */
3235
	if (IS_GEN2(dev))
3218
	if (IS_GEN2(dev))
3236
		return;
3219
		return;
3237
 
3220
 
3238
	/* reset doesn't touch the display */
3221
	/* reset doesn't touch the display */
3239
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3222
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3240
		return;
3223
		return;
3241
 
3224
 
3242
	drm_modeset_lock_all(dev);
3225
	drm_modeset_lock_all(dev);
3243
	/*
3226
	/*
3244
	 * Disabling the crtcs gracefully seems nicer. Also the
3227
	 * Disabling the crtcs gracefully seems nicer. Also the
3245
	 * g33 docs say we should at least disable all the planes.
3228
	 * g33 docs say we should at least disable all the planes.
3246
	 */
3229
	 */
3247
	intel_display_suspend(dev);
3230
	intel_display_suspend(dev);
3248
}
3231
}
3249
 
3232
 
3250
void intel_finish_reset(struct drm_device *dev)
3233
void intel_finish_reset(struct drm_device *dev)
3251
{
3234
{
3252
	struct drm_i915_private *dev_priv = to_i915(dev);
3235
	struct drm_i915_private *dev_priv = to_i915(dev);
3253
 
3236
 
3254
	/*
3237
	/*
3255
	 * Flips in the rings will be nuked by the reset,
3238
	 * Flips in the rings will be nuked by the reset,
3256
	 * so complete all pending flips so that user space
3239
	 * so complete all pending flips so that user space
3257
	 * will get its events and not get stuck.
3240
	 * will get its events and not get stuck.
3258
	 */
3241
	 */
3259
	intel_complete_page_flips(dev);
3242
	intel_complete_page_flips(dev);
3260
 
3243
 
3261
	/* no reset support for gen2 */
3244
	/* no reset support for gen2 */
3262
	if (IS_GEN2(dev))
3245
	if (IS_GEN2(dev))
3263
		return;
3246
		return;
3264
 
3247
 
3265
	/* reset doesn't touch the display */
3248
	/* reset doesn't touch the display */
3266
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
3249
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
3267
		/*
3250
		/*
3268
		 * Flips in the rings have been nuked by the reset,
3251
		 * Flips in the rings have been nuked by the reset,
3269
		 * so update the base address of all primary
3252
		 * so update the base address of all primary
3270
		 * planes to the the last fb to make sure we're
3253
		 * planes to the the last fb to make sure we're
3271
		 * showing the correct fb after a reset.
3254
		 * showing the correct fb after a reset.
3272
		 *
3255
		 *
3273
		 * FIXME: Atomic will make this obsolete since we won't schedule
3256
		 * FIXME: Atomic will make this obsolete since we won't schedule
3274
		 * CS-based flips (which might get lost in gpu resets) any more.
3257
		 * CS-based flips (which might get lost in gpu resets) any more.
3275
		 */
3258
		 */
3276
		intel_update_primary_planes(dev);
3259
		intel_update_primary_planes(dev);
3277
		return;
3260
		return;
3278
	}
3261
	}
3279
 
3262
 
3280
	/*
3263
	/*
3281
	 * The display has been reset as well,
3264
	 * The display has been reset as well,
3282
	 * so need a full re-initialization.
3265
	 * so need a full re-initialization.
3283
	 */
3266
	 */
3284
	intel_runtime_pm_disable_interrupts(dev_priv);
3267
	intel_runtime_pm_disable_interrupts(dev_priv);
3285
	intel_runtime_pm_enable_interrupts(dev_priv);
3268
	intel_runtime_pm_enable_interrupts(dev_priv);
3286
 
3269
 
3287
	intel_modeset_init_hw(dev);
3270
	intel_modeset_init_hw(dev);
3288
 
3271
 
3289
	spin_lock_irq(&dev_priv->irq_lock);
3272
	spin_lock_irq(&dev_priv->irq_lock);
3290
	if (dev_priv->display.hpd_irq_setup)
3273
	if (dev_priv->display.hpd_irq_setup)
3291
		dev_priv->display.hpd_irq_setup(dev);
3274
		dev_priv->display.hpd_irq_setup(dev);
3292
	spin_unlock_irq(&dev_priv->irq_lock);
3275
	spin_unlock_irq(&dev_priv->irq_lock);
3293
 
3276
 
3294
	intel_display_resume(dev);
3277
	intel_display_resume(dev);
3295
 
3278
 
3296
	intel_hpd_init(dev_priv);
3279
	intel_hpd_init(dev_priv);
3297
 
3280
 
3298
	drm_modeset_unlock_all(dev);
3281
	drm_modeset_unlock_all(dev);
3299
}
3282
}
3300
 
3283
 
3301
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3284
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3302
{
3285
{
3303
	struct drm_device *dev = crtc->dev;
3286
	struct drm_device *dev = crtc->dev;
3304
	struct drm_i915_private *dev_priv = dev->dev_private;
3287
	struct drm_i915_private *dev_priv = dev->dev_private;
3305
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3288
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3306
	bool pending;
3289
	bool pending;
3307
 
3290
 
3308
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3291
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3309
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3292
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3310
		return false;
3293
		return false;
3311
 
3294
 
3312
	spin_lock_irq(&dev->event_lock);
3295
	spin_lock_irq(&dev->event_lock);
3313
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
3296
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
3314
	spin_unlock_irq(&dev->event_lock);
3297
	spin_unlock_irq(&dev->event_lock);
3315
 
3298
 
3316
	return pending;
3299
	return pending;
3317
}
3300
}
3318
 
3301
 
3319
static void intel_update_pipe_config(struct intel_crtc *crtc,
3302
static void intel_update_pipe_config(struct intel_crtc *crtc,
3320
				     struct intel_crtc_state *old_crtc_state)
3303
				     struct intel_crtc_state *old_crtc_state)
3321
{
3304
{
3322
	struct drm_device *dev = crtc->base.dev;
3305
	struct drm_device *dev = crtc->base.dev;
3323
	struct drm_i915_private *dev_priv = dev->dev_private;
3306
	struct drm_i915_private *dev_priv = dev->dev_private;
3324
	struct intel_crtc_state *pipe_config =
3307
	struct intel_crtc_state *pipe_config =
3325
		to_intel_crtc_state(crtc->base.state);
3308
		to_intel_crtc_state(crtc->base.state);
3326
 
3309
 
3327
	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3310
	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3328
	crtc->base.mode = crtc->base.state->mode;
3311
	crtc->base.mode = crtc->base.state->mode;
3329
 
3312
 
3330
	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3313
	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3331
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3314
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3332
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3315
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3333
 
3316
 
3334
	if (HAS_DDI(dev))
3317
	if (HAS_DDI(dev))
3335
		intel_set_pipe_csc(&crtc->base);
3318
		intel_set_pipe_csc(&crtc->base);
3336
 
3319
 
3337
	/*
3320
	/*
3338
	 * Update pipe size and adjust fitter if needed: the reason for this is
3321
	 * Update pipe size and adjust fitter if needed: the reason for this is
3339
	 * that in compute_mode_changes we check the native mode (not the pfit
3322
	 * that in compute_mode_changes we check the native mode (not the pfit
3340
	 * mode) to see if we can flip rather than do a full mode set. In the
3323
	 * mode) to see if we can flip rather than do a full mode set. In the
3341
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
3324
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
3342
	 * pfit state, we'll end up with a big fb scanned out into the wrong
3325
	 * pfit state, we'll end up with a big fb scanned out into the wrong
3343
	 * sized surface.
3326
	 * sized surface.
3344
	 */
3327
	 */
3345
 
3328
 
3346
	I915_WRITE(PIPESRC(crtc->pipe),
3329
	I915_WRITE(PIPESRC(crtc->pipe),
3347
		   ((pipe_config->pipe_src_w - 1) << 16) |
3330
		   ((pipe_config->pipe_src_w - 1) << 16) |
3348
		   (pipe_config->pipe_src_h - 1));
3331
		   (pipe_config->pipe_src_h - 1));
3349
 
3332
 
3350
	/* on skylake this is done by detaching scalers */
3333
	/* on skylake this is done by detaching scalers */
3351
	if (INTEL_INFO(dev)->gen >= 9) {
3334
	if (INTEL_INFO(dev)->gen >= 9) {
3352
		skl_detach_scalers(crtc);
3335
		skl_detach_scalers(crtc);
3353
 
3336
 
3354
		if (pipe_config->pch_pfit.enabled)
3337
		if (pipe_config->pch_pfit.enabled)
3355
			skylake_pfit_enable(crtc);
3338
			skylake_pfit_enable(crtc);
3356
	} else if (HAS_PCH_SPLIT(dev)) {
3339
	} else if (HAS_PCH_SPLIT(dev)) {
3357
		if (pipe_config->pch_pfit.enabled)
3340
		if (pipe_config->pch_pfit.enabled)
3358
			ironlake_pfit_enable(crtc);
3341
			ironlake_pfit_enable(crtc);
3359
		else if (old_crtc_state->pch_pfit.enabled)
3342
		else if (old_crtc_state->pch_pfit.enabled)
3360
			ironlake_pfit_disable(crtc, true);
3343
			ironlake_pfit_disable(crtc, true);
3361
	}
3344
	}
3362
}
3345
}
3363
 
3346
 
3364
static void intel_fdi_normal_train(struct drm_crtc *crtc)
3347
static void intel_fdi_normal_train(struct drm_crtc *crtc)
3365
{
3348
{
3366
	struct drm_device *dev = crtc->dev;
3349
	struct drm_device *dev = crtc->dev;
3367
	struct drm_i915_private *dev_priv = dev->dev_private;
3350
	struct drm_i915_private *dev_priv = dev->dev_private;
3368
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3351
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3369
	int pipe = intel_crtc->pipe;
3352
	int pipe = intel_crtc->pipe;
3370
	i915_reg_t reg;
3353
	i915_reg_t reg;
3371
	u32 temp;
3354
	u32 temp;
3372
 
3355
 
3373
	/* enable normal train */
3356
	/* enable normal train */
3374
	reg = FDI_TX_CTL(pipe);
3357
	reg = FDI_TX_CTL(pipe);
3375
	temp = I915_READ(reg);
3358
	temp = I915_READ(reg);
3376
	if (IS_IVYBRIDGE(dev)) {
3359
	if (IS_IVYBRIDGE(dev)) {
3377
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3360
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3378
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3361
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3379
	} else {
3362
	} else {
3380
		temp &= ~FDI_LINK_TRAIN_NONE;
3363
		temp &= ~FDI_LINK_TRAIN_NONE;
3381
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3364
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3382
	}
3365
	}
3383
	I915_WRITE(reg, temp);
3366
	I915_WRITE(reg, temp);
3384
 
3367
 
3385
	reg = FDI_RX_CTL(pipe);
3368
	reg = FDI_RX_CTL(pipe);
3386
	temp = I915_READ(reg);
3369
	temp = I915_READ(reg);
3387
	if (HAS_PCH_CPT(dev)) {
3370
	if (HAS_PCH_CPT(dev)) {
3388
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3371
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3389
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3372
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3390
	} else {
3373
	} else {
3391
		temp &= ~FDI_LINK_TRAIN_NONE;
3374
		temp &= ~FDI_LINK_TRAIN_NONE;
3392
		temp |= FDI_LINK_TRAIN_NONE;
3375
		temp |= FDI_LINK_TRAIN_NONE;
3393
	}
3376
	}
3394
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3377
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3395
 
3378
 
3396
	/* wait one idle pattern time */
3379
	/* wait one idle pattern time */
3397
	POSTING_READ(reg);
3380
	POSTING_READ(reg);
3398
	udelay(1000);
3381
	udelay(1000);
3399
 
3382
 
3400
	/* IVB wants error correction enabled */
3383
	/* IVB wants error correction enabled */
3401
	if (IS_IVYBRIDGE(dev))
3384
	if (IS_IVYBRIDGE(dev))
3402
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3385
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3403
			   FDI_FE_ERRC_ENABLE);
3386
			   FDI_FE_ERRC_ENABLE);
3404
}
3387
}
3405
 
3388
 
3406
/* The FDI link training functions for ILK/Ibexpeak. */
3389
/* The FDI link training functions for ILK/Ibexpeak. */
3407
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3390
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3408
{
3391
{
3409
	struct drm_device *dev = crtc->dev;
3392
	struct drm_device *dev = crtc->dev;
3410
	struct drm_i915_private *dev_priv = dev->dev_private;
3393
	struct drm_i915_private *dev_priv = dev->dev_private;
3411
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3394
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3412
	int pipe = intel_crtc->pipe;
3395
	int pipe = intel_crtc->pipe;
3413
	i915_reg_t reg;
3396
	i915_reg_t reg;
3414
	u32 temp, tries;
3397
	u32 temp, tries;
3415
 
3398
 
3416
	/* FDI needs bits from pipe first */
3399
	/* FDI needs bits from pipe first */
3417
	assert_pipe_enabled(dev_priv, pipe);
3400
	assert_pipe_enabled(dev_priv, pipe);
3418
 
3401
 
3419
	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3402
	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3420
	   for train result */
3403
	   for train result */
3421
	reg = FDI_RX_IMR(pipe);
3404
	reg = FDI_RX_IMR(pipe);
3422
	temp = I915_READ(reg);
3405
	temp = I915_READ(reg);
3423
	temp &= ~FDI_RX_SYMBOL_LOCK;
3406
	temp &= ~FDI_RX_SYMBOL_LOCK;
3424
	temp &= ~FDI_RX_BIT_LOCK;
3407
	temp &= ~FDI_RX_BIT_LOCK;
3425
	I915_WRITE(reg, temp);
3408
	I915_WRITE(reg, temp);
3426
	I915_READ(reg);
3409
	I915_READ(reg);
3427
	udelay(150);
3410
	udelay(150);
3428
 
3411
 
3429
	/* enable CPU FDI TX and PCH FDI RX */
3412
	/* enable CPU FDI TX and PCH FDI RX */
3430
	reg = FDI_TX_CTL(pipe);
3413
	reg = FDI_TX_CTL(pipe);
3431
	temp = I915_READ(reg);
3414
	temp = I915_READ(reg);
3432
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3415
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3433
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3416
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3434
	temp &= ~FDI_LINK_TRAIN_NONE;
3417
	temp &= ~FDI_LINK_TRAIN_NONE;
3435
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3418
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3436
	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3419
	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3437
 
3420
 
3438
	reg = FDI_RX_CTL(pipe);
3421
	reg = FDI_RX_CTL(pipe);
3439
	temp = I915_READ(reg);
3422
	temp = I915_READ(reg);
3440
	temp &= ~FDI_LINK_TRAIN_NONE;
3423
	temp &= ~FDI_LINK_TRAIN_NONE;
3441
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3424
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3442
	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3425
	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3443
 
3426
 
3444
	POSTING_READ(reg);
3427
	POSTING_READ(reg);
3445
	udelay(150);
3428
	udelay(150);
3446
 
3429
 
3447
	/* Ironlake workaround, enable clock pointer after FDI enable*/
3430
	/* Ironlake workaround, enable clock pointer after FDI enable*/
3448
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3431
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3449
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3432
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3450
		   FDI_RX_PHASE_SYNC_POINTER_EN);
3433
		   FDI_RX_PHASE_SYNC_POINTER_EN);
3451
 
3434
 
3452
	reg = FDI_RX_IIR(pipe);
3435
	reg = FDI_RX_IIR(pipe);
3453
	for (tries = 0; tries < 5; tries++) {
3436
	for (tries = 0; tries < 5; tries++) {
3454
		temp = I915_READ(reg);
3437
		temp = I915_READ(reg);
3455
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3438
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3456
 
3439
 
3457
		if ((temp & FDI_RX_BIT_LOCK)) {
3440
		if ((temp & FDI_RX_BIT_LOCK)) {
3458
			DRM_DEBUG_KMS("FDI train 1 done.\n");
3441
			DRM_DEBUG_KMS("FDI train 1 done.\n");
3459
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3442
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3460
			break;
3443
			break;
3461
		}
3444
		}
3462
	}
3445
	}
3463
	if (tries == 5)
3446
	if (tries == 5)
3464
		DRM_ERROR("FDI train 1 fail!\n");
3447
		DRM_ERROR("FDI train 1 fail!\n");
3465
 
3448
 
3466
	/* Train 2 */
3449
	/* Train 2 */
3467
	reg = FDI_TX_CTL(pipe);
3450
	reg = FDI_TX_CTL(pipe);
3468
	temp = I915_READ(reg);
3451
	temp = I915_READ(reg);
3469
	temp &= ~FDI_LINK_TRAIN_NONE;
3452
	temp &= ~FDI_LINK_TRAIN_NONE;
3470
	temp |= FDI_LINK_TRAIN_PATTERN_2;
3453
	temp |= FDI_LINK_TRAIN_PATTERN_2;
3471
	I915_WRITE(reg, temp);
3454
	I915_WRITE(reg, temp);
3472
 
3455
 
3473
	reg = FDI_RX_CTL(pipe);
3456
	reg = FDI_RX_CTL(pipe);
3474
	temp = I915_READ(reg);
3457
	temp = I915_READ(reg);
3475
	temp &= ~FDI_LINK_TRAIN_NONE;
3458
	temp &= ~FDI_LINK_TRAIN_NONE;
3476
	temp |= FDI_LINK_TRAIN_PATTERN_2;
3459
	temp |= FDI_LINK_TRAIN_PATTERN_2;
3477
	I915_WRITE(reg, temp);
3460
	I915_WRITE(reg, temp);
3478
 
3461
 
3479
	POSTING_READ(reg);
3462
	POSTING_READ(reg);
3480
	udelay(150);
3463
	udelay(150);
3481
 
3464
 
3482
	reg = FDI_RX_IIR(pipe);
3465
	reg = FDI_RX_IIR(pipe);
3483
	for (tries = 0; tries < 5; tries++) {
3466
	for (tries = 0; tries < 5; tries++) {
3484
		temp = I915_READ(reg);
3467
		temp = I915_READ(reg);
3485
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3468
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3486
 
3469
 
3487
		if (temp & FDI_RX_SYMBOL_LOCK) {
3470
		if (temp & FDI_RX_SYMBOL_LOCK) {
3488
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3471
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3489
			DRM_DEBUG_KMS("FDI train 2 done.\n");
3472
			DRM_DEBUG_KMS("FDI train 2 done.\n");
3490
			break;
3473
			break;
3491
		}
3474
		}
3492
	}
3475
	}
3493
	if (tries == 5)
3476
	if (tries == 5)
3494
		DRM_ERROR("FDI train 2 fail!\n");
3477
		DRM_ERROR("FDI train 2 fail!\n");
3495
 
3478
 
3496
	DRM_DEBUG_KMS("FDI train done\n");
3479
	DRM_DEBUG_KMS("FDI train done\n");
3497
 
3480
 
3498
}
3481
}
3499
 
3482
 
3500
static const int snb_b_fdi_train_param[] = {
3483
static const int snb_b_fdi_train_param[] = {
3501
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3484
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3502
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3485
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3503
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3486
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3504
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3487
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3505
};
3488
};
3506
 
3489
 
3507
/* The FDI link training functions for SNB/Cougarpoint. */
3490
/* The FDI link training functions for SNB/Cougarpoint. */
3508
static void gen6_fdi_link_train(struct drm_crtc *crtc)
3491
static void gen6_fdi_link_train(struct drm_crtc *crtc)
3509
{
3492
{
3510
	struct drm_device *dev = crtc->dev;
3493
	struct drm_device *dev = crtc->dev;
3511
	struct drm_i915_private *dev_priv = dev->dev_private;
3494
	struct drm_i915_private *dev_priv = dev->dev_private;
3512
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3495
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3513
	int pipe = intel_crtc->pipe;
3496
	int pipe = intel_crtc->pipe;
3514
	i915_reg_t reg;
3497
	i915_reg_t reg;
3515
	u32 temp, i, retry;
3498
	u32 temp, i, retry;
3516
 
3499
 
3517
	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3500
	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3518
	   for train result */
3501
	   for train result */
3519
	reg = FDI_RX_IMR(pipe);
3502
	reg = FDI_RX_IMR(pipe);
3520
	temp = I915_READ(reg);
3503
	temp = I915_READ(reg);
3521
	temp &= ~FDI_RX_SYMBOL_LOCK;
3504
	temp &= ~FDI_RX_SYMBOL_LOCK;
3522
	temp &= ~FDI_RX_BIT_LOCK;
3505
	temp &= ~FDI_RX_BIT_LOCK;
3523
	I915_WRITE(reg, temp);
3506
	I915_WRITE(reg, temp);
3524
 
3507
 
3525
	POSTING_READ(reg);
3508
	POSTING_READ(reg);
3526
	udelay(150);
3509
	udelay(150);
3527
 
3510
 
3528
	/* enable CPU FDI TX and PCH FDI RX */
3511
	/* enable CPU FDI TX and PCH FDI RX */
3529
	reg = FDI_TX_CTL(pipe);
3512
	reg = FDI_TX_CTL(pipe);
3530
	temp = I915_READ(reg);
3513
	temp = I915_READ(reg);
3531
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3514
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3532
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3515
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3533
	temp &= ~FDI_LINK_TRAIN_NONE;
3516
	temp &= ~FDI_LINK_TRAIN_NONE;
3534
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3517
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3535
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3518
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3536
	/* SNB-B */
3519
	/* SNB-B */
3537
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3520
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3538
	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3521
	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3539
 
3522
 
3540
	I915_WRITE(FDI_RX_MISC(pipe),
3523
	I915_WRITE(FDI_RX_MISC(pipe),
3541
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3524
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3542
 
3525
 
3543
	reg = FDI_RX_CTL(pipe);
3526
	reg = FDI_RX_CTL(pipe);
3544
	temp = I915_READ(reg);
3527
	temp = I915_READ(reg);
3545
	if (HAS_PCH_CPT(dev)) {
3528
	if (HAS_PCH_CPT(dev)) {
3546
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3529
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3547
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3530
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3548
	} else {
3531
	} else {
3549
		temp &= ~FDI_LINK_TRAIN_NONE;
3532
		temp &= ~FDI_LINK_TRAIN_NONE;
3550
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3533
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3551
	}
3534
	}
3552
	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3535
	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3553
 
3536
 
3554
	POSTING_READ(reg);
3537
	POSTING_READ(reg);
3555
	udelay(150);
3538
	udelay(150);
3556
 
3539
 
3557
	for (i = 0; i < 4; i++) {
3540
	for (i = 0; i < 4; i++) {
3558
		reg = FDI_TX_CTL(pipe);
3541
		reg = FDI_TX_CTL(pipe);
3559
		temp = I915_READ(reg);
3542
		temp = I915_READ(reg);
3560
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3543
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3561
		temp |= snb_b_fdi_train_param[i];
3544
		temp |= snb_b_fdi_train_param[i];
3562
		I915_WRITE(reg, temp);
3545
		I915_WRITE(reg, temp);
3563
 
3546
 
3564
		POSTING_READ(reg);
3547
		POSTING_READ(reg);
3565
		udelay(500);
3548
		udelay(500);
3566
 
3549
 
3567
		for (retry = 0; retry < 5; retry++) {
3550
		for (retry = 0; retry < 5; retry++) {
3568
			reg = FDI_RX_IIR(pipe);
3551
			reg = FDI_RX_IIR(pipe);
3569
			temp = I915_READ(reg);
3552
			temp = I915_READ(reg);
3570
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3553
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3571
			if (temp & FDI_RX_BIT_LOCK) {
3554
			if (temp & FDI_RX_BIT_LOCK) {
3572
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3555
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3573
				DRM_DEBUG_KMS("FDI train 1 done.\n");
3556
				DRM_DEBUG_KMS("FDI train 1 done.\n");
3574
				break;
3557
				break;
3575
			}
3558
			}
3576
			udelay(50);
3559
			udelay(50);
3577
		}
3560
		}
3578
		if (retry < 5)
3561
		if (retry < 5)
3579
			break;
3562
			break;
3580
	}
3563
	}
3581
	if (i == 4)
3564
	if (i == 4)
3582
		DRM_ERROR("FDI train 1 fail!\n");
3565
		DRM_ERROR("FDI train 1 fail!\n");
3583
 
3566
 
3584
	/* Train 2 */
3567
	/* Train 2 */
3585
	reg = FDI_TX_CTL(pipe);
3568
	reg = FDI_TX_CTL(pipe);
3586
	temp = I915_READ(reg);
3569
	temp = I915_READ(reg);
3587
	temp &= ~FDI_LINK_TRAIN_NONE;
3570
	temp &= ~FDI_LINK_TRAIN_NONE;
3588
	temp |= FDI_LINK_TRAIN_PATTERN_2;
3571
	temp |= FDI_LINK_TRAIN_PATTERN_2;
3589
	if (IS_GEN6(dev)) {
3572
	if (IS_GEN6(dev)) {
3590
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3573
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3591
		/* SNB-B */
3574
		/* SNB-B */
3592
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3575
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3593
	}
3576
	}
3594
	I915_WRITE(reg, temp);
3577
	I915_WRITE(reg, temp);
3595
 
3578
 
3596
	reg = FDI_RX_CTL(pipe);
3579
	reg = FDI_RX_CTL(pipe);
3597
	temp = I915_READ(reg);
3580
	temp = I915_READ(reg);
3598
	if (HAS_PCH_CPT(dev)) {
3581
	if (HAS_PCH_CPT(dev)) {
3599
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3582
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3600
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3583
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3601
	} else {
3584
	} else {
3602
		temp &= ~FDI_LINK_TRAIN_NONE;
3585
		temp &= ~FDI_LINK_TRAIN_NONE;
3603
		temp |= FDI_LINK_TRAIN_PATTERN_2;
3586
		temp |= FDI_LINK_TRAIN_PATTERN_2;
3604
	}
3587
	}
3605
	I915_WRITE(reg, temp);
3588
	I915_WRITE(reg, temp);
3606
 
3589
 
3607
	POSTING_READ(reg);
3590
	POSTING_READ(reg);
3608
	udelay(150);
3591
	udelay(150);
3609
 
3592
 
3610
	for (i = 0; i < 4; i++) {
3593
	for (i = 0; i < 4; i++) {
3611
		reg = FDI_TX_CTL(pipe);
3594
		reg = FDI_TX_CTL(pipe);
3612
		temp = I915_READ(reg);
3595
		temp = I915_READ(reg);
3613
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3596
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3614
		temp |= snb_b_fdi_train_param[i];
3597
		temp |= snb_b_fdi_train_param[i];
3615
		I915_WRITE(reg, temp);
3598
		I915_WRITE(reg, temp);
3616
 
3599
 
3617
		POSTING_READ(reg);
3600
		POSTING_READ(reg);
3618
		udelay(500);
3601
		udelay(500);
3619
 
3602
 
3620
		for (retry = 0; retry < 5; retry++) {
3603
		for (retry = 0; retry < 5; retry++) {
3621
			reg = FDI_RX_IIR(pipe);
3604
			reg = FDI_RX_IIR(pipe);
3622
			temp = I915_READ(reg);
3605
			temp = I915_READ(reg);
3623
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3606
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3624
			if (temp & FDI_RX_SYMBOL_LOCK) {
3607
			if (temp & FDI_RX_SYMBOL_LOCK) {
3625
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3608
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3626
				DRM_DEBUG_KMS("FDI train 2 done.\n");
3609
				DRM_DEBUG_KMS("FDI train 2 done.\n");
3627
				break;
3610
				break;
3628
			}
3611
			}
3629
			udelay(50);
3612
			udelay(50);
3630
		}
3613
		}
3631
		if (retry < 5)
3614
		if (retry < 5)
3632
			break;
3615
			break;
3633
	}
3616
	}
3634
	if (i == 4)
3617
	if (i == 4)
3635
		DRM_ERROR("FDI train 2 fail!\n");
3618
		DRM_ERROR("FDI train 2 fail!\n");
3636
 
3619
 
3637
	DRM_DEBUG_KMS("FDI train done.\n");
3620
	DRM_DEBUG_KMS("FDI train done.\n");
3638
}
3621
}
3639
 
3622
 
3640
/* Manual link training for Ivy Bridge A0 parts */
3623
/* Manual link training for Ivy Bridge A0 parts */
3641
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3624
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3642
{
3625
{
3643
	struct drm_device *dev = crtc->dev;
3626
	struct drm_device *dev = crtc->dev;
3644
	struct drm_i915_private *dev_priv = dev->dev_private;
3627
	struct drm_i915_private *dev_priv = dev->dev_private;
3645
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3628
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3646
	int pipe = intel_crtc->pipe;
3629
	int pipe = intel_crtc->pipe;
3647
	i915_reg_t reg;
3630
	i915_reg_t reg;
3648
	u32 temp, i, j;
3631
	u32 temp, i, j;
3649
 
3632
 
3650
	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3633
	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3651
	   for train result */
3634
	   for train result */
3652
	reg = FDI_RX_IMR(pipe);
3635
	reg = FDI_RX_IMR(pipe);
3653
	temp = I915_READ(reg);
3636
	temp = I915_READ(reg);
3654
	temp &= ~FDI_RX_SYMBOL_LOCK;
3637
	temp &= ~FDI_RX_SYMBOL_LOCK;
3655
	temp &= ~FDI_RX_BIT_LOCK;
3638
	temp &= ~FDI_RX_BIT_LOCK;
3656
	I915_WRITE(reg, temp);
3639
	I915_WRITE(reg, temp);
3657
 
3640
 
3658
	POSTING_READ(reg);
3641
	POSTING_READ(reg);
3659
	udelay(150);
3642
	udelay(150);
3660
 
3643
 
3661
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3644
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3662
		      I915_READ(FDI_RX_IIR(pipe)));
3645
		      I915_READ(FDI_RX_IIR(pipe)));
3663
 
3646
 
3664
	/* Try each vswing and preemphasis setting twice before moving on */
3647
	/* Try each vswing and preemphasis setting twice before moving on */
3665
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3648
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3666
		/* disable first in case we need to retry */
3649
		/* disable first in case we need to retry */
3667
		reg = FDI_TX_CTL(pipe);
3650
		reg = FDI_TX_CTL(pipe);
3668
		temp = I915_READ(reg);
3651
		temp = I915_READ(reg);
3669
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3652
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3670
		temp &= ~FDI_TX_ENABLE;
3653
		temp &= ~FDI_TX_ENABLE;
3671
		I915_WRITE(reg, temp);
3654
		I915_WRITE(reg, temp);
3672
 
3655
 
3673
		reg = FDI_RX_CTL(pipe);
3656
		reg = FDI_RX_CTL(pipe);
3674
		temp = I915_READ(reg);
3657
		temp = I915_READ(reg);
3675
		temp &= ~FDI_LINK_TRAIN_AUTO;
3658
		temp &= ~FDI_LINK_TRAIN_AUTO;
3676
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3659
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3677
		temp &= ~FDI_RX_ENABLE;
3660
		temp &= ~FDI_RX_ENABLE;
3678
		I915_WRITE(reg, temp);
3661
		I915_WRITE(reg, temp);
3679
 
3662
 
3680
		/* enable CPU FDI TX and PCH FDI RX */
3663
		/* enable CPU FDI TX and PCH FDI RX */
3681
		reg = FDI_TX_CTL(pipe);
3664
		reg = FDI_TX_CTL(pipe);
3682
		temp = I915_READ(reg);
3665
		temp = I915_READ(reg);
3683
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
3666
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
3684
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3667
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3685
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3668
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3686
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3669
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3687
		temp |= snb_b_fdi_train_param[j/2];
3670
		temp |= snb_b_fdi_train_param[j/2];
3688
		temp |= FDI_COMPOSITE_SYNC;
3671
		temp |= FDI_COMPOSITE_SYNC;
3689
		I915_WRITE(reg, temp | FDI_TX_ENABLE);
3672
		I915_WRITE(reg, temp | FDI_TX_ENABLE);
3690
 
3673
 
3691
		I915_WRITE(FDI_RX_MISC(pipe),
3674
		I915_WRITE(FDI_RX_MISC(pipe),
3692
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3675
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3693
 
3676
 
3694
		reg = FDI_RX_CTL(pipe);
3677
		reg = FDI_RX_CTL(pipe);
3695
		temp = I915_READ(reg);
3678
		temp = I915_READ(reg);
3696
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3679
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3697
		temp |= FDI_COMPOSITE_SYNC;
3680
		temp |= FDI_COMPOSITE_SYNC;
3698
		I915_WRITE(reg, temp | FDI_RX_ENABLE);
3681
		I915_WRITE(reg, temp | FDI_RX_ENABLE);
3699
 
3682
 
3700
		POSTING_READ(reg);
3683
		POSTING_READ(reg);
3701
		udelay(1); /* should be 0.5us */
3684
		udelay(1); /* should be 0.5us */
3702
 
3685
 
3703
		for (i = 0; i < 4; i++) {
3686
		for (i = 0; i < 4; i++) {
3704
			reg = FDI_RX_IIR(pipe);
3687
			reg = FDI_RX_IIR(pipe);
3705
			temp = I915_READ(reg);
3688
			temp = I915_READ(reg);
3706
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3689
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3707
 
3690
 
3708
			if (temp & FDI_RX_BIT_LOCK ||
3691
			if (temp & FDI_RX_BIT_LOCK ||
3709
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3692
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3710
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3693
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3711
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3694
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3712
					      i);
3695
					      i);
3713
				break;
3696
				break;
3714
			}
3697
			}
3715
			udelay(1); /* should be 0.5us */
3698
			udelay(1); /* should be 0.5us */
3716
		}
3699
		}
3717
		if (i == 4) {
3700
		if (i == 4) {
3718
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3701
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3719
			continue;
3702
			continue;
3720
		}
3703
		}
3721
 
3704
 
3722
		/* Train 2 */
3705
		/* Train 2 */
3723
		reg = FDI_TX_CTL(pipe);
3706
		reg = FDI_TX_CTL(pipe);
3724
		temp = I915_READ(reg);
3707
		temp = I915_READ(reg);
3725
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3708
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3726
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3709
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3727
		I915_WRITE(reg, temp);
3710
		I915_WRITE(reg, temp);
3728
 
3711
 
3729
		reg = FDI_RX_CTL(pipe);
3712
		reg = FDI_RX_CTL(pipe);
3730
		temp = I915_READ(reg);
3713
		temp = I915_READ(reg);
3731
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3714
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3732
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3715
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3733
		I915_WRITE(reg, temp);
3716
		I915_WRITE(reg, temp);
3734
 
3717
 
3735
		POSTING_READ(reg);
3718
		POSTING_READ(reg);
3736
		udelay(2); /* should be 1.5us */
3719
		udelay(2); /* should be 1.5us */
3737
 
3720
 
3738
		for (i = 0; i < 4; i++) {
3721
		for (i = 0; i < 4; i++) {
3739
			reg = FDI_RX_IIR(pipe);
3722
			reg = FDI_RX_IIR(pipe);
3740
			temp = I915_READ(reg);
3723
			temp = I915_READ(reg);
3741
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3724
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3742
 
3725
 
3743
			if (temp & FDI_RX_SYMBOL_LOCK ||
3726
			if (temp & FDI_RX_SYMBOL_LOCK ||
3744
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3727
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3745
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3728
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3746
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3729
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3747
					      i);
3730
					      i);
3748
				goto train_done;
3731
				goto train_done;
3749
			}
3732
			}
3750
			udelay(2); /* should be 1.5us */
3733
			udelay(2); /* should be 1.5us */
3751
		}
3734
		}
3752
		if (i == 4)
3735
		if (i == 4)
3753
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3736
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3754
	}
3737
	}
3755
 
3738
 
3756
train_done:
3739
train_done:
3757
	DRM_DEBUG_KMS("FDI train done.\n");
3740
	DRM_DEBUG_KMS("FDI train done.\n");
3758
}
3741
}
3759
 
3742
 
3760
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3743
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3761
{
3744
{
3762
	struct drm_device *dev = intel_crtc->base.dev;
3745
	struct drm_device *dev = intel_crtc->base.dev;
3763
	struct drm_i915_private *dev_priv = dev->dev_private;
3746
	struct drm_i915_private *dev_priv = dev->dev_private;
3764
	int pipe = intel_crtc->pipe;
3747
	int pipe = intel_crtc->pipe;
3765
	i915_reg_t reg;
3748
	i915_reg_t reg;
3766
	u32 temp;
3749
	u32 temp;
3767
 
3750
 
3768
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3751
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3769
	reg = FDI_RX_CTL(pipe);
3752
	reg = FDI_RX_CTL(pipe);
3770
	temp = I915_READ(reg);
3753
	temp = I915_READ(reg);
3771
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3754
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3772
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3755
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3773
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3756
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3774
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3757
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3775
 
3758
 
3776
	POSTING_READ(reg);
3759
	POSTING_READ(reg);
3777
	udelay(200);
3760
	udelay(200);
3778
 
3761
 
3779
	/* Switch from Rawclk to PCDclk */
3762
	/* Switch from Rawclk to PCDclk */
3780
	temp = I915_READ(reg);
3763
	temp = I915_READ(reg);
3781
	I915_WRITE(reg, temp | FDI_PCDCLK);
3764
	I915_WRITE(reg, temp | FDI_PCDCLK);
3782
 
3765
 
3783
	POSTING_READ(reg);
3766
	POSTING_READ(reg);
3784
	udelay(200);
3767
	udelay(200);
3785
 
3768
 
3786
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3769
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3787
	reg = FDI_TX_CTL(pipe);
3770
	reg = FDI_TX_CTL(pipe);
3788
	temp = I915_READ(reg);
3771
	temp = I915_READ(reg);
3789
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3772
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3790
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3773
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3791
 
3774
 
3792
		POSTING_READ(reg);
3775
		POSTING_READ(reg);
3793
		udelay(100);
3776
		udelay(100);
3794
	}
3777
	}
3795
}
3778
}
3796
 
3779
 
3797
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3780
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3798
{
3781
{
3799
	struct drm_device *dev = intel_crtc->base.dev;
3782
	struct drm_device *dev = intel_crtc->base.dev;
3800
	struct drm_i915_private *dev_priv = dev->dev_private;
3783
	struct drm_i915_private *dev_priv = dev->dev_private;
3801
	int pipe = intel_crtc->pipe;
3784
	int pipe = intel_crtc->pipe;
3802
	i915_reg_t reg;
3785
	i915_reg_t reg;
3803
	u32 temp;
3786
	u32 temp;
3804
 
3787
 
3805
	/* Switch from PCDclk to Rawclk */
3788
	/* Switch from PCDclk to Rawclk */
3806
	reg = FDI_RX_CTL(pipe);
3789
	reg = FDI_RX_CTL(pipe);
3807
	temp = I915_READ(reg);
3790
	temp = I915_READ(reg);
3808
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3791
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3809
 
3792
 
3810
	/* Disable CPU FDI TX PLL */
3793
	/* Disable CPU FDI TX PLL */
3811
	reg = FDI_TX_CTL(pipe);
3794
	reg = FDI_TX_CTL(pipe);
3812
	temp = I915_READ(reg);
3795
	temp = I915_READ(reg);
3813
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3796
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3814
 
3797
 
3815
	POSTING_READ(reg);
3798
	POSTING_READ(reg);
3816
	udelay(100);
3799
	udelay(100);
3817
 
3800
 
3818
	reg = FDI_RX_CTL(pipe);
3801
	reg = FDI_RX_CTL(pipe);
3819
	temp = I915_READ(reg);
3802
	temp = I915_READ(reg);
3820
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3803
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3821
 
3804
 
3822
	/* Wait for the clocks to turn off. */
3805
	/* Wait for the clocks to turn off. */
3823
	POSTING_READ(reg);
3806
	POSTING_READ(reg);
3824
	udelay(100);
3807
	udelay(100);
3825
}
3808
}
3826
 
3809
 
3827
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3810
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3828
{
3811
{
3829
	struct drm_device *dev = crtc->dev;
3812
	struct drm_device *dev = crtc->dev;
3830
	struct drm_i915_private *dev_priv = dev->dev_private;
3813
	struct drm_i915_private *dev_priv = dev->dev_private;
3831
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3814
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3832
	int pipe = intel_crtc->pipe;
3815
	int pipe = intel_crtc->pipe;
3833
	i915_reg_t reg;
3816
	i915_reg_t reg;
3834
	u32 temp;
3817
	u32 temp;
3835
 
3818
 
3836
	/* disable CPU FDI tx and PCH FDI rx */
3819
	/* disable CPU FDI tx and PCH FDI rx */
3837
	reg = FDI_TX_CTL(pipe);
3820
	reg = FDI_TX_CTL(pipe);
3838
	temp = I915_READ(reg);
3821
	temp = I915_READ(reg);
3839
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3822
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3840
	POSTING_READ(reg);
3823
	POSTING_READ(reg);
3841
 
3824
 
3842
	reg = FDI_RX_CTL(pipe);
3825
	reg = FDI_RX_CTL(pipe);
3843
	temp = I915_READ(reg);
3826
	temp = I915_READ(reg);
3844
	temp &= ~(0x7 << 16);
3827
	temp &= ~(0x7 << 16);
3845
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3828
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3846
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3829
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3847
 
3830
 
3848
	POSTING_READ(reg);
3831
	POSTING_READ(reg);
3849
	udelay(100);
3832
	udelay(100);
3850
 
3833
 
3851
	/* Ironlake workaround, disable clock pointer after downing FDI */
3834
	/* Ironlake workaround, disable clock pointer after downing FDI */
3852
	if (HAS_PCH_IBX(dev))
3835
	if (HAS_PCH_IBX(dev))
3853
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3836
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3854
 
3837
 
3855
	/* still set train pattern 1 */
3838
	/* still set train pattern 1 */
3856
	reg = FDI_TX_CTL(pipe);
3839
	reg = FDI_TX_CTL(pipe);
3857
	temp = I915_READ(reg);
3840
	temp = I915_READ(reg);
3858
	temp &= ~FDI_LINK_TRAIN_NONE;
3841
	temp &= ~FDI_LINK_TRAIN_NONE;
3859
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3842
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3860
	I915_WRITE(reg, temp);
3843
	I915_WRITE(reg, temp);
3861
 
3844
 
3862
	reg = FDI_RX_CTL(pipe);
3845
	reg = FDI_RX_CTL(pipe);
3863
	temp = I915_READ(reg);
3846
	temp = I915_READ(reg);
3864
	if (HAS_PCH_CPT(dev)) {
3847
	if (HAS_PCH_CPT(dev)) {
3865
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3848
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3866
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3849
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3867
	} else {
3850
	} else {
3868
		temp &= ~FDI_LINK_TRAIN_NONE;
3851
		temp &= ~FDI_LINK_TRAIN_NONE;
3869
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3852
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3870
	}
3853
	}
3871
	/* BPC in FDI rx is consistent with that in PIPECONF */
3854
	/* BPC in FDI rx is consistent with that in PIPECONF */
3872
	temp &= ~(0x07 << 16);
3855
	temp &= ~(0x07 << 16);
3873
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3856
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3874
	I915_WRITE(reg, temp);
3857
	I915_WRITE(reg, temp);
3875
 
3858
 
3876
	POSTING_READ(reg);
3859
	POSTING_READ(reg);
3877
	udelay(100);
3860
	udelay(100);
3878
}
3861
}
3879
 
3862
 
3880
bool intel_has_pending_fb_unpin(struct drm_device *dev)
3863
bool intel_has_pending_fb_unpin(struct drm_device *dev)
3881
{
3864
{
3882
	struct intel_crtc *crtc;
3865
	struct intel_crtc *crtc;
3883
 
3866
 
3884
	/* Note that we don't need to be called with mode_config.lock here
3867
	/* Note that we don't need to be called with mode_config.lock here
3885
	 * as our list of CRTC objects is static for the lifetime of the
3868
	 * as our list of CRTC objects is static for the lifetime of the
3886
	 * device and so cannot disappear as we iterate. Similarly, we can
3869
	 * device and so cannot disappear as we iterate. Similarly, we can
3887
	 * happily treat the predicates as racy, atomic checks as userspace
3870
	 * happily treat the predicates as racy, atomic checks as userspace
3888
	 * cannot claim and pin a new fb without at least acquring the
3871
	 * cannot claim and pin a new fb without at least acquring the
3889
	 * struct_mutex and so serialising with us.
3872
	 * struct_mutex and so serialising with us.
3890
	 */
3873
	 */
3891
	for_each_intel_crtc(dev, crtc) {
3874
	for_each_intel_crtc(dev, crtc) {
3892
		if (atomic_read(&crtc->unpin_work_count) == 0)
3875
		if (atomic_read(&crtc->unpin_work_count) == 0)
3893
			continue;
3876
			continue;
3894
 
3877
 
3895
		if (crtc->unpin_work)
3878
		if (crtc->unpin_work)
3896
			intel_wait_for_vblank(dev, crtc->pipe);
3879
			intel_wait_for_vblank(dev, crtc->pipe);
3897
 
3880
 
3898
		return true;
3881
		return true;
3899
	}
3882
	}
3900
 
3883
 
3901
	return false;
3884
	return false;
3902
}
3885
}
3903
 
3886
 
3904
static void page_flip_completed(struct intel_crtc *intel_crtc)
3887
static void page_flip_completed(struct intel_crtc *intel_crtc)
3905
{
3888
{
3906
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3889
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3907
	struct intel_unpin_work *work = intel_crtc->unpin_work;
3890
	struct intel_unpin_work *work = intel_crtc->unpin_work;
3908
 
3891
 
3909
	/* ensure that the unpin work is consistent wrt ->pending. */
3892
	/* ensure that the unpin work is consistent wrt ->pending. */
3910
	smp_rmb();
3893
	smp_rmb();
3911
	intel_crtc->unpin_work = NULL;
3894
	intel_crtc->unpin_work = NULL;
3912
 
3895
 
3913
	if (work->event)
3896
	if (work->event)
3914
		drm_send_vblank_event(intel_crtc->base.dev,
3897
		drm_send_vblank_event(intel_crtc->base.dev,
3915
				      intel_crtc->pipe,
3898
				      intel_crtc->pipe,
3916
				      work->event);
3899
				      work->event);
3917
 
3900
 
3918
	drm_crtc_vblank_put(&intel_crtc->base);
3901
	drm_crtc_vblank_put(&intel_crtc->base);
3919
 
3902
 
3920
	wake_up_all(&dev_priv->pending_flip_queue);
3903
	wake_up_all(&dev_priv->pending_flip_queue);
3921
	queue_work(dev_priv->wq, &work->work);
3904
	queue_work(dev_priv->wq, &work->work);
3922
 
3905
 
3923
	trace_i915_flip_complete(intel_crtc->plane,
3906
	trace_i915_flip_complete(intel_crtc->plane,
3924
				 work->pending_flip_obj);
3907
				 work->pending_flip_obj);
3925
}
3908
}
3926
 
3909
 
3927
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3910
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3928
{
3911
{
3929
	struct drm_device *dev = crtc->dev;
3912
	struct drm_device *dev = crtc->dev;
3930
	struct drm_i915_private *dev_priv = dev->dev_private;
3913
	struct drm_i915_private *dev_priv = dev->dev_private;
3931
	long ret;
3914
	long ret;
3932
 
3915
 
3933
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3916
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3934
 
3917
 
3935
	ret = wait_event_interruptible_timeout(
3918
	ret = wait_event_interruptible_timeout(
3936
					dev_priv->pending_flip_queue,
3919
					dev_priv->pending_flip_queue,
3937
					!intel_crtc_has_pending_flip(crtc),
3920
					!intel_crtc_has_pending_flip(crtc),
3938
					60*HZ);
3921
					60*HZ);
3939
 
3922
 
3940
	if (ret < 0)
3923
	if (ret < 0)
3941
		return ret;
3924
		return ret;
3942
 
3925
 
3943
	if (ret == 0) {
3926
	if (ret == 0) {
3944
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3927
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3945
 
3928
 
3946
		spin_lock_irq(&dev->event_lock);
3929
		spin_lock_irq(&dev->event_lock);
3947
		if (intel_crtc->unpin_work) {
3930
		if (intel_crtc->unpin_work) {
3948
			WARN_ONCE(1, "Removing stuck page flip\n");
3931
			WARN_ONCE(1, "Removing stuck page flip\n");
3949
			page_flip_completed(intel_crtc);
3932
			page_flip_completed(intel_crtc);
3950
		}
3933
		}
3951
		spin_unlock_irq(&dev->event_lock);
3934
		spin_unlock_irq(&dev->event_lock);
3952
	}
3935
	}
3953
 
3936
 
3954
	return 0;
3937
	return 0;
3955
	}
3938
}
3956
 
3939
 
3957
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3940
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3958
{
3941
{
3959
	u32 temp;
3942
	u32 temp;
3960
 
3943
 
3961
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3944
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3962
 
3945
 
3963
	mutex_lock(&dev_priv->sb_lock);
3946
	mutex_lock(&dev_priv->sb_lock);
3964
 
3947
 
3965
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3948
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3966
	temp |= SBI_SSCCTL_DISABLE;
3949
	temp |= SBI_SSCCTL_DISABLE;
3967
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3950
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3968
 
3951
 
3969
	mutex_unlock(&dev_priv->sb_lock);
3952
	mutex_unlock(&dev_priv->sb_lock);
3970
}
3953
}
3971
 
3954
 
3972
/* Program iCLKIP clock to the desired frequency */
3955
/* Program iCLKIP clock to the desired frequency */
3973
static void lpt_program_iclkip(struct drm_crtc *crtc)
3956
static void lpt_program_iclkip(struct drm_crtc *crtc)
3974
{
3957
{
3975
	struct drm_device *dev = crtc->dev;
3958
	struct drm_device *dev = crtc->dev;
3976
	struct drm_i915_private *dev_priv = dev->dev_private;
3959
	struct drm_i915_private *dev_priv = dev->dev_private;
3977
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3960
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3978
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3961
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3979
	u32 temp;
3962
	u32 temp;
3980
 
3963
 
3981
	lpt_disable_iclkip(dev_priv);
3964
	lpt_disable_iclkip(dev_priv);
3982
 
3965
 
3983
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
3966
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
3984
	if (clock == 20000) {
3967
	if (clock == 20000) {
3985
		auxdiv = 1;
3968
		auxdiv = 1;
3986
		divsel = 0x41;
3969
		divsel = 0x41;
3987
		phaseinc = 0x20;
3970
		phaseinc = 0x20;
3988
	} else {
3971
	} else {
3989
		/* The iCLK virtual clock root frequency is in MHz,
3972
		/* The iCLK virtual clock root frequency is in MHz,
3990
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
3973
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
3991
		 * divisors, it is necessary to divide one by another, so we
3974
		 * divisors, it is necessary to divide one by another, so we
3992
		 * convert the virtual clock precision to KHz here for higher
3975
		 * convert the virtual clock precision to KHz here for higher
3993
		 * precision.
3976
		 * precision.
3994
		 */
3977
		 */
3995
		u32 iclk_virtual_root_freq = 172800 * 1000;
3978
		u32 iclk_virtual_root_freq = 172800 * 1000;
3996
		u32 iclk_pi_range = 64;
3979
		u32 iclk_pi_range = 64;
3997
		u32 desired_divisor, msb_divisor_value, pi_value;
3980
		u32 desired_divisor, msb_divisor_value, pi_value;
3998
 
3981
 
3999
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
3982
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
4000
		msb_divisor_value = desired_divisor / iclk_pi_range;
3983
		msb_divisor_value = desired_divisor / iclk_pi_range;
4001
		pi_value = desired_divisor % iclk_pi_range;
3984
		pi_value = desired_divisor % iclk_pi_range;
4002
 
3985
 
4003
		auxdiv = 0;
3986
		auxdiv = 0;
4004
		divsel = msb_divisor_value - 2;
3987
		divsel = msb_divisor_value - 2;
4005
		phaseinc = pi_value;
3988
		phaseinc = pi_value;
4006
	}
3989
	}
4007
 
3990
 
4008
	/* This should not happen with any sane values */
3991
	/* This should not happen with any sane values */
4009
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3992
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4010
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3993
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4011
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3994
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4012
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3995
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4013
 
3996
 
4014
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3997
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4015
			clock,
3998
			clock,
4016
			auxdiv,
3999
			auxdiv,
4017
			divsel,
4000
			divsel,
4018
			phasedir,
4001
			phasedir,
4019
			phaseinc);
4002
			phaseinc);
4020
 
4003
 
4021
	mutex_lock(&dev_priv->sb_lock);
4004
	mutex_lock(&dev_priv->sb_lock);
4022
 
4005
 
4023
	/* Program SSCDIVINTPHASE6 */
4006
	/* Program SSCDIVINTPHASE6 */
4024
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4007
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4025
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4008
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4026
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4009
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4027
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4010
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4028
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4011
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4029
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4012
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4030
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
4013
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
4031
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
4014
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
4032
 
4015
 
4033
	/* Program SSCAUXDIV */
4016
	/* Program SSCAUXDIV */
4034
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4017
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4035
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4018
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4036
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
4019
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
4037
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
4020
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
4038
 
4021
 
4039
	/* Enable modulator and associated divider */
4022
	/* Enable modulator and associated divider */
4040
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4023
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4041
	temp &= ~SBI_SSCCTL_DISABLE;
4024
	temp &= ~SBI_SSCCTL_DISABLE;
4042
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4025
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4043
 
4026
 
4044
	mutex_unlock(&dev_priv->sb_lock);
4027
	mutex_unlock(&dev_priv->sb_lock);
4045
 
4028
 
4046
	/* Wait for initialization time */
4029
	/* Wait for initialization time */
4047
	udelay(24);
4030
	udelay(24);
4048
 
4031
 
4049
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
4032
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
4050
}
4033
}
4051
 
4034
 
4052
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4035
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4053
						enum pipe pch_transcoder)
4036
						enum pipe pch_transcoder)
4054
{
4037
{
4055
	struct drm_device *dev = crtc->base.dev;
4038
	struct drm_device *dev = crtc->base.dev;
4056
	struct drm_i915_private *dev_priv = dev->dev_private;
4039
	struct drm_i915_private *dev_priv = dev->dev_private;
4057
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
4040
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
4058
 
4041
 
4059
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4042
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4060
		   I915_READ(HTOTAL(cpu_transcoder)));
4043
		   I915_READ(HTOTAL(cpu_transcoder)));
4061
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4044
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4062
		   I915_READ(HBLANK(cpu_transcoder)));
4045
		   I915_READ(HBLANK(cpu_transcoder)));
4063
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4046
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4064
		   I915_READ(HSYNC(cpu_transcoder)));
4047
		   I915_READ(HSYNC(cpu_transcoder)));
4065
 
4048
 
4066
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4049
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4067
		   I915_READ(VTOTAL(cpu_transcoder)));
4050
		   I915_READ(VTOTAL(cpu_transcoder)));
4068
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4051
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4069
		   I915_READ(VBLANK(cpu_transcoder)));
4052
		   I915_READ(VBLANK(cpu_transcoder)));
4070
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4053
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4071
		   I915_READ(VSYNC(cpu_transcoder)));
4054
		   I915_READ(VSYNC(cpu_transcoder)));
4072
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4055
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4073
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
4056
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
4074
}
4057
}
4075
 
4058
 
4076
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4059
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4077
{
4060
{
4078
	struct drm_i915_private *dev_priv = dev->dev_private;
4061
	struct drm_i915_private *dev_priv = dev->dev_private;
4079
	uint32_t temp;
4062
	uint32_t temp;
4080
 
4063
 
4081
	temp = I915_READ(SOUTH_CHICKEN1);
4064
	temp = I915_READ(SOUTH_CHICKEN1);
4082
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4065
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4083
		return;
4066
		return;
4084
 
4067
 
4085
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4068
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4086
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4069
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4087
 
4070
 
4088
	temp &= ~FDI_BC_BIFURCATION_SELECT;
4071
	temp &= ~FDI_BC_BIFURCATION_SELECT;
4089
	if (enable)
4072
	if (enable)
4090
		temp |= FDI_BC_BIFURCATION_SELECT;
4073
		temp |= FDI_BC_BIFURCATION_SELECT;
4091
 
4074
 
4092
	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4075
	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4093
	I915_WRITE(SOUTH_CHICKEN1, temp);
4076
	I915_WRITE(SOUTH_CHICKEN1, temp);
4094
	POSTING_READ(SOUTH_CHICKEN1);
4077
	POSTING_READ(SOUTH_CHICKEN1);
4095
}
4078
}
4096
 
4079
 
4097
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4080
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4098
{
4081
{
4099
	struct drm_device *dev = intel_crtc->base.dev;
4082
	struct drm_device *dev = intel_crtc->base.dev;
4100
 
4083
 
4101
	switch (intel_crtc->pipe) {
4084
	switch (intel_crtc->pipe) {
4102
	case PIPE_A:
4085
	case PIPE_A:
4103
		break;
4086
		break;
4104
	case PIPE_B:
4087
	case PIPE_B:
4105
		if (intel_crtc->config->fdi_lanes > 2)
4088
		if (intel_crtc->config->fdi_lanes > 2)
4106
			cpt_set_fdi_bc_bifurcation(dev, false);
4089
			cpt_set_fdi_bc_bifurcation(dev, false);
4107
		else
4090
		else
4108
			cpt_set_fdi_bc_bifurcation(dev, true);
4091
			cpt_set_fdi_bc_bifurcation(dev, true);
4109
 
4092
 
4110
		break;
4093
		break;
4111
	case PIPE_C:
4094
	case PIPE_C:
4112
		cpt_set_fdi_bc_bifurcation(dev, true);
4095
		cpt_set_fdi_bc_bifurcation(dev, true);
4113
 
4096
 
4114
		break;
4097
		break;
4115
	default:
4098
	default:
4116
		BUG();
4099
		BUG();
4117
	}
4100
	}
4118
}
4101
}
4119
 
4102
 
4120
/* Return which DP Port should be selected for Transcoder DP control */
4103
/* Return which DP Port should be selected for Transcoder DP control */
4121
static enum port
4104
static enum port
4122
intel_trans_dp_port_sel(struct drm_crtc *crtc)
4105
intel_trans_dp_port_sel(struct drm_crtc *crtc)
4123
{
4106
{
4124
	struct drm_device *dev = crtc->dev;
4107
	struct drm_device *dev = crtc->dev;
4125
	struct intel_encoder *encoder;
4108
	struct intel_encoder *encoder;
4126
 
4109
 
4127
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4110
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4128
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4111
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4129
		    encoder->type == INTEL_OUTPUT_EDP)
4112
		    encoder->type == INTEL_OUTPUT_EDP)
4130
			return enc_to_dig_port(&encoder->base)->port;
4113
			return enc_to_dig_port(&encoder->base)->port;
4131
	}
4114
	}
4132
 
4115
 
4133
	return -1;
4116
	return -1;
4134
}
4117
}
4135
 
4118
 
4136
/*
4119
/*
4137
 * Enable PCH resources required for PCH ports:
4120
 * Enable PCH resources required for PCH ports:
4138
 *   - PCH PLLs
4121
 *   - PCH PLLs
4139
 *   - FDI training & RX/TX
4122
 *   - FDI training & RX/TX
4140
 *   - update transcoder timings
4123
 *   - update transcoder timings
4141
 *   - DP transcoding bits
4124
 *   - DP transcoding bits
4142
 *   - transcoder
4125
 *   - transcoder
4143
 */
4126
 */
4144
static void ironlake_pch_enable(struct drm_crtc *crtc)
4127
static void ironlake_pch_enable(struct drm_crtc *crtc)
4145
{
4128
{
4146
	struct drm_device *dev = crtc->dev;
4129
	struct drm_device *dev = crtc->dev;
4147
	struct drm_i915_private *dev_priv = dev->dev_private;
4130
	struct drm_i915_private *dev_priv = dev->dev_private;
4148
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4131
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4149
	int pipe = intel_crtc->pipe;
4132
	int pipe = intel_crtc->pipe;
4150
	u32 temp;
4133
	u32 temp;
4151
 
4134
 
4152
	assert_pch_transcoder_disabled(dev_priv, pipe);
4135
	assert_pch_transcoder_disabled(dev_priv, pipe);
4153
 
4136
 
4154
	if (IS_IVYBRIDGE(dev))
4137
	if (IS_IVYBRIDGE(dev))
4155
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4138
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4156
 
4139
 
4157
	/* Write the TU size bits before fdi link training, so that error
4140
	/* Write the TU size bits before fdi link training, so that error
4158
	 * detection works. */
4141
	 * detection works. */
4159
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
4142
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
4160
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4143
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4161
 
4144
 
4162
	/*
4145
	/*
4163
	 * Sometimes spurious CPU pipe underruns happen during FDI
4146
	 * Sometimes spurious CPU pipe underruns happen during FDI
4164
	 * training, at least with VGA+HDMI cloning. Suppress them.
4147
	 * training, at least with VGA+HDMI cloning. Suppress them.
4165
	 */
4148
	 */
4166
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4149
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4167
 
4150
 
4168
	/* For PCH output, training FDI link */
4151
	/* For PCH output, training FDI link */
4169
	dev_priv->display.fdi_link_train(crtc);
4152
	dev_priv->display.fdi_link_train(crtc);
4170
 
4153
 
4171
	/* We need to program the right clock selection before writing the pixel
4154
	/* We need to program the right clock selection before writing the pixel
4172
	 * mutliplier into the DPLL. */
4155
	 * mutliplier into the DPLL. */
4173
	if (HAS_PCH_CPT(dev)) {
4156
	if (HAS_PCH_CPT(dev)) {
4174
		u32 sel;
4157
		u32 sel;
4175
 
4158
 
4176
		temp = I915_READ(PCH_DPLL_SEL);
4159
		temp = I915_READ(PCH_DPLL_SEL);
4177
		temp |= TRANS_DPLL_ENABLE(pipe);
4160
		temp |= TRANS_DPLL_ENABLE(pipe);
4178
		sel = TRANS_DPLLB_SEL(pipe);
4161
		sel = TRANS_DPLLB_SEL(pipe);
4179
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
4162
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
4180
			temp |= sel;
4163
			temp |= sel;
4181
		else
4164
		else
4182
			temp &= ~sel;
4165
			temp &= ~sel;
4183
		I915_WRITE(PCH_DPLL_SEL, temp);
4166
		I915_WRITE(PCH_DPLL_SEL, temp);
4184
	}
4167
	}
4185
 
4168
 
4186
	/* XXX: pch pll's can be enabled any time before we enable the PCH
4169
	/* XXX: pch pll's can be enabled any time before we enable the PCH
4187
	 * transcoder, and we actually should do this to not upset any PCH
4170
	 * transcoder, and we actually should do this to not upset any PCH
4188
	 * transcoder that already use the clock when we share it.
4171
	 * transcoder that already use the clock when we share it.
4189
	 *
4172
	 *
4190
	 * Note that enable_shared_dpll tries to do the right thing, but
4173
	 * Note that enable_shared_dpll tries to do the right thing, but
4191
	 * get_shared_dpll unconditionally resets the pll - we need that to have
4174
	 * get_shared_dpll unconditionally resets the pll - we need that to have
4192
	 * the right LVDS enable sequence. */
4175
	 * the right LVDS enable sequence. */
4193
	intel_enable_shared_dpll(intel_crtc);
4176
	intel_enable_shared_dpll(intel_crtc);
4194
 
4177
 
4195
	/* set transcoder timing, panel must allow it */
4178
	/* set transcoder timing, panel must allow it */
4196
	assert_panel_unlocked(dev_priv, pipe);
4179
	assert_panel_unlocked(dev_priv, pipe);
4197
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4180
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4198
 
4181
 
4199
	intel_fdi_normal_train(crtc);
4182
	intel_fdi_normal_train(crtc);
4200
 
4183
 
4201
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4184
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4202
 
4185
 
4203
	/* For PCH DP, enable TRANS_DP_CTL */
4186
	/* For PCH DP, enable TRANS_DP_CTL */
4204
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4187
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4205
		const struct drm_display_mode *adjusted_mode =
4188
		const struct drm_display_mode *adjusted_mode =
4206
			&intel_crtc->config->base.adjusted_mode;
4189
			&intel_crtc->config->base.adjusted_mode;
4207
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4190
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4208
		i915_reg_t reg = TRANS_DP_CTL(pipe);
4191
		i915_reg_t reg = TRANS_DP_CTL(pipe);
4209
		temp = I915_READ(reg);
4192
		temp = I915_READ(reg);
4210
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
4193
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
4211
			  TRANS_DP_SYNC_MASK |
4194
			  TRANS_DP_SYNC_MASK |
4212
			  TRANS_DP_BPC_MASK);
4195
			  TRANS_DP_BPC_MASK);
4213
		temp |= TRANS_DP_OUTPUT_ENABLE;
4196
		temp |= TRANS_DP_OUTPUT_ENABLE;
4214
		temp |= bpc << 9; /* same format but at 11:9 */
4197
		temp |= bpc << 9; /* same format but at 11:9 */
4215
 
4198
 
4216
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4199
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4217
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4200
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4218
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4201
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4219
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4202
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4220
 
4203
 
4221
		switch (intel_trans_dp_port_sel(crtc)) {
4204
		switch (intel_trans_dp_port_sel(crtc)) {
4222
		case PORT_B:
4205
		case PORT_B:
4223
			temp |= TRANS_DP_PORT_SEL_B;
4206
			temp |= TRANS_DP_PORT_SEL_B;
4224
			break;
4207
			break;
4225
		case PORT_C:
4208
		case PORT_C:
4226
			temp |= TRANS_DP_PORT_SEL_C;
4209
			temp |= TRANS_DP_PORT_SEL_C;
4227
			break;
4210
			break;
4228
		case PORT_D:
4211
		case PORT_D:
4229
			temp |= TRANS_DP_PORT_SEL_D;
4212
			temp |= TRANS_DP_PORT_SEL_D;
4230
			break;
4213
			break;
4231
		default:
4214
		default:
4232
			BUG();
4215
			BUG();
4233
		}
4216
		}
4234
 
4217
 
4235
		I915_WRITE(reg, temp);
4218
		I915_WRITE(reg, temp);
4236
	}
4219
	}
4237
 
4220
 
4238
	ironlake_enable_pch_transcoder(dev_priv, pipe);
4221
	ironlake_enable_pch_transcoder(dev_priv, pipe);
4239
}
4222
}
4240
 
4223
 
4241
static void lpt_pch_enable(struct drm_crtc *crtc)
4224
static void lpt_pch_enable(struct drm_crtc *crtc)
4242
{
4225
{
4243
	struct drm_device *dev = crtc->dev;
4226
	struct drm_device *dev = crtc->dev;
4244
	struct drm_i915_private *dev_priv = dev->dev_private;
4227
	struct drm_i915_private *dev_priv = dev->dev_private;
4245
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4228
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4246
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4229
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4247
 
4230
 
4248
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4231
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4249
 
4232
 
4250
	lpt_program_iclkip(crtc);
4233
	lpt_program_iclkip(crtc);
4251
 
4234
 
4252
	/* Set transcoder timing. */
4235
	/* Set transcoder timing. */
4253
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4236
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4254
 
4237
 
4255
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4238
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4256
}
4239
}
4257
 
4240
 
4258
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4241
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4259
						struct intel_crtc_state *crtc_state)
4242
						struct intel_crtc_state *crtc_state)
4260
{
4243
{
4261
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
4244
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
4262
	struct intel_shared_dpll *pll;
4245
	struct intel_shared_dpll *pll;
4263
	struct intel_shared_dpll_config *shared_dpll;
4246
	struct intel_shared_dpll_config *shared_dpll;
4264
	enum intel_dpll_id i;
4247
	enum intel_dpll_id i;
4265
	int max = dev_priv->num_shared_dpll;
4248
	int max = dev_priv->num_shared_dpll;
4266
 
4249
 
4267
	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4250
	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4268
 
4251
 
4269
	if (HAS_PCH_IBX(dev_priv->dev)) {
4252
	if (HAS_PCH_IBX(dev_priv->dev)) {
4270
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4253
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4271
		i = (enum intel_dpll_id) crtc->pipe;
4254
		i = (enum intel_dpll_id) crtc->pipe;
4272
		pll = &dev_priv->shared_dplls[i];
4255
		pll = &dev_priv->shared_dplls[i];
4273
 
4256
 
4274
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4257
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4275
			      crtc->base.base.id, pll->name);
4258
			      crtc->base.base.id, pll->name);
4276
 
4259
 
4277
		WARN_ON(shared_dpll[i].crtc_mask);
4260
		WARN_ON(shared_dpll[i].crtc_mask);
4278
 
4261
 
4279
		goto found;
4262
		goto found;
4280
	}
4263
	}
4281
 
4264
 
4282
	if (IS_BROXTON(dev_priv->dev)) {
4265
	if (IS_BROXTON(dev_priv->dev)) {
4283
		/* PLL is attached to port in bxt */
4266
		/* PLL is attached to port in bxt */
4284
		struct intel_encoder *encoder;
4267
		struct intel_encoder *encoder;
4285
		struct intel_digital_port *intel_dig_port;
4268
		struct intel_digital_port *intel_dig_port;
4286
 
4269
 
4287
		encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4270
		encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4288
		if (WARN_ON(!encoder))
4271
		if (WARN_ON(!encoder))
4289
			return NULL;
4272
			return NULL;
4290
 
4273
 
4291
		intel_dig_port = enc_to_dig_port(&encoder->base);
4274
		intel_dig_port = enc_to_dig_port(&encoder->base);
4292
		/* 1:1 mapping between ports and PLLs */
4275
		/* 1:1 mapping between ports and PLLs */
4293
		i = (enum intel_dpll_id)intel_dig_port->port;
4276
		i = (enum intel_dpll_id)intel_dig_port->port;
4294
		pll = &dev_priv->shared_dplls[i];
4277
		pll = &dev_priv->shared_dplls[i];
4295
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4278
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4296
			crtc->base.base.id, pll->name);
4279
			crtc->base.base.id, pll->name);
4297
		WARN_ON(shared_dpll[i].crtc_mask);
4280
		WARN_ON(shared_dpll[i].crtc_mask);
4298
 
4281
 
4299
		goto found;
4282
		goto found;
4300
	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4283
	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4301
		/* Do not consider SPLL */
4284
		/* Do not consider SPLL */
4302
		max = 2;
4285
		max = 2;
4303
 
4286
 
4304
	for (i = 0; i < max; i++) {
4287
	for (i = 0; i < max; i++) {
4305
		pll = &dev_priv->shared_dplls[i];
4288
		pll = &dev_priv->shared_dplls[i];
4306
 
4289
 
4307
		/* Only want to check enabled timings first */
4290
		/* Only want to check enabled timings first */
4308
		if (shared_dpll[i].crtc_mask == 0)
4291
		if (shared_dpll[i].crtc_mask == 0)
4309
			continue;
4292
			continue;
4310
 
4293
 
4311
		if (memcmp(&crtc_state->dpll_hw_state,
4294
		if (memcmp(&crtc_state->dpll_hw_state,
4312
			   &shared_dpll[i].hw_state,
4295
			   &shared_dpll[i].hw_state,
4313
			   sizeof(crtc_state->dpll_hw_state)) == 0) {
4296
			   sizeof(crtc_state->dpll_hw_state)) == 0) {
4314
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4297
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4315
				      crtc->base.base.id, pll->name,
4298
				      crtc->base.base.id, pll->name,
4316
				      shared_dpll[i].crtc_mask,
4299
				      shared_dpll[i].crtc_mask,
4317
				      pll->active);
4300
				      pll->active);
4318
			goto found;
4301
			goto found;
4319
		}
4302
		}
4320
	}
4303
	}
4321
 
4304
 
4322
	/* Ok no matching timings, maybe there's a free one? */
4305
	/* Ok no matching timings, maybe there's a free one? */
4323
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4306
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4324
		pll = &dev_priv->shared_dplls[i];
4307
		pll = &dev_priv->shared_dplls[i];
4325
		if (shared_dpll[i].crtc_mask == 0) {
4308
		if (shared_dpll[i].crtc_mask == 0) {
4326
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4309
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4327
				      crtc->base.base.id, pll->name);
4310
				      crtc->base.base.id, pll->name);
4328
			goto found;
4311
			goto found;
4329
		}
4312
		}
4330
	}
4313
	}
4331
 
4314
 
4332
	return NULL;
4315
	return NULL;
4333
 
4316
 
4334
found:
4317
found:
4335
	if (shared_dpll[i].crtc_mask == 0)
4318
	if (shared_dpll[i].crtc_mask == 0)
4336
		shared_dpll[i].hw_state =
4319
		shared_dpll[i].hw_state =
4337
			crtc_state->dpll_hw_state;
4320
			crtc_state->dpll_hw_state;
4338
 
4321
 
4339
	crtc_state->shared_dpll = i;
4322
	crtc_state->shared_dpll = i;
4340
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4323
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4341
			 pipe_name(crtc->pipe));
4324
			 pipe_name(crtc->pipe));
4342
 
4325
 
4343
	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
4326
	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
4344
 
4327
 
4345
	return pll;
4328
	return pll;
4346
}
4329
}
4347
 
4330
 
4348
static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4331
static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4349
{
4332
{
4350
	struct drm_i915_private *dev_priv = to_i915(state->dev);
4333
	struct drm_i915_private *dev_priv = to_i915(state->dev);
4351
	struct intel_shared_dpll_config *shared_dpll;
4334
	struct intel_shared_dpll_config *shared_dpll;
4352
	struct intel_shared_dpll *pll;
4335
	struct intel_shared_dpll *pll;
4353
	enum intel_dpll_id i;
4336
	enum intel_dpll_id i;
4354
 
4337
 
4355
	if (!to_intel_atomic_state(state)->dpll_set)
4338
	if (!to_intel_atomic_state(state)->dpll_set)
4356
		return;
4339
		return;
4357
 
4340
 
4358
	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
4341
	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
4359
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4342
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4360
		pll = &dev_priv->shared_dplls[i];
4343
		pll = &dev_priv->shared_dplls[i];
4361
		pll->config = shared_dpll[i];
4344
		pll->config = shared_dpll[i];
4362
	}
4345
	}
4363
}
4346
}
4364
 
4347
 
4365
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4348
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4366
{
4349
{
4367
	struct drm_i915_private *dev_priv = dev->dev_private;
4350
	struct drm_i915_private *dev_priv = dev->dev_private;
4368
	i915_reg_t dslreg = PIPEDSL(pipe);
4351
	i915_reg_t dslreg = PIPEDSL(pipe);
4369
	u32 temp;
4352
	u32 temp;
4370
 
4353
 
4371
	temp = I915_READ(dslreg);
4354
	temp = I915_READ(dslreg);
4372
	udelay(500);
4355
	udelay(500);
4373
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4356
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4374
		if (wait_for(I915_READ(dslreg) != temp, 5))
4357
		if (wait_for(I915_READ(dslreg) != temp, 5))
4375
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4358
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4376
	}
4359
	}
4377
}
4360
}
4378
 
4361
 
4379
static int
4362
static int
4380
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4363
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4381
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
4364
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
4382
		  int src_w, int src_h, int dst_w, int dst_h)
4365
		  int src_w, int src_h, int dst_w, int dst_h)
4383
{
4366
{
4384
	struct intel_crtc_scaler_state *scaler_state =
4367
	struct intel_crtc_scaler_state *scaler_state =
4385
		&crtc_state->scaler_state;
4368
		&crtc_state->scaler_state;
4386
	struct intel_crtc *intel_crtc =
4369
	struct intel_crtc *intel_crtc =
4387
		to_intel_crtc(crtc_state->base.crtc);
4370
		to_intel_crtc(crtc_state->base.crtc);
4388
	int need_scaling;
4371
	int need_scaling;
4389
 
4372
 
4390
	need_scaling = intel_rotation_90_or_270(rotation) ?
4373
	need_scaling = intel_rotation_90_or_270(rotation) ?
4391
		(src_h != dst_w || src_w != dst_h):
4374
		(src_h != dst_w || src_w != dst_h):
4392
		(src_w != dst_w || src_h != dst_h);
4375
		(src_w != dst_w || src_h != dst_h);
4393
 
4376
 
4394
	/*
4377
	/*
4395
	 * if plane is being disabled or scaler is no more required or force detach
4378
	 * if plane is being disabled or scaler is no more required or force detach
4396
	 *  - free scaler binded to this plane/crtc
4379
	 *  - free scaler binded to this plane/crtc
4397
	 *  - in order to do this, update crtc->scaler_usage
4380
	 *  - in order to do this, update crtc->scaler_usage
4398
	 *
4381
	 *
4399
	 * Here scaler state in crtc_state is set free so that
4382
	 * Here scaler state in crtc_state is set free so that
4400
	 * scaler can be assigned to other user. Actual register
4383
	 * scaler can be assigned to other user. Actual register
4401
	 * update to free the scaler is done in plane/panel-fit programming.
4384
	 * update to free the scaler is done in plane/panel-fit programming.
4402
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
4385
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
4403
	 */
4386
	 */
4404
	if (force_detach || !need_scaling) {
4387
	if (force_detach || !need_scaling) {
4405
		if (*scaler_id >= 0) {
4388
		if (*scaler_id >= 0) {
4406
			scaler_state->scaler_users &= ~(1 << scaler_user);
4389
			scaler_state->scaler_users &= ~(1 << scaler_user);
4407
			scaler_state->scalers[*scaler_id].in_use = 0;
4390
			scaler_state->scalers[*scaler_id].in_use = 0;
4408
 
4391
 
4409
			DRM_DEBUG_KMS("scaler_user index %u.%u: "
4392
			DRM_DEBUG_KMS("scaler_user index %u.%u: "
4410
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
4393
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
4411
				intel_crtc->pipe, scaler_user, *scaler_id,
4394
				intel_crtc->pipe, scaler_user, *scaler_id,
4412
				scaler_state->scaler_users);
4395
				scaler_state->scaler_users);
4413
			*scaler_id = -1;
4396
			*scaler_id = -1;
4414
		}
4397
		}
4415
		return 0;
4398
		return 0;
4416
	}
4399
	}
4417
 
4400
 
4418
	/* range checks */
4401
	/* range checks */
4419
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4402
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4420
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4403
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4421
 
4404
 
4422
		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4405
		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4423
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4406
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4424
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4407
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4425
			"size is out of scaler range\n",
4408
			"size is out of scaler range\n",
4426
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4409
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4427
		return -EINVAL;
4410
		return -EINVAL;
4428
	}
4411
	}
4429
 
4412
 
4430
	/* mark this plane as a scaler user in crtc_state */
4413
	/* mark this plane as a scaler user in crtc_state */
4431
	scaler_state->scaler_users |= (1 << scaler_user);
4414
	scaler_state->scaler_users |= (1 << scaler_user);
4432
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
4415
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
4433
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4416
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4434
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4417
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4435
		scaler_state->scaler_users);
4418
		scaler_state->scaler_users);
4436
 
4419
 
4437
	return 0;
4420
	return 0;
4438
}
4421
}
4439
 
4422
 
4440
/**
4423
/**
4441
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4424
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4442
 *
4425
 *
4443
 * @state: crtc's scaler state
4426
 * @state: crtc's scaler state
4444
 *
4427
 *
4445
 * Return
4428
 * Return
4446
 *     0 - scaler_usage updated successfully
4429
 *     0 - scaler_usage updated successfully
4447
 *    error - requested scaling cannot be supported or other error condition
4430
 *    error - requested scaling cannot be supported or other error condition
4448
 */
4431
 */
4449
int skl_update_scaler_crtc(struct intel_crtc_state *state)
4432
int skl_update_scaler_crtc(struct intel_crtc_state *state)
4450
{
4433
{
4451
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4434
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4452
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4435
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4453
 
4436
 
4454
	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4437
	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4455
		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4438
		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4456
 
4439
 
4457
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4440
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4458
		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4441
		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4459
		state->pipe_src_w, state->pipe_src_h,
4442
		state->pipe_src_w, state->pipe_src_h,
4460
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4443
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4461
}
4444
}
4462
 
4445
 
4463
/**
4446
/**
4464
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4447
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4465
 *
4448
 *
4466
 * @state: crtc's scaler state
4449
 * @state: crtc's scaler state
4467
 * @plane_state: atomic plane state to update
4450
 * @plane_state: atomic plane state to update
4468
 *
4451
 *
4469
 * Return
4452
 * Return
4470
 *     0 - scaler_usage updated successfully
4453
 *     0 - scaler_usage updated successfully
4471
 *    error - requested scaling cannot be supported or other error condition
4454
 *    error - requested scaling cannot be supported or other error condition
4472
 */
4455
 */
4473
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4456
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4474
				   struct intel_plane_state *plane_state)
4457
				   struct intel_plane_state *plane_state)
4475
{
4458
{
4476
 
4459
 
4477
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4460
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4478
	struct intel_plane *intel_plane =
4461
	struct intel_plane *intel_plane =
4479
		to_intel_plane(plane_state->base.plane);
4462
		to_intel_plane(plane_state->base.plane);
4480
	struct drm_framebuffer *fb = plane_state->base.fb;
4463
	struct drm_framebuffer *fb = plane_state->base.fb;
4481
	int ret;
4464
	int ret;
4482
 
4465
 
4483
	bool force_detach = !fb || !plane_state->visible;
4466
	bool force_detach = !fb || !plane_state->visible;
4484
 
4467
 
4485
	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4468
	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4486
		      intel_plane->base.base.id, intel_crtc->pipe,
4469
		      intel_plane->base.base.id, intel_crtc->pipe,
4487
		      drm_plane_index(&intel_plane->base));
4470
		      drm_plane_index(&intel_plane->base));
4488
 
4471
 
4489
	ret = skl_update_scaler(crtc_state, force_detach,
4472
	ret = skl_update_scaler(crtc_state, force_detach,
4490
				drm_plane_index(&intel_plane->base),
4473
				drm_plane_index(&intel_plane->base),
4491
				&plane_state->scaler_id,
4474
				&plane_state->scaler_id,
4492
				plane_state->base.rotation,
4475
				plane_state->base.rotation,
4493
				drm_rect_width(&plane_state->src) >> 16,
4476
				drm_rect_width(&plane_state->src) >> 16,
4494
				drm_rect_height(&plane_state->src) >> 16,
4477
				drm_rect_height(&plane_state->src) >> 16,
4495
				drm_rect_width(&plane_state->dst),
4478
				drm_rect_width(&plane_state->dst),
4496
				drm_rect_height(&plane_state->dst));
4479
				drm_rect_height(&plane_state->dst));
4497
 
4480
 
4498
	if (ret || plane_state->scaler_id < 0)
4481
	if (ret || plane_state->scaler_id < 0)
4499
		return ret;
4482
		return ret;
4500
 
4483
 
4501
	/* check colorkey */
4484
	/* check colorkey */
4502
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4485
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4503
		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4486
		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4504
			      intel_plane->base.base.id);
4487
			      intel_plane->base.base.id);
4505
		return -EINVAL;
4488
		return -EINVAL;
4506
	}
4489
	}
4507
 
4490
 
4508
	/* Check src format */
4491
	/* Check src format */
4509
	switch (fb->pixel_format) {
4492
	switch (fb->pixel_format) {
4510
	case DRM_FORMAT_RGB565:
4493
	case DRM_FORMAT_RGB565:
4511
	case DRM_FORMAT_XBGR8888:
4494
	case DRM_FORMAT_XBGR8888:
4512
	case DRM_FORMAT_XRGB8888:
4495
	case DRM_FORMAT_XRGB8888:
4513
	case DRM_FORMAT_ABGR8888:
4496
	case DRM_FORMAT_ABGR8888:
4514
	case DRM_FORMAT_ARGB8888:
4497
	case DRM_FORMAT_ARGB8888:
4515
	case DRM_FORMAT_XRGB2101010:
4498
	case DRM_FORMAT_XRGB2101010:
4516
	case DRM_FORMAT_XBGR2101010:
4499
	case DRM_FORMAT_XBGR2101010:
4517
	case DRM_FORMAT_YUYV:
4500
	case DRM_FORMAT_YUYV:
4518
	case DRM_FORMAT_YVYU:
4501
	case DRM_FORMAT_YVYU:
4519
	case DRM_FORMAT_UYVY:
4502
	case DRM_FORMAT_UYVY:
4520
	case DRM_FORMAT_VYUY:
4503
	case DRM_FORMAT_VYUY:
4521
		break;
4504
		break;
4522
	default:
4505
	default:
4523
		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4506
		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4524
			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4507
			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4525
		return -EINVAL;
4508
		return -EINVAL;
4526
	}
4509
	}
4527
 
4510
 
4528
	return 0;
4511
	return 0;
4529
}
4512
}
4530
 
4513
 
4531
static void skylake_scaler_disable(struct intel_crtc *crtc)
4514
static void skylake_scaler_disable(struct intel_crtc *crtc)
4532
{
4515
{
4533
	int i;
4516
	int i;
4534
 
4517
 
4535
	for (i = 0; i < crtc->num_scalers; i++)
4518
	for (i = 0; i < crtc->num_scalers; i++)
4536
		skl_detach_scaler(crtc, i);
4519
		skl_detach_scaler(crtc, i);
4537
}
4520
}
4538
 
4521
 
4539
static void skylake_pfit_enable(struct intel_crtc *crtc)
4522
static void skylake_pfit_enable(struct intel_crtc *crtc)
4540
{
4523
{
4541
	struct drm_device *dev = crtc->base.dev;
4524
	struct drm_device *dev = crtc->base.dev;
4542
	struct drm_i915_private *dev_priv = dev->dev_private;
4525
	struct drm_i915_private *dev_priv = dev->dev_private;
4543
	int pipe = crtc->pipe;
4526
	int pipe = crtc->pipe;
4544
	struct intel_crtc_scaler_state *scaler_state =
4527
	struct intel_crtc_scaler_state *scaler_state =
4545
		&crtc->config->scaler_state;
4528
		&crtc->config->scaler_state;
4546
 
4529
 
4547
	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4530
	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4548
 
4531
 
4549
	if (crtc->config->pch_pfit.enabled) {
4532
	if (crtc->config->pch_pfit.enabled) {
4550
		int id;
4533
		int id;
4551
 
4534
 
4552
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
4535
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
4553
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
4536
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
4554
			return;
4537
			return;
4555
		}
4538
		}
4556
 
4539
 
4557
		id = scaler_state->scaler_id;
4540
		id = scaler_state->scaler_id;
4558
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4541
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4559
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4542
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4560
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4543
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4561
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4544
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4562
 
4545
 
4563
		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4546
		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4564
	}
4547
	}
4565
}
4548
}
4566
 
4549
 
4567
static void ironlake_pfit_enable(struct intel_crtc *crtc)
4550
static void ironlake_pfit_enable(struct intel_crtc *crtc)
4568
{
4551
{
4569
	struct drm_device *dev = crtc->base.dev;
4552
	struct drm_device *dev = crtc->base.dev;
4570
	struct drm_i915_private *dev_priv = dev->dev_private;
4553
	struct drm_i915_private *dev_priv = dev->dev_private;
4571
	int pipe = crtc->pipe;
4554
	int pipe = crtc->pipe;
4572
 
4555
 
4573
	if (crtc->config->pch_pfit.enabled) {
4556
	if (crtc->config->pch_pfit.enabled) {
4574
		/* Force use of hard-coded filter coefficients
4557
		/* Force use of hard-coded filter coefficients
4575
		 * as some pre-programmed values are broken,
4558
		 * as some pre-programmed values are broken,
4576
		 * e.g. x201.
4559
		 * e.g. x201.
4577
		 */
4560
		 */
4578
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4561
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4579
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4562
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4580
						 PF_PIPE_SEL_IVB(pipe));
4563
						 PF_PIPE_SEL_IVB(pipe));
4581
		else
4564
		else
4582
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4565
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4583
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4566
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4584
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4567
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4585
	}
4568
	}
4586
}
4569
}
4587
 
4570
 
4588
void hsw_enable_ips(struct intel_crtc *crtc)
4571
void hsw_enable_ips(struct intel_crtc *crtc)
4589
{
4572
{
4590
	struct drm_device *dev = crtc->base.dev;
4573
	struct drm_device *dev = crtc->base.dev;
4591
	struct drm_i915_private *dev_priv = dev->dev_private;
4574
	struct drm_i915_private *dev_priv = dev->dev_private;
4592
 
4575
 
4593
	if (!crtc->config->ips_enabled)
4576
	if (!crtc->config->ips_enabled)
4594
		return;
4577
		return;
4595
 
4578
 
4596
	/* We can only enable IPS after we enable a plane and wait for a vblank */
4579
	/* We can only enable IPS after we enable a plane and wait for a vblank */
4597
	intel_wait_for_vblank(dev, crtc->pipe);
4580
	intel_wait_for_vblank(dev, crtc->pipe);
4598
 
4581
 
4599
	assert_plane_enabled(dev_priv, crtc->plane);
4582
	assert_plane_enabled(dev_priv, crtc->plane);
4600
	if (IS_BROADWELL(dev)) {
4583
	if (IS_BROADWELL(dev)) {
4601
		mutex_lock(&dev_priv->rps.hw_lock);
4584
		mutex_lock(&dev_priv->rps.hw_lock);
4602
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4585
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4603
		mutex_unlock(&dev_priv->rps.hw_lock);
4586
		mutex_unlock(&dev_priv->rps.hw_lock);
4604
		/* Quoting Art Runyan: "its not safe to expect any particular
4587
		/* Quoting Art Runyan: "its not safe to expect any particular
4605
		 * value in IPS_CTL bit 31 after enabling IPS through the
4588
		 * value in IPS_CTL bit 31 after enabling IPS through the
4606
		 * mailbox." Moreover, the mailbox may return a bogus state,
4589
		 * mailbox." Moreover, the mailbox may return a bogus state,
4607
		 * so we need to just enable it and continue on.
4590
		 * so we need to just enable it and continue on.
4608
		 */
4591
		 */
4609
	} else {
4592
	} else {
4610
		I915_WRITE(IPS_CTL, IPS_ENABLE);
4593
		I915_WRITE(IPS_CTL, IPS_ENABLE);
4611
		/* The bit only becomes 1 in the next vblank, so this wait here
4594
		/* The bit only becomes 1 in the next vblank, so this wait here
4612
		 * is essentially intel_wait_for_vblank. If we don't have this
4595
		 * is essentially intel_wait_for_vblank. If we don't have this
4613
		 * and don't wait for vblanks until the end of crtc_enable, then
4596
		 * and don't wait for vblanks until the end of crtc_enable, then
4614
		 * the HW state readout code will complain that the expected
4597
		 * the HW state readout code will complain that the expected
4615
		 * IPS_CTL value is not the one we read. */
4598
		 * IPS_CTL value is not the one we read. */
4616
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4599
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4617
			DRM_ERROR("Timed out waiting for IPS enable\n");
4600
			DRM_ERROR("Timed out waiting for IPS enable\n");
4618
	}
4601
	}
4619
}
4602
}
4620
 
4603
 
4621
void hsw_disable_ips(struct intel_crtc *crtc)
4604
void hsw_disable_ips(struct intel_crtc *crtc)
4622
{
4605
{
4623
	struct drm_device *dev = crtc->base.dev;
4606
	struct drm_device *dev = crtc->base.dev;
4624
	struct drm_i915_private *dev_priv = dev->dev_private;
4607
	struct drm_i915_private *dev_priv = dev->dev_private;
4625
 
4608
 
4626
	if (!crtc->config->ips_enabled)
4609
	if (!crtc->config->ips_enabled)
4627
		return;
4610
		return;
4628
 
4611
 
4629
	assert_plane_enabled(dev_priv, crtc->plane);
4612
	assert_plane_enabled(dev_priv, crtc->plane);
4630
	if (IS_BROADWELL(dev)) {
4613
	if (IS_BROADWELL(dev)) {
4631
		mutex_lock(&dev_priv->rps.hw_lock);
4614
		mutex_lock(&dev_priv->rps.hw_lock);
4632
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4615
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4633
		mutex_unlock(&dev_priv->rps.hw_lock);
4616
		mutex_unlock(&dev_priv->rps.hw_lock);
4634
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4617
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4635
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4618
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4636
			DRM_ERROR("Timed out waiting for IPS disable\n");
4619
			DRM_ERROR("Timed out waiting for IPS disable\n");
4637
	} else {
4620
	} else {
4638
		I915_WRITE(IPS_CTL, 0);
4621
		I915_WRITE(IPS_CTL, 0);
4639
		POSTING_READ(IPS_CTL);
4622
		POSTING_READ(IPS_CTL);
4640
	}
4623
	}
4641
 
4624
 
4642
	/* We need to wait for a vblank before we can disable the plane. */
4625
	/* We need to wait for a vblank before we can disable the plane. */
4643
	intel_wait_for_vblank(dev, crtc->pipe);
4626
	intel_wait_for_vblank(dev, crtc->pipe);
4644
}
4627
}
4645
 
4628
 
4646
/** Loads the palette/gamma unit for the CRTC with the prepared values */
4629
/** Loads the palette/gamma unit for the CRTC with the prepared values */
4647
static void intel_crtc_load_lut(struct drm_crtc *crtc)
4630
static void intel_crtc_load_lut(struct drm_crtc *crtc)
4648
{
4631
{
4649
	struct drm_device *dev = crtc->dev;
4632
	struct drm_device *dev = crtc->dev;
4650
	struct drm_i915_private *dev_priv = dev->dev_private;
4633
	struct drm_i915_private *dev_priv = dev->dev_private;
4651
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4634
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4652
	enum pipe pipe = intel_crtc->pipe;
4635
	enum pipe pipe = intel_crtc->pipe;
4653
	int i;
4636
	int i;
4654
	bool reenable_ips = false;
4637
	bool reenable_ips = false;
4655
 
4638
 
4656
	/* The clocks have to be on to load the palette. */
4639
	/* The clocks have to be on to load the palette. */
4657
	if (!crtc->state->active)
4640
	if (!crtc->state->active)
4658
		return;
4641
		return;
4659
 
4642
 
4660
	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
4643
	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
4661
		if (intel_crtc->config->has_dsi_encoder)
4644
		if (intel_crtc->config->has_dsi_encoder)
4662
			assert_dsi_pll_enabled(dev_priv);
4645
			assert_dsi_pll_enabled(dev_priv);
4663
		else
4646
		else
4664
			assert_pll_enabled(dev_priv, pipe);
4647
			assert_pll_enabled(dev_priv, pipe);
4665
	}
4648
	}
4666
 
4649
 
4667
	/* Workaround : Do not read or write the pipe palette/gamma data while
4650
	/* Workaround : Do not read or write the pipe palette/gamma data while
4668
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
4651
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
4669
	 */
4652
	 */
4670
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
4653
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
4671
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
4654
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
4672
	     GAMMA_MODE_MODE_SPLIT)) {
4655
	     GAMMA_MODE_MODE_SPLIT)) {
4673
		hsw_disable_ips(intel_crtc);
4656
		hsw_disable_ips(intel_crtc);
4674
		reenable_ips = true;
4657
		reenable_ips = true;
4675
	}
4658
	}
4676
 
4659
 
4677
	for (i = 0; i < 256; i++) {
4660
	for (i = 0; i < 256; i++) {
4678
		i915_reg_t palreg;
4661
		i915_reg_t palreg;
4679
 
4662
 
4680
		if (HAS_GMCH_DISPLAY(dev))
4663
		if (HAS_GMCH_DISPLAY(dev))
4681
			palreg = PALETTE(pipe, i);
4664
			palreg = PALETTE(pipe, i);
4682
		else
4665
		else
4683
			palreg = LGC_PALETTE(pipe, i);
4666
			palreg = LGC_PALETTE(pipe, i);
4684
 
4667
 
4685
		I915_WRITE(palreg,
4668
		I915_WRITE(palreg,
4686
			   (intel_crtc->lut_r[i] << 16) |
4669
			   (intel_crtc->lut_r[i] << 16) |
4687
			   (intel_crtc->lut_g[i] << 8) |
4670
			   (intel_crtc->lut_g[i] << 8) |
4688
			   intel_crtc->lut_b[i]);
4671
			   intel_crtc->lut_b[i]);
4689
	}
4672
	}
4690
 
4673
 
4691
	if (reenable_ips)
4674
	if (reenable_ips)
4692
		hsw_enable_ips(intel_crtc);
4675
		hsw_enable_ips(intel_crtc);
4693
}
4676
}
4694
 
4677
 
4695
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4678
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4696
{
4679
{
4697
	if (intel_crtc->overlay) {
4680
	if (intel_crtc->overlay) {
4698
		struct drm_device *dev = intel_crtc->base.dev;
4681
		struct drm_device *dev = intel_crtc->base.dev;
4699
		struct drm_i915_private *dev_priv = dev->dev_private;
4682
		struct drm_i915_private *dev_priv = dev->dev_private;
4700
 
4683
 
4701
		mutex_lock(&dev->struct_mutex);
4684
		mutex_lock(&dev->struct_mutex);
4702
		dev_priv->mm.interruptible = false;
4685
		dev_priv->mm.interruptible = false;
4703
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
4686
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
4704
		dev_priv->mm.interruptible = true;
4687
		dev_priv->mm.interruptible = true;
4705
		mutex_unlock(&dev->struct_mutex);
4688
		mutex_unlock(&dev->struct_mutex);
4706
	}
4689
	}
4707
 
4690
 
4708
	/* Let userspace switch the overlay on again. In most cases userspace
4691
	/* Let userspace switch the overlay on again. In most cases userspace
4709
	 * has to recompute where to put it anyway.
4692
	 * has to recompute where to put it anyway.
4710
	 */
4693
	 */
4711
}
4694
}
4712
 
4695
 
4713
/**
4696
/**
4714
 * intel_post_enable_primary - Perform operations after enabling primary plane
4697
 * intel_post_enable_primary - Perform operations after enabling primary plane
4715
 * @crtc: the CRTC whose primary plane was just enabled
4698
 * @crtc: the CRTC whose primary plane was just enabled
4716
 *
4699
 *
4717
 * Performs potentially sleeping operations that must be done after the primary
4700
 * Performs potentially sleeping operations that must be done after the primary
4718
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
4701
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
4719
 * called due to an explicit primary plane update, or due to an implicit
4702
 * called due to an explicit primary plane update, or due to an implicit
4720
 * re-enable that is caused when a sprite plane is updated to no longer
4703
 * re-enable that is caused when a sprite plane is updated to no longer
4721
 * completely hide the primary plane.
4704
 * completely hide the primary plane.
4722
 */
4705
 */
4723
static void
4706
static void
4724
intel_post_enable_primary(struct drm_crtc *crtc)
4707
intel_post_enable_primary(struct drm_crtc *crtc)
4725
{
4708
{
4726
	struct drm_device *dev = crtc->dev;
4709
	struct drm_device *dev = crtc->dev;
4727
	struct drm_i915_private *dev_priv = dev->dev_private;
4710
	struct drm_i915_private *dev_priv = dev->dev_private;
4728
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4711
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4729
	int pipe = intel_crtc->pipe;
4712
	int pipe = intel_crtc->pipe;
4730
 
4713
 
4731
	/*
4714
	/*
4732
	 * FIXME IPS should be fine as long as one plane is
4715
	 * FIXME IPS should be fine as long as one plane is
4733
	 * enabled, but in practice it seems to have problems
4716
	 * enabled, but in practice it seems to have problems
4734
	 * when going from primary only to sprite only and vice
4717
	 * when going from primary only to sprite only and vice
4735
	 * versa.
4718
	 * versa.
4736
	 */
4719
	 */
4737
	hsw_enable_ips(intel_crtc);
4720
	hsw_enable_ips(intel_crtc);
4738
 
4721
 
4739
	/*
4722
	/*
4740
	 * Gen2 reports pipe underruns whenever all planes are disabled.
4723
	 * Gen2 reports pipe underruns whenever all planes are disabled.
4741
	 * So don't enable underrun reporting before at least some planes
4724
	 * So don't enable underrun reporting before at least some planes
4742
	 * are enabled.
4725
	 * are enabled.
4743
	 * FIXME: Need to fix the logic to work when we turn off all planes
4726
	 * FIXME: Need to fix the logic to work when we turn off all planes
4744
	 * but leave the pipe running.
4727
	 * but leave the pipe running.
4745
	 */
4728
	 */
4746
	if (IS_GEN2(dev))
4729
	if (IS_GEN2(dev))
4747
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4730
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4748
 
4731
 
4749
	/* Underruns don't always raise interrupts, so check manually. */
4732
	/* Underruns don't always raise interrupts, so check manually. */
4750
	intel_check_cpu_fifo_underruns(dev_priv);
4733
	intel_check_cpu_fifo_underruns(dev_priv);
4751
	intel_check_pch_fifo_underruns(dev_priv);
4734
	intel_check_pch_fifo_underruns(dev_priv);
4752
}
4735
}
4753
 
4736
 
4754
/**
4737
/**
4755
 * intel_pre_disable_primary - Perform operations before disabling primary plane
4738
 * intel_pre_disable_primary - Perform operations before disabling primary plane
4756
 * @crtc: the CRTC whose primary plane is to be disabled
4739
 * @crtc: the CRTC whose primary plane is to be disabled
4757
 *
4740
 *
4758
 * Performs potentially sleeping operations that must be done before the
4741
 * Performs potentially sleeping operations that must be done before the
4759
 * primary plane is disabled, such as updating FBC and IPS.  Note that this may
4742
 * primary plane is disabled, such as updating FBC and IPS.  Note that this may
4760
 * be called due to an explicit primary plane update, or due to an implicit
4743
 * be called due to an explicit primary plane update, or due to an implicit
4761
 * disable that is caused when a sprite plane completely hides the primary
4744
 * disable that is caused when a sprite plane completely hides the primary
4762
 * plane.
4745
 * plane.
4763
 */
4746
 */
4764
static void
4747
static void
4765
intel_pre_disable_primary(struct drm_crtc *crtc)
4748
intel_pre_disable_primary(struct drm_crtc *crtc)
4766
{
4749
{
4767
	struct drm_device *dev = crtc->dev;
4750
	struct drm_device *dev = crtc->dev;
4768
	struct drm_i915_private *dev_priv = dev->dev_private;
4751
	struct drm_i915_private *dev_priv = dev->dev_private;
4769
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4752
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4770
	int pipe = intel_crtc->pipe;
4753
	int pipe = intel_crtc->pipe;
4771
 
4754
 
4772
	/*
4755
	/*
4773
	 * Gen2 reports pipe underruns whenever all planes are disabled.
4756
	 * Gen2 reports pipe underruns whenever all planes are disabled.
4774
	 * So diasble underrun reporting before all the planes get disabled.
4757
	 * So diasble underrun reporting before all the planes get disabled.
4775
	 * FIXME: Need to fix the logic to work when we turn off all planes
4758
	 * FIXME: Need to fix the logic to work when we turn off all planes
4776
	 * but leave the pipe running.
4759
	 * but leave the pipe running.
4777
	 */
4760
	 */
4778
	if (IS_GEN2(dev))
4761
	if (IS_GEN2(dev))
4779
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4762
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4780
 
4763
 
4781
	/*
4764
	/*
4782
	 * Vblank time updates from the shadow to live plane control register
4765
	 * Vblank time updates from the shadow to live plane control register
4783
	 * are blocked if the memory self-refresh mode is active at that
4766
	 * are blocked if the memory self-refresh mode is active at that
4784
	 * moment. So to make sure the plane gets truly disabled, disable
4767
	 * moment. So to make sure the plane gets truly disabled, disable
4785
	 * first the self-refresh mode. The self-refresh enable bit in turn
4768
	 * first the self-refresh mode. The self-refresh enable bit in turn
4786
	 * will be checked/applied by the HW only at the next frame start
4769
	 * will be checked/applied by the HW only at the next frame start
4787
	 * event which is after the vblank start event, so we need to have a
4770
	 * event which is after the vblank start event, so we need to have a
4788
	 * wait-for-vblank between disabling the plane and the pipe.
4771
	 * wait-for-vblank between disabling the plane and the pipe.
4789
	 */
4772
	 */
4790
	if (HAS_GMCH_DISPLAY(dev)) {
4773
	if (HAS_GMCH_DISPLAY(dev)) {
4791
		intel_set_memory_cxsr(dev_priv, false);
4774
		intel_set_memory_cxsr(dev_priv, false);
4792
		dev_priv->wm.vlv.cxsr = false;
4775
		dev_priv->wm.vlv.cxsr = false;
4793
		intel_wait_for_vblank(dev, pipe);
4776
		intel_wait_for_vblank(dev, pipe);
4794
	}
4777
	}
4795
 
4778
 
4796
	/*
4779
	/*
4797
	 * FIXME IPS should be fine as long as one plane is
4780
	 * FIXME IPS should be fine as long as one plane is
4798
	 * enabled, but in practice it seems to have problems
4781
	 * enabled, but in practice it seems to have problems
4799
	 * when going from primary only to sprite only and vice
4782
	 * when going from primary only to sprite only and vice
4800
	 * versa.
4783
	 * versa.
4801
	 */
4784
	 */
4802
	hsw_disable_ips(intel_crtc);
4785
	hsw_disable_ips(intel_crtc);
4803
}
4786
}
4804
 
4787
 
4805
static void intel_post_plane_update(struct intel_crtc *crtc)
4788
static void intel_post_plane_update(struct intel_crtc *crtc)
4806
{
4789
{
4807
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4790
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4808
	struct intel_crtc_state *pipe_config =
4791
	struct intel_crtc_state *pipe_config =
4809
		to_intel_crtc_state(crtc->base.state);
4792
		to_intel_crtc_state(crtc->base.state);
4810
	struct drm_device *dev = crtc->base.dev;
4793
	struct drm_device *dev = crtc->base.dev;
4811
 
-
 
4812
	if (atomic->wait_vblank)
-
 
4813
		intel_wait_for_vblank(dev, crtc->pipe);
-
 
4814
 
4794
 
4815
	intel_frontbuffer_flip(dev, atomic->fb_bits);
4795
	intel_frontbuffer_flip(dev, atomic->fb_bits);
4816
 
4796
 
4817
		crtc->wm.cxsr_allowed = true;
4797
	crtc->wm.cxsr_allowed = true;
4818
 
4798
 
4819
	if (pipe_config->update_wm_post && pipe_config->base.active)
4799
	if (pipe_config->update_wm_post && pipe_config->base.active)
4820
		intel_update_watermarks(&crtc->base);
4800
		intel_update_watermarks(&crtc->base);
4821
 
4801
 
4822
	if (atomic->update_fbc)
4802
	if (atomic->update_fbc)
4823
		intel_fbc_update(crtc);
4803
		intel_fbc_post_update(crtc);
4824
 
4804
 
4825
	if (atomic->post_enable_primary)
4805
	if (atomic->post_enable_primary)
4826
		intel_post_enable_primary(&crtc->base);
4806
		intel_post_enable_primary(&crtc->base);
4827
 
4807
 
4828
	memset(atomic, 0, sizeof(*atomic));
4808
	memset(atomic, 0, sizeof(*atomic));
4829
}
4809
}
4830
 
4810
 
4831
static void intel_pre_plane_update(struct intel_crtc *crtc)
4811
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
-
 
4812
{
4832
{
4813
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4833
	struct drm_device *dev = crtc->base.dev;
4814
	struct drm_device *dev = crtc->base.dev;
4834
	struct drm_i915_private *dev_priv = dev->dev_private;
4815
	struct drm_i915_private *dev_priv = dev->dev_private;
4835
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4816
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4836
	struct intel_crtc_state *pipe_config =
4817
	struct intel_crtc_state *pipe_config =
4837
		to_intel_crtc_state(crtc->base.state);
4818
		to_intel_crtc_state(crtc->base.state);
-
 
4819
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
-
 
4820
	struct drm_plane *primary = crtc->base.primary;
-
 
4821
	struct drm_plane_state *old_pri_state =
-
 
4822
		drm_atomic_get_existing_plane_state(old_state, primary);
-
 
4823
	bool modeset = needs_modeset(&pipe_config->base);
4838
 
4824
 
4839
	if (atomic->disable_fbc)
4825
	if (atomic->update_fbc)
4840
		intel_fbc_deactivate(crtc);
4826
		intel_fbc_pre_update(crtc);
-
 
4827
 
-
 
4828
	if (old_pri_state) {
-
 
4829
		struct intel_plane_state *primary_state =
4841
 
4830
			to_intel_plane_state(primary->state);
4842
	if (crtc->atomic.disable_ips)
4831
		struct intel_plane_state *old_primary_state =
-
 
4832
			to_intel_plane_state(old_pri_state);
4843
		hsw_disable_ips(crtc);
4833
 
-
 
4834
		if (old_primary_state->visible &&
4844
 
4835
		    (modeset || !primary_state->visible))
4845
	if (atomic->pre_disable_primary)
4836
			intel_pre_disable_primary(&crtc->base);
4846
		intel_pre_disable_primary(&crtc->base);
4837
	}
4847
 
4838
 
4848
	if (pipe_config->disable_cxsr) {
4839
	if (pipe_config->disable_cxsr) {
4849
		crtc->wm.cxsr_allowed = false;
4840
		crtc->wm.cxsr_allowed = false;
-
 
4841
 
-
 
4842
		if (old_crtc_state->base.active)
4850
		intel_set_memory_cxsr(dev_priv, false);
4843
			intel_set_memory_cxsr(dev_priv, false);
4851
	}
4844
	}
4852
 
4845
 
4853
	if (!needs_modeset(&pipe_config->base) && pipe_config->update_wm_pre)
4846
	if (!needs_modeset(&pipe_config->base) && pipe_config->update_wm_pre)
4854
		intel_update_watermarks(&crtc->base);
4847
		intel_update_watermarks(&crtc->base);
4855
}
4848
}
4856
 
4849
 
4857
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4850
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4858
{
4851
{
4859
	struct drm_device *dev = crtc->dev;
4852
	struct drm_device *dev = crtc->dev;
4860
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4853
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4861
	struct drm_plane *p;
4854
	struct drm_plane *p;
4862
	int pipe = intel_crtc->pipe;
4855
	int pipe = intel_crtc->pipe;
4863
 
4856
 
4864
	intel_crtc_dpms_overlay_disable(intel_crtc);
4857
	intel_crtc_dpms_overlay_disable(intel_crtc);
4865
 
4858
 
4866
	drm_for_each_plane_mask(p, dev, plane_mask)
4859
	drm_for_each_plane_mask(p, dev, plane_mask)
4867
		to_intel_plane(p)->disable_plane(p, crtc);
4860
		to_intel_plane(p)->disable_plane(p, crtc);
4868
 
4861
 
4869
	/*
4862
	/*
4870
	 * FIXME: Once we grow proper nuclear flip support out of this we need
4863
	 * FIXME: Once we grow proper nuclear flip support out of this we need
4871
	 * to compute the mask of flip planes precisely. For the time being
4864
	 * to compute the mask of flip planes precisely. For the time being
4872
	 * consider this a flip to a NULL plane.
4865
	 * consider this a flip to a NULL plane.
4873
	 */
4866
	 */
4874
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4867
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4875
}
4868
}
4876
 
4869
 
4877
static void ironlake_crtc_enable(struct drm_crtc *crtc)
4870
static void ironlake_crtc_enable(struct drm_crtc *crtc)
4878
{
4871
{
4879
	struct drm_device *dev = crtc->dev;
4872
	struct drm_device *dev = crtc->dev;
4880
	struct drm_i915_private *dev_priv = dev->dev_private;
4873
	struct drm_i915_private *dev_priv = dev->dev_private;
4881
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4874
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4882
	struct intel_encoder *encoder;
4875
	struct intel_encoder *encoder;
4883
	int pipe = intel_crtc->pipe;
4876
	int pipe = intel_crtc->pipe;
4884
 
4877
 
4885
	if (WARN_ON(intel_crtc->active))
4878
	if (WARN_ON(intel_crtc->active))
4886
		return;
4879
		return;
4887
 
4880
 
4888
	if (intel_crtc->config->has_pch_encoder)
4881
	if (intel_crtc->config->has_pch_encoder)
4889
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4882
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4890
 
4883
 
4891
	if (intel_crtc->config->has_pch_encoder)
4884
	if (intel_crtc->config->has_pch_encoder)
4892
		intel_prepare_shared_dpll(intel_crtc);
4885
		intel_prepare_shared_dpll(intel_crtc);
4893
 
4886
 
4894
	if (intel_crtc->config->has_dp_encoder)
4887
	if (intel_crtc->config->has_dp_encoder)
4895
		intel_dp_set_m_n(intel_crtc, M1_N1);
4888
		intel_dp_set_m_n(intel_crtc, M1_N1);
4896
 
4889
 
4897
	intel_set_pipe_timings(intel_crtc);
4890
	intel_set_pipe_timings(intel_crtc);
4898
 
4891
 
4899
	if (intel_crtc->config->has_pch_encoder) {
4892
	if (intel_crtc->config->has_pch_encoder) {
4900
		intel_cpu_transcoder_set_m_n(intel_crtc,
4893
		intel_cpu_transcoder_set_m_n(intel_crtc,
4901
				     &intel_crtc->config->fdi_m_n, NULL);
4894
				     &intel_crtc->config->fdi_m_n, NULL);
4902
	}
4895
	}
4903
 
4896
 
4904
	ironlake_set_pipeconf(crtc);
4897
	ironlake_set_pipeconf(crtc);
4905
 
4898
 
4906
	intel_crtc->active = true;
4899
	intel_crtc->active = true;
4907
 
4900
 
4908
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4901
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4909
 
4902
 
4910
	for_each_encoder_on_crtc(dev, crtc, encoder)
4903
	for_each_encoder_on_crtc(dev, crtc, encoder)
4911
		if (encoder->pre_enable)
4904
		if (encoder->pre_enable)
4912
			encoder->pre_enable(encoder);
4905
			encoder->pre_enable(encoder);
4913
 
4906
 
4914
	if (intel_crtc->config->has_pch_encoder) {
4907
	if (intel_crtc->config->has_pch_encoder) {
4915
		/* Note: FDI PLL enabling _must_ be done before we enable the
4908
		/* Note: FDI PLL enabling _must_ be done before we enable the
4916
		 * cpu pipes, hence this is separate from all the other fdi/pch
4909
		 * cpu pipes, hence this is separate from all the other fdi/pch
4917
		 * enabling. */
4910
		 * enabling. */
4918
		ironlake_fdi_pll_enable(intel_crtc);
4911
		ironlake_fdi_pll_enable(intel_crtc);
4919
	} else {
4912
	} else {
4920
		assert_fdi_tx_disabled(dev_priv, pipe);
4913
		assert_fdi_tx_disabled(dev_priv, pipe);
4921
		assert_fdi_rx_disabled(dev_priv, pipe);
4914
		assert_fdi_rx_disabled(dev_priv, pipe);
4922
	}
4915
	}
4923
 
4916
 
4924
	ironlake_pfit_enable(intel_crtc);
4917
	ironlake_pfit_enable(intel_crtc);
4925
 
4918
 
4926
	/*
4919
	/*
4927
	 * On ILK+ LUT must be loaded before the pipe is running but with
4920
	 * On ILK+ LUT must be loaded before the pipe is running but with
4928
	 * clocks enabled
4921
	 * clocks enabled
4929
	 */
4922
	 */
4930
	intel_crtc_load_lut(crtc);
4923
	intel_crtc_load_lut(crtc);
4931
 
4924
 
4932
	intel_update_watermarks(crtc);
4925
	intel_update_watermarks(crtc);
4933
	intel_enable_pipe(intel_crtc);
4926
	intel_enable_pipe(intel_crtc);
4934
 
4927
 
4935
	if (intel_crtc->config->has_pch_encoder)
4928
	if (intel_crtc->config->has_pch_encoder)
4936
		ironlake_pch_enable(crtc);
4929
		ironlake_pch_enable(crtc);
4937
 
4930
 
4938
	assert_vblank_disabled(crtc);
4931
	assert_vblank_disabled(crtc);
4939
	drm_crtc_vblank_on(crtc);
4932
	drm_crtc_vblank_on(crtc);
4940
 
4933
 
4941
	for_each_encoder_on_crtc(dev, crtc, encoder)
4934
	for_each_encoder_on_crtc(dev, crtc, encoder)
4942
		encoder->enable(encoder);
4935
		encoder->enable(encoder);
4943
 
4936
 
4944
	if (HAS_PCH_CPT(dev))
4937
	if (HAS_PCH_CPT(dev))
4945
		cpt_verify_modeset(dev, intel_crtc->pipe);
4938
		cpt_verify_modeset(dev, intel_crtc->pipe);
4946
 
4939
 
4947
	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
4940
	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
4948
	if (intel_crtc->config->has_pch_encoder)
4941
	if (intel_crtc->config->has_pch_encoder)
4949
		intel_wait_for_vblank(dev, pipe);
4942
		intel_wait_for_vblank(dev, pipe);
4950
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4943
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4951
 
-
 
4952
	intel_fbc_enable(intel_crtc);
-
 
4953
}
4944
}
4954
 
4945
 
4955
/* IPS only exists on ULT machines and is tied to pipe A. */
4946
/* IPS only exists on ULT machines and is tied to pipe A. */
4956
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4947
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4957
{
4948
{
4958
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4949
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4959
}
4950
}
4960
 
4951
 
4961
static void haswell_crtc_enable(struct drm_crtc *crtc)
4952
static void haswell_crtc_enable(struct drm_crtc *crtc)
4962
{
4953
{
4963
	struct drm_device *dev = crtc->dev;
4954
	struct drm_device *dev = crtc->dev;
4964
	struct drm_i915_private *dev_priv = dev->dev_private;
4955
	struct drm_i915_private *dev_priv = dev->dev_private;
4965
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4956
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4966
	struct intel_encoder *encoder;
4957
	struct intel_encoder *encoder;
4967
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4958
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4968
	struct intel_crtc_state *pipe_config =
4959
	struct intel_crtc_state *pipe_config =
4969
		to_intel_crtc_state(crtc->state);
4960
		to_intel_crtc_state(crtc->state);
4970
 
4961
 
4971
	if (WARN_ON(intel_crtc->active))
4962
	if (WARN_ON(intel_crtc->active))
4972
		return;
4963
		return;
4973
 
4964
 
4974
	if (intel_crtc->config->has_pch_encoder)
4965
	if (intel_crtc->config->has_pch_encoder)
4975
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4966
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4976
						      false);
4967
						      false);
4977
 
4968
 
4978
	if (intel_crtc_to_shared_dpll(intel_crtc))
4969
	if (intel_crtc_to_shared_dpll(intel_crtc))
4979
		intel_enable_shared_dpll(intel_crtc);
4970
		intel_enable_shared_dpll(intel_crtc);
4980
 
4971
 
4981
	if (intel_crtc->config->has_dp_encoder)
4972
	if (intel_crtc->config->has_dp_encoder)
4982
		intel_dp_set_m_n(intel_crtc, M1_N1);
4973
		intel_dp_set_m_n(intel_crtc, M1_N1);
4983
 
4974
 
4984
	intel_set_pipe_timings(intel_crtc);
4975
	intel_set_pipe_timings(intel_crtc);
4985
 
4976
 
4986
	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
4977
	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
4987
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
4978
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
4988
			   intel_crtc->config->pixel_multiplier - 1);
4979
			   intel_crtc->config->pixel_multiplier - 1);
4989
	}
4980
	}
4990
 
4981
 
4991
	if (intel_crtc->config->has_pch_encoder) {
4982
	if (intel_crtc->config->has_pch_encoder) {
4992
		intel_cpu_transcoder_set_m_n(intel_crtc,
4983
		intel_cpu_transcoder_set_m_n(intel_crtc,
4993
				     &intel_crtc->config->fdi_m_n, NULL);
4984
				     &intel_crtc->config->fdi_m_n, NULL);
4994
	}
4985
	}
4995
 
4986
 
4996
	haswell_set_pipeconf(crtc);
4987
	haswell_set_pipeconf(crtc);
4997
 
4988
 
4998
	intel_set_pipe_csc(crtc);
4989
	intel_set_pipe_csc(crtc);
4999
 
4990
 
5000
	intel_crtc->active = true;
4991
	intel_crtc->active = true;
5001
 
4992
 
5002
	if (intel_crtc->config->has_pch_encoder)
4993
	if (intel_crtc->config->has_pch_encoder)
5003
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4994
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5004
	else
4995
	else
5005
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4996
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5006
 
4997
 
5007
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4998
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5008
		if (encoder->pre_enable)
4999
		if (encoder->pre_enable)
5009
			encoder->pre_enable(encoder);
5000
			encoder->pre_enable(encoder);
5010
	}
5001
	}
5011
 
5002
 
5012
	if (intel_crtc->config->has_pch_encoder)
5003
	if (intel_crtc->config->has_pch_encoder)
5013
		dev_priv->display.fdi_link_train(crtc);
5004
		dev_priv->display.fdi_link_train(crtc);
5014
 
5005
 
5015
	if (!intel_crtc->config->has_dsi_encoder)
5006
	if (!intel_crtc->config->has_dsi_encoder)
5016
		intel_ddi_enable_pipe_clock(intel_crtc);
5007
		intel_ddi_enable_pipe_clock(intel_crtc);
5017
 
5008
 
5018
	if (INTEL_INFO(dev)->gen >= 9)
5009
	if (INTEL_INFO(dev)->gen >= 9)
5019
		skylake_pfit_enable(intel_crtc);
5010
		skylake_pfit_enable(intel_crtc);
5020
	else
5011
	else
5021
		ironlake_pfit_enable(intel_crtc);
5012
		ironlake_pfit_enable(intel_crtc);
5022
 
5013
 
5023
	/*
5014
	/*
5024
	 * On ILK+ LUT must be loaded before the pipe is running but with
5015
	 * On ILK+ LUT must be loaded before the pipe is running but with
5025
	 * clocks enabled
5016
	 * clocks enabled
5026
	 */
5017
	 */
5027
	intel_crtc_load_lut(crtc);
5018
	intel_crtc_load_lut(crtc);
5028
 
5019
 
5029
	intel_ddi_set_pipe_settings(crtc);
5020
	intel_ddi_set_pipe_settings(crtc);
5030
	if (!intel_crtc->config->has_dsi_encoder)
5021
	if (!intel_crtc->config->has_dsi_encoder)
5031
		intel_ddi_enable_transcoder_func(crtc);
5022
		intel_ddi_enable_transcoder_func(crtc);
5032
 
5023
 
5033
	intel_update_watermarks(crtc);
5024
	intel_update_watermarks(crtc);
5034
	intel_enable_pipe(intel_crtc);
5025
	intel_enable_pipe(intel_crtc);
5035
 
5026
 
5036
	if (intel_crtc->config->has_pch_encoder)
5027
	if (intel_crtc->config->has_pch_encoder)
5037
		lpt_pch_enable(crtc);
5028
		lpt_pch_enable(crtc);
5038
 
5029
 
5039
	if (intel_crtc->config->dp_encoder_is_mst)
5030
	if (intel_crtc->config->dp_encoder_is_mst)
5040
		intel_ddi_set_vc_payload_alloc(crtc, true);
5031
		intel_ddi_set_vc_payload_alloc(crtc, true);
5041
 
5032
 
5042
	assert_vblank_disabled(crtc);
5033
	assert_vblank_disabled(crtc);
5043
	drm_crtc_vblank_on(crtc);
5034
	drm_crtc_vblank_on(crtc);
5044
 
5035
 
5045
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5036
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5046
		encoder->enable(encoder);
5037
		encoder->enable(encoder);
5047
		intel_opregion_notify_encoder(encoder, true);
5038
		intel_opregion_notify_encoder(encoder, true);
5048
	}
5039
	}
5049
 
5040
 
5050
	if (intel_crtc->config->has_pch_encoder) {
5041
	if (intel_crtc->config->has_pch_encoder) {
5051
		intel_wait_for_vblank(dev, pipe);
5042
		intel_wait_for_vblank(dev, pipe);
5052
		intel_wait_for_vblank(dev, pipe);
5043
		intel_wait_for_vblank(dev, pipe);
5053
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5044
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5054
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5045
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5055
						      true);
5046
						      true);
5056
	}
5047
	}
5057
 
5048
 
5058
	/* If we change the relative order between pipe/planes enabling, we need
5049
	/* If we change the relative order between pipe/planes enabling, we need
5059
	 * to change the workaround. */
5050
	 * to change the workaround. */
5060
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
5051
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
5061
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
5052
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
5062
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
5053
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
5063
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
5054
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
5064
	}
5055
	}
5065
 
-
 
5066
	intel_fbc_enable(intel_crtc);
-
 
5067
}
5056
}
5068
 
5057
 
5069
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5058
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5070
{
5059
{
5071
	struct drm_device *dev = crtc->base.dev;
5060
	struct drm_device *dev = crtc->base.dev;
5072
	struct drm_i915_private *dev_priv = dev->dev_private;
5061
	struct drm_i915_private *dev_priv = dev->dev_private;
5073
	int pipe = crtc->pipe;
5062
	int pipe = crtc->pipe;
5074
 
5063
 
5075
	/* To avoid upsetting the power well on haswell only disable the pfit if
5064
	/* To avoid upsetting the power well on haswell only disable the pfit if
5076
	 * it's in use. The hw state code will make sure we get this right. */
5065
	 * it's in use. The hw state code will make sure we get this right. */
5077
	if (force || crtc->config->pch_pfit.enabled) {
5066
	if (force || crtc->config->pch_pfit.enabled) {
5078
		I915_WRITE(PF_CTL(pipe), 0);
5067
		I915_WRITE(PF_CTL(pipe), 0);
5079
		I915_WRITE(PF_WIN_POS(pipe), 0);
5068
		I915_WRITE(PF_WIN_POS(pipe), 0);
5080
		I915_WRITE(PF_WIN_SZ(pipe), 0);
5069
		I915_WRITE(PF_WIN_SZ(pipe), 0);
5081
	}
5070
	}
5082
}
5071
}
5083
 
5072
 
5084
static void ironlake_crtc_disable(struct drm_crtc *crtc)
5073
static void ironlake_crtc_disable(struct drm_crtc *crtc)
5085
{
5074
{
5086
	struct drm_device *dev = crtc->dev;
5075
	struct drm_device *dev = crtc->dev;
5087
	struct drm_i915_private *dev_priv = dev->dev_private;
5076
	struct drm_i915_private *dev_priv = dev->dev_private;
5088
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5077
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5089
	struct intel_encoder *encoder;
5078
	struct intel_encoder *encoder;
5090
	int pipe = intel_crtc->pipe;
5079
	int pipe = intel_crtc->pipe;
5091
 
5080
 
5092
	if (intel_crtc->config->has_pch_encoder)
5081
	if (intel_crtc->config->has_pch_encoder)
5093
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5082
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5094
 
5083
 
5095
	for_each_encoder_on_crtc(dev, crtc, encoder)
5084
	for_each_encoder_on_crtc(dev, crtc, encoder)
5096
		encoder->disable(encoder);
5085
		encoder->disable(encoder);
5097
 
5086
 
5098
	drm_crtc_vblank_off(crtc);
5087
	drm_crtc_vblank_off(crtc);
5099
	assert_vblank_disabled(crtc);
5088
	assert_vblank_disabled(crtc);
5100
 
5089
 
5101
	/*
5090
	/*
5102
	 * Sometimes spurious CPU pipe underruns happen when the
5091
	 * Sometimes spurious CPU pipe underruns happen when the
5103
	 * pipe is already disabled, but FDI RX/TX is still enabled.
5092
	 * pipe is already disabled, but FDI RX/TX is still enabled.
5104
	 * Happens at least with VGA+HDMI cloning. Suppress them.
5093
	 * Happens at least with VGA+HDMI cloning. Suppress them.
5105
	 */
5094
	 */
5106
	if (intel_crtc->config->has_pch_encoder)
5095
	if (intel_crtc->config->has_pch_encoder)
5107
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5096
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5108
 
5097
 
5109
	intel_disable_pipe(intel_crtc);
5098
	intel_disable_pipe(intel_crtc);
5110
 
5099
 
5111
	ironlake_pfit_disable(intel_crtc, false);
5100
	ironlake_pfit_disable(intel_crtc, false);
5112
 
5101
 
5113
	if (intel_crtc->config->has_pch_encoder) {
5102
	if (intel_crtc->config->has_pch_encoder) {
5114
		ironlake_fdi_disable(crtc);
5103
		ironlake_fdi_disable(crtc);
5115
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5104
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5116
	}
5105
	}
5117
 
5106
 
5118
	for_each_encoder_on_crtc(dev, crtc, encoder)
5107
	for_each_encoder_on_crtc(dev, crtc, encoder)
5119
		if (encoder->post_disable)
5108
		if (encoder->post_disable)
5120
			encoder->post_disable(encoder);
5109
			encoder->post_disable(encoder);
5121
 
5110
 
5122
	if (intel_crtc->config->has_pch_encoder) {
5111
	if (intel_crtc->config->has_pch_encoder) {
5123
		ironlake_disable_pch_transcoder(dev_priv, pipe);
5112
		ironlake_disable_pch_transcoder(dev_priv, pipe);
5124
 
5113
 
5125
		if (HAS_PCH_CPT(dev)) {
5114
		if (HAS_PCH_CPT(dev)) {
5126
			i915_reg_t reg;
5115
			i915_reg_t reg;
5127
			u32 temp;
5116
			u32 temp;
5128
 
5117
 
5129
			/* disable TRANS_DP_CTL */
5118
			/* disable TRANS_DP_CTL */
5130
			reg = TRANS_DP_CTL(pipe);
5119
			reg = TRANS_DP_CTL(pipe);
5131
			temp = I915_READ(reg);
5120
			temp = I915_READ(reg);
5132
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5121
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5133
				  TRANS_DP_PORT_SEL_MASK);
5122
				  TRANS_DP_PORT_SEL_MASK);
5134
			temp |= TRANS_DP_PORT_SEL_NONE;
5123
			temp |= TRANS_DP_PORT_SEL_NONE;
5135
			I915_WRITE(reg, temp);
5124
			I915_WRITE(reg, temp);
5136
 
5125
 
5137
			/* disable DPLL_SEL */
5126
			/* disable DPLL_SEL */
5138
			temp = I915_READ(PCH_DPLL_SEL);
5127
			temp = I915_READ(PCH_DPLL_SEL);
5139
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
5128
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
5140
			I915_WRITE(PCH_DPLL_SEL, temp);
5129
			I915_WRITE(PCH_DPLL_SEL, temp);
5141
		}
5130
		}
5142
 
5131
 
5143
		ironlake_fdi_pll_disable(intel_crtc);
5132
		ironlake_fdi_pll_disable(intel_crtc);
5144
	}
5133
	}
5145
 
5134
 
5146
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5135
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5147
 
-
 
5148
	intel_fbc_disable_crtc(intel_crtc);
-
 
5149
}
5136
}
5150
 
5137
 
5151
static void haswell_crtc_disable(struct drm_crtc *crtc)
5138
static void haswell_crtc_disable(struct drm_crtc *crtc)
5152
{
5139
{
5153
	struct drm_device *dev = crtc->dev;
5140
	struct drm_device *dev = crtc->dev;
5154
	struct drm_i915_private *dev_priv = dev->dev_private;
5141
	struct drm_i915_private *dev_priv = dev->dev_private;
5155
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5142
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5156
	struct intel_encoder *encoder;
5143
	struct intel_encoder *encoder;
5157
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5144
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5158
 
5145
 
5159
	if (intel_crtc->config->has_pch_encoder)
5146
	if (intel_crtc->config->has_pch_encoder)
5160
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5147
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5161
						      false);
5148
						      false);
5162
 
5149
 
5163
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5150
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5164
		intel_opregion_notify_encoder(encoder, false);
5151
		intel_opregion_notify_encoder(encoder, false);
5165
		encoder->disable(encoder);
5152
		encoder->disable(encoder);
5166
	}
5153
	}
5167
 
5154
 
5168
	drm_crtc_vblank_off(crtc);
5155
	drm_crtc_vblank_off(crtc);
5169
	assert_vblank_disabled(crtc);
5156
	assert_vblank_disabled(crtc);
5170
 
5157
 
5171
	intel_disable_pipe(intel_crtc);
5158
	intel_disable_pipe(intel_crtc);
5172
 
5159
 
5173
	if (intel_crtc->config->dp_encoder_is_mst)
5160
	if (intel_crtc->config->dp_encoder_is_mst)
5174
		intel_ddi_set_vc_payload_alloc(crtc, false);
5161
		intel_ddi_set_vc_payload_alloc(crtc, false);
5175
 
5162
 
5176
	if (!intel_crtc->config->has_dsi_encoder)
5163
	if (!intel_crtc->config->has_dsi_encoder)
5177
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5164
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5178
 
5165
 
5179
	if (INTEL_INFO(dev)->gen >= 9)
5166
	if (INTEL_INFO(dev)->gen >= 9)
5180
		skylake_scaler_disable(intel_crtc);
5167
		skylake_scaler_disable(intel_crtc);
5181
	else
5168
	else
5182
		ironlake_pfit_disable(intel_crtc, false);
5169
		ironlake_pfit_disable(intel_crtc, false);
5183
 
5170
 
5184
	if (!intel_crtc->config->has_dsi_encoder)
5171
	if (!intel_crtc->config->has_dsi_encoder)
5185
		intel_ddi_disable_pipe_clock(intel_crtc);
5172
		intel_ddi_disable_pipe_clock(intel_crtc);
5186
 
5173
 
5187
	for_each_encoder_on_crtc(dev, crtc, encoder)
5174
	for_each_encoder_on_crtc(dev, crtc, encoder)
5188
		if (encoder->post_disable)
5175
		if (encoder->post_disable)
5189
			encoder->post_disable(encoder);
5176
			encoder->post_disable(encoder);
5190
 
5177
 
5191
	if (intel_crtc->config->has_pch_encoder) {
5178
	if (intel_crtc->config->has_pch_encoder) {
5192
		lpt_disable_pch_transcoder(dev_priv);
5179
		lpt_disable_pch_transcoder(dev_priv);
5193
		lpt_disable_iclkip(dev_priv);
5180
		lpt_disable_iclkip(dev_priv);
5194
		intel_ddi_fdi_disable(crtc);
5181
		intel_ddi_fdi_disable(crtc);
5195
 
5182
 
5196
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5183
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5197
						      true);
5184
						      true);
5198
	}
5185
	}
5199
 
-
 
5200
	intel_fbc_disable_crtc(intel_crtc);
-
 
5201
}
5186
}
5202
 
5187
 
5203
static void i9xx_pfit_enable(struct intel_crtc *crtc)
5188
static void i9xx_pfit_enable(struct intel_crtc *crtc)
5204
{
5189
{
5205
	struct drm_device *dev = crtc->base.dev;
5190
	struct drm_device *dev = crtc->base.dev;
5206
	struct drm_i915_private *dev_priv = dev->dev_private;
5191
	struct drm_i915_private *dev_priv = dev->dev_private;
5207
	struct intel_crtc_state *pipe_config = crtc->config;
5192
	struct intel_crtc_state *pipe_config = crtc->config;
5208
 
5193
 
5209
	if (!pipe_config->gmch_pfit.control)
5194
	if (!pipe_config->gmch_pfit.control)
5210
		return;
5195
		return;
5211
 
5196
 
5212
	/*
5197
	/*
5213
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
5198
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
5214
	 * according to register description and PRM.
5199
	 * according to register description and PRM.
5215
	 */
5200
	 */
5216
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5201
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5217
	assert_pipe_disabled(dev_priv, crtc->pipe);
5202
	assert_pipe_disabled(dev_priv, crtc->pipe);
5218
 
5203
 
5219
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5204
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5220
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5205
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5221
 
5206
 
5222
	/* Border color in case we don't scale up to the full screen. Black by
5207
	/* Border color in case we don't scale up to the full screen. Black by
5223
	 * default, change to something else for debugging. */
5208
	 * default, change to something else for debugging. */
5224
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
5209
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
5225
}
5210
}
5226
 
5211
 
5227
static enum intel_display_power_domain port_to_power_domain(enum port port)
5212
static enum intel_display_power_domain port_to_power_domain(enum port port)
5228
{
5213
{
5229
	switch (port) {
5214
	switch (port) {
5230
	case PORT_A:
5215
	case PORT_A:
5231
		return POWER_DOMAIN_PORT_DDI_A_LANES;
5216
		return POWER_DOMAIN_PORT_DDI_A_LANES;
5232
	case PORT_B:
5217
	case PORT_B:
5233
		return POWER_DOMAIN_PORT_DDI_B_LANES;
5218
		return POWER_DOMAIN_PORT_DDI_B_LANES;
5234
	case PORT_C:
5219
	case PORT_C:
5235
		return POWER_DOMAIN_PORT_DDI_C_LANES;
5220
		return POWER_DOMAIN_PORT_DDI_C_LANES;
5236
	case PORT_D:
5221
	case PORT_D:
5237
		return POWER_DOMAIN_PORT_DDI_D_LANES;
5222
		return POWER_DOMAIN_PORT_DDI_D_LANES;
5238
	case PORT_E:
5223
	case PORT_E:
5239
		return POWER_DOMAIN_PORT_DDI_E_LANES;
5224
		return POWER_DOMAIN_PORT_DDI_E_LANES;
5240
	default:
5225
	default:
5241
		MISSING_CASE(port);
5226
		MISSING_CASE(port);
5242
		return POWER_DOMAIN_PORT_OTHER;
5227
		return POWER_DOMAIN_PORT_OTHER;
5243
	}
5228
	}
5244
}
5229
}
5245
 
5230
 
5246
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5231
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5247
{
5232
{
5248
	switch (port) {
5233
	switch (port) {
5249
	case PORT_A:
5234
	case PORT_A:
5250
		return POWER_DOMAIN_AUX_A;
5235
		return POWER_DOMAIN_AUX_A;
5251
	case PORT_B:
5236
	case PORT_B:
5252
		return POWER_DOMAIN_AUX_B;
5237
		return POWER_DOMAIN_AUX_B;
5253
	case PORT_C:
5238
	case PORT_C:
5254
		return POWER_DOMAIN_AUX_C;
5239
		return POWER_DOMAIN_AUX_C;
5255
	case PORT_D:
5240
	case PORT_D:
5256
		return POWER_DOMAIN_AUX_D;
5241
		return POWER_DOMAIN_AUX_D;
5257
	case PORT_E:
5242
	case PORT_E:
5258
		/* FIXME: Check VBT for actual wiring of PORT E */
5243
		/* FIXME: Check VBT for actual wiring of PORT E */
5259
		return POWER_DOMAIN_AUX_D;
5244
		return POWER_DOMAIN_AUX_D;
5260
	default:
5245
	default:
5261
		MISSING_CASE(port);
5246
		MISSING_CASE(port);
5262
		return POWER_DOMAIN_AUX_A;
5247
		return POWER_DOMAIN_AUX_A;
5263
	}
5248
	}
5264
}
5249
}
5265
 
5250
 
5266
enum intel_display_power_domain
5251
enum intel_display_power_domain
5267
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5252
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5268
{
5253
{
5269
	struct drm_device *dev = intel_encoder->base.dev;
5254
	struct drm_device *dev = intel_encoder->base.dev;
5270
	struct intel_digital_port *intel_dig_port;
5255
	struct intel_digital_port *intel_dig_port;
5271
 
5256
 
5272
	switch (intel_encoder->type) {
5257
	switch (intel_encoder->type) {
5273
	case INTEL_OUTPUT_UNKNOWN:
5258
	case INTEL_OUTPUT_UNKNOWN:
5274
		/* Only DDI platforms should ever use this output type */
5259
		/* Only DDI platforms should ever use this output type */
5275
		WARN_ON_ONCE(!HAS_DDI(dev));
5260
		WARN_ON_ONCE(!HAS_DDI(dev));
5276
	case INTEL_OUTPUT_DISPLAYPORT:
5261
	case INTEL_OUTPUT_DISPLAYPORT:
5277
	case INTEL_OUTPUT_HDMI:
5262
	case INTEL_OUTPUT_HDMI:
5278
	case INTEL_OUTPUT_EDP:
5263
	case INTEL_OUTPUT_EDP:
5279
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5264
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5280
		return port_to_power_domain(intel_dig_port->port);
5265
		return port_to_power_domain(intel_dig_port->port);
5281
	case INTEL_OUTPUT_DP_MST:
5266
	case INTEL_OUTPUT_DP_MST:
5282
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5267
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5283
		return port_to_power_domain(intel_dig_port->port);
5268
		return port_to_power_domain(intel_dig_port->port);
5284
	case INTEL_OUTPUT_ANALOG:
5269
	case INTEL_OUTPUT_ANALOG:
5285
		return POWER_DOMAIN_PORT_CRT;
5270
		return POWER_DOMAIN_PORT_CRT;
5286
	case INTEL_OUTPUT_DSI:
5271
	case INTEL_OUTPUT_DSI:
5287
		return POWER_DOMAIN_PORT_DSI;
5272
		return POWER_DOMAIN_PORT_DSI;
5288
	default:
5273
	default:
5289
		return POWER_DOMAIN_PORT_OTHER;
5274
		return POWER_DOMAIN_PORT_OTHER;
5290
	}
5275
	}
5291
}
5276
}
5292
 
5277
 
5293
enum intel_display_power_domain
5278
enum intel_display_power_domain
5294
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5279
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5295
{
5280
{
5296
	struct drm_device *dev = intel_encoder->base.dev;
5281
	struct drm_device *dev = intel_encoder->base.dev;
5297
	struct intel_digital_port *intel_dig_port;
5282
	struct intel_digital_port *intel_dig_port;
5298
 
5283
 
5299
	switch (intel_encoder->type) {
5284
	switch (intel_encoder->type) {
5300
	case INTEL_OUTPUT_UNKNOWN:
5285
	case INTEL_OUTPUT_UNKNOWN:
5301
	case INTEL_OUTPUT_HDMI:
5286
	case INTEL_OUTPUT_HDMI:
5302
		/*
5287
		/*
5303
		 * Only DDI platforms should ever use these output types.
5288
		 * Only DDI platforms should ever use these output types.
5304
		 * We can get here after the HDMI detect code has already set
5289
		 * We can get here after the HDMI detect code has already set
5305
		 * the type of the shared encoder. Since we can't be sure
5290
		 * the type of the shared encoder. Since we can't be sure
5306
		 * what's the status of the given connectors, play safe and
5291
		 * what's the status of the given connectors, play safe and
5307
		 * run the DP detection too.
5292
		 * run the DP detection too.
5308
		 */
5293
		 */
5309
		WARN_ON_ONCE(!HAS_DDI(dev));
5294
		WARN_ON_ONCE(!HAS_DDI(dev));
5310
	case INTEL_OUTPUT_DISPLAYPORT:
5295
	case INTEL_OUTPUT_DISPLAYPORT:
5311
	case INTEL_OUTPUT_EDP:
5296
	case INTEL_OUTPUT_EDP:
5312
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5297
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5313
		return port_to_aux_power_domain(intel_dig_port->port);
5298
		return port_to_aux_power_domain(intel_dig_port->port);
5314
	case INTEL_OUTPUT_DP_MST:
5299
	case INTEL_OUTPUT_DP_MST:
5315
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5300
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5316
		return port_to_aux_power_domain(intel_dig_port->port);
5301
		return port_to_aux_power_domain(intel_dig_port->port);
5317
	default:
5302
	default:
5318
		MISSING_CASE(intel_encoder->type);
5303
		MISSING_CASE(intel_encoder->type);
5319
		return POWER_DOMAIN_AUX_A;
5304
		return POWER_DOMAIN_AUX_A;
5320
	}
5305
	}
5321
}
5306
}
5322
 
5307
 
-
 
5308
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5323
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
5309
					    struct intel_crtc_state *crtc_state)
5324
{
5310
{
5325
	struct drm_device *dev = crtc->dev;
5311
	struct drm_device *dev = crtc->dev;
5326
	struct intel_encoder *intel_encoder;
5312
	struct drm_encoder *encoder;
5327
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5313
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5328
	enum pipe pipe = intel_crtc->pipe;
5314
	enum pipe pipe = intel_crtc->pipe;
5329
	unsigned long mask;
5315
	unsigned long mask;
5330
	enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
5316
	enum transcoder transcoder = crtc_state->cpu_transcoder;
5331
 
5317
 
5332
	if (!crtc->state->active)
5318
	if (!crtc_state->base.active)
5333
		return 0;
5319
		return 0;
5334
 
5320
 
5335
	mask = BIT(POWER_DOMAIN_PIPE(pipe));
5321
	mask = BIT(POWER_DOMAIN_PIPE(pipe));
5336
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5322
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5337
	if (intel_crtc->config->pch_pfit.enabled ||
5323
	if (crtc_state->pch_pfit.enabled ||
5338
	    intel_crtc->config->pch_pfit.force_thru)
5324
	    crtc_state->pch_pfit.force_thru)
5339
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5325
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5340
 
5326
 
-
 
5327
	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
-
 
5328
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5341
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5329
 
-
 
5330
		mask |= BIT(intel_display_port_power_domain(intel_encoder));
5342
		mask |= BIT(intel_display_port_power_domain(intel_encoder));
5331
	}
5343
 
5332
 
5344
	return mask;
5333
	return mask;
5345
}
5334
}
-
 
5335
 
5346
 
5336
static unsigned long
-
 
5337
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5347
static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
5338
			       struct intel_crtc_state *crtc_state)
5348
{
5339
{
5349
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5340
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5350
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5341
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5351
	enum intel_display_power_domain domain;
5342
	enum intel_display_power_domain domain;
5352
	unsigned long domains, new_domains, old_domains;
5343
	unsigned long domains, new_domains, old_domains;
5353
 
5344
 
5354
	old_domains = intel_crtc->enabled_power_domains;
5345
	old_domains = intel_crtc->enabled_power_domains;
5355
	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
5346
	intel_crtc->enabled_power_domains = new_domains =
-
 
5347
		get_crtc_power_domains(crtc, crtc_state);
5356
 
5348
 
5357
	domains = new_domains & ~old_domains;
5349
	domains = new_domains & ~old_domains;
5358
 
5350
 
5359
	for_each_power_domain(domain, domains)
5351
	for_each_power_domain(domain, domains)
5360
		intel_display_power_get(dev_priv, domain);
5352
		intel_display_power_get(dev_priv, domain);
5361
 
5353
 
5362
	return old_domains & ~new_domains;
5354
	return old_domains & ~new_domains;
5363
}
5355
}
5364
 
5356
 
5365
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5357
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5366
				      unsigned long domains)
5358
				      unsigned long domains)
5367
{
5359
{
5368
	enum intel_display_power_domain domain;
5360
	enum intel_display_power_domain domain;
5369
 
5361
 
5370
	for_each_power_domain(domain, domains)
5362
	for_each_power_domain(domain, domains)
5371
		intel_display_power_put(dev_priv, domain);
5363
		intel_display_power_put(dev_priv, domain);
5372
}
5364
}
5373
 
-
 
5374
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
-
 
5375
{
-
 
5376
	struct drm_device *dev = state->dev;
-
 
5377
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
5378
	unsigned long put_domains[I915_MAX_PIPES] = {};
-
 
5379
	struct drm_crtc_state *crtc_state;
-
 
5380
	struct drm_crtc *crtc;
-
 
5381
	int i;
-
 
5382
 
-
 
5383
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-
 
5384
		if (needs_modeset(crtc->state))
-
 
5385
			put_domains[to_intel_crtc(crtc)->pipe] =
-
 
5386
				modeset_get_crtc_power_domains(crtc);
-
 
5387
	}
-
 
5388
 
-
 
5389
	if (dev_priv->display.modeset_commit_cdclk) {
-
 
5390
		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
-
 
5391
 
-
 
5392
		if (cdclk != dev_priv->cdclk_freq &&
-
 
5393
		    !WARN_ON(!state->allow_modeset))
-
 
5394
			dev_priv->display.modeset_commit_cdclk(state);
-
 
5395
	}
-
 
5396
 
-
 
5397
	for (i = 0; i < I915_MAX_PIPES; i++)
-
 
5398
		if (put_domains[i])
-
 
5399
			modeset_put_power_domains(dev_priv, put_domains[i]);
-
 
5400
}
-
 
5401
 
5365
 
5402
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5366
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5403
{
5367
{
5404
	int max_cdclk_freq = dev_priv->max_cdclk_freq;
5368
	int max_cdclk_freq = dev_priv->max_cdclk_freq;
5405
 
5369
 
5406
	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5370
	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5407
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5371
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5408
		return max_cdclk_freq;
5372
		return max_cdclk_freq;
5409
	else if (IS_CHERRYVIEW(dev_priv))
5373
	else if (IS_CHERRYVIEW(dev_priv))
5410
		return max_cdclk_freq*95/100;
5374
		return max_cdclk_freq*95/100;
5411
	else if (INTEL_INFO(dev_priv)->gen < 4)
5375
	else if (INTEL_INFO(dev_priv)->gen < 4)
5412
		return 2*max_cdclk_freq*90/100;
5376
		return 2*max_cdclk_freq*90/100;
5413
	else
5377
	else
5414
		return max_cdclk_freq*90/100;
5378
		return max_cdclk_freq*90/100;
5415
}
5379
}
5416
 
5380
 
5417
static void intel_update_max_cdclk(struct drm_device *dev)
5381
static void intel_update_max_cdclk(struct drm_device *dev)
5418
{
5382
{
5419
	struct drm_i915_private *dev_priv = dev->dev_private;
5383
	struct drm_i915_private *dev_priv = dev->dev_private;
5420
 
5384
 
5421
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5385
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5422
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5386
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5423
 
5387
 
5424
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5388
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5425
			dev_priv->max_cdclk_freq = 675000;
5389
			dev_priv->max_cdclk_freq = 675000;
5426
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5390
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5427
			dev_priv->max_cdclk_freq = 540000;
5391
			dev_priv->max_cdclk_freq = 540000;
5428
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5392
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5429
			dev_priv->max_cdclk_freq = 450000;
5393
			dev_priv->max_cdclk_freq = 450000;
5430
		else
5394
		else
5431
			dev_priv->max_cdclk_freq = 337500;
5395
			dev_priv->max_cdclk_freq = 337500;
5432
	} else if (IS_BROADWELL(dev))  {
5396
	} else if (IS_BROADWELL(dev))  {
5433
		/*
5397
		/*
5434
		 * FIXME with extra cooling we can allow
5398
		 * FIXME with extra cooling we can allow
5435
		 * 540 MHz for ULX and 675 Mhz for ULT.
5399
		 * 540 MHz for ULX and 675 Mhz for ULT.
5436
		 * How can we know if extra cooling is
5400
		 * How can we know if extra cooling is
5437
		 * available? PCI ID, VTB, something else?
5401
		 * available? PCI ID, VTB, something else?
5438
		 */
5402
		 */
5439
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
5403
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
5440
			dev_priv->max_cdclk_freq = 450000;
5404
			dev_priv->max_cdclk_freq = 450000;
5441
		else if (IS_BDW_ULX(dev))
5405
		else if (IS_BDW_ULX(dev))
5442
			dev_priv->max_cdclk_freq = 450000;
5406
			dev_priv->max_cdclk_freq = 450000;
5443
		else if (IS_BDW_ULT(dev))
5407
		else if (IS_BDW_ULT(dev))
5444
			dev_priv->max_cdclk_freq = 540000;
5408
			dev_priv->max_cdclk_freq = 540000;
5445
		else
5409
		else
5446
			dev_priv->max_cdclk_freq = 675000;
5410
			dev_priv->max_cdclk_freq = 675000;
5447
	} else if (IS_CHERRYVIEW(dev)) {
5411
	} else if (IS_CHERRYVIEW(dev)) {
5448
		dev_priv->max_cdclk_freq = 320000;
5412
		dev_priv->max_cdclk_freq = 320000;
5449
	} else if (IS_VALLEYVIEW(dev)) {
5413
	} else if (IS_VALLEYVIEW(dev)) {
5450
		dev_priv->max_cdclk_freq = 400000;
5414
		dev_priv->max_cdclk_freq = 400000;
5451
	} else {
5415
	} else {
5452
		/* otherwise assume cdclk is fixed */
5416
		/* otherwise assume cdclk is fixed */
5453
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5417
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5454
	}
5418
	}
5455
 
5419
 
5456
	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5420
	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5457
 
5421
 
5458
	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5422
	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5459
			 dev_priv->max_cdclk_freq);
5423
			 dev_priv->max_cdclk_freq);
5460
 
5424
 
5461
	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5425
	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5462
			 dev_priv->max_dotclk_freq);
5426
			 dev_priv->max_dotclk_freq);
5463
}
5427
}
5464
 
5428
 
5465
static void intel_update_cdclk(struct drm_device *dev)
5429
static void intel_update_cdclk(struct drm_device *dev)
5466
{
5430
{
5467
	struct drm_i915_private *dev_priv = dev->dev_private;
5431
	struct drm_i915_private *dev_priv = dev->dev_private;
5468
 
5432
 
5469
	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5433
	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5470
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5434
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5471
			 dev_priv->cdclk_freq);
5435
			 dev_priv->cdclk_freq);
5472
 
5436
 
5473
	/*
5437
	/*
5474
	 * Program the gmbus_freq based on the cdclk frequency.
5438
	 * Program the gmbus_freq based on the cdclk frequency.
5475
	 * BSpec erroneously claims we should aim for 4MHz, but
5439
	 * BSpec erroneously claims we should aim for 4MHz, but
5476
	 * in fact 1MHz is the correct frequency.
5440
	 * in fact 1MHz is the correct frequency.
5477
	 */
5441
	 */
5478
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5442
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5479
		/*
5443
		/*
5480
		 * Program the gmbus_freq based on the cdclk frequency.
5444
		 * Program the gmbus_freq based on the cdclk frequency.
5481
		 * BSpec erroneously claims we should aim for 4MHz, but
5445
		 * BSpec erroneously claims we should aim for 4MHz, but
5482
		 * in fact 1MHz is the correct frequency.
5446
		 * in fact 1MHz is the correct frequency.
5483
		 */
5447
		 */
5484
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5448
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5485
	}
5449
	}
5486
 
5450
 
5487
	if (dev_priv->max_cdclk_freq == 0)
5451
	if (dev_priv->max_cdclk_freq == 0)
5488
		intel_update_max_cdclk(dev);
5452
		intel_update_max_cdclk(dev);
5489
}
5453
}
5490
 
5454
 
5491
static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5455
static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5492
{
5456
{
5493
	struct drm_i915_private *dev_priv = dev->dev_private;
5457
	struct drm_i915_private *dev_priv = dev->dev_private;
5494
	uint32_t divider;
5458
	uint32_t divider;
5495
	uint32_t ratio;
5459
	uint32_t ratio;
5496
	uint32_t current_freq;
5460
	uint32_t current_freq;
5497
	int ret;
5461
	int ret;
5498
 
5462
 
5499
	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5463
	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5500
	switch (frequency) {
5464
	switch (frequency) {
5501
	case 144000:
5465
	case 144000:
5502
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5466
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5503
		ratio = BXT_DE_PLL_RATIO(60);
5467
		ratio = BXT_DE_PLL_RATIO(60);
5504
		break;
5468
		break;
5505
	case 288000:
5469
	case 288000:
5506
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5470
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5507
		ratio = BXT_DE_PLL_RATIO(60);
5471
		ratio = BXT_DE_PLL_RATIO(60);
5508
		break;
5472
		break;
5509
	case 384000:
5473
	case 384000:
5510
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5474
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5511
		ratio = BXT_DE_PLL_RATIO(60);
5475
		ratio = BXT_DE_PLL_RATIO(60);
5512
		break;
5476
		break;
5513
	case 576000:
5477
	case 576000:
5514
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5478
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5515
		ratio = BXT_DE_PLL_RATIO(60);
5479
		ratio = BXT_DE_PLL_RATIO(60);
5516
		break;
5480
		break;
5517
	case 624000:
5481
	case 624000:
5518
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5482
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5519
		ratio = BXT_DE_PLL_RATIO(65);
5483
		ratio = BXT_DE_PLL_RATIO(65);
5520
		break;
5484
		break;
5521
	case 19200:
5485
	case 19200:
5522
		/*
5486
		/*
5523
		 * Bypass frequency with DE PLL disabled. Init ratio, divider
5487
		 * Bypass frequency with DE PLL disabled. Init ratio, divider
5524
		 * to suppress GCC warning.
5488
		 * to suppress GCC warning.
5525
		 */
5489
		 */
5526
		ratio = 0;
5490
		ratio = 0;
5527
		divider = 0;
5491
		divider = 0;
5528
		break;
5492
		break;
5529
	default:
5493
	default:
5530
		DRM_ERROR("unsupported CDCLK freq %d", frequency);
5494
		DRM_ERROR("unsupported CDCLK freq %d", frequency);
5531
 
5495
 
5532
		return;
5496
		return;
5533
	}
5497
	}
5534
 
5498
 
5535
	mutex_lock(&dev_priv->rps.hw_lock);
5499
	mutex_lock(&dev_priv->rps.hw_lock);
5536
	/* Inform power controller of upcoming frequency change */
5500
	/* Inform power controller of upcoming frequency change */
5537
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5501
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5538
				      0x80000000);
5502
				      0x80000000);
5539
	mutex_unlock(&dev_priv->rps.hw_lock);
5503
	mutex_unlock(&dev_priv->rps.hw_lock);
5540
 
5504
 
5541
	if (ret) {
5505
	if (ret) {
5542
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5506
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5543
			  ret, frequency);
5507
			  ret, frequency);
5544
		return;
5508
		return;
5545
	}
5509
	}
5546
 
5510
 
5547
	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5511
	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5548
	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5512
	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5549
	current_freq = current_freq * 500 + 1000;
5513
	current_freq = current_freq * 500 + 1000;
5550
 
5514
 
5551
	/*
5515
	/*
5552
	 * DE PLL has to be disabled when
5516
	 * DE PLL has to be disabled when
5553
	 * - setting to 19.2MHz (bypass, PLL isn't used)
5517
	 * - setting to 19.2MHz (bypass, PLL isn't used)
5554
	 * - before setting to 624MHz (PLL needs toggling)
5518
	 * - before setting to 624MHz (PLL needs toggling)
5555
	 * - before setting to any frequency from 624MHz (PLL needs toggling)
5519
	 * - before setting to any frequency from 624MHz (PLL needs toggling)
5556
	 */
5520
	 */
5557
	if (frequency == 19200 || frequency == 624000 ||
5521
	if (frequency == 19200 || frequency == 624000 ||
5558
	    current_freq == 624000) {
5522
	    current_freq == 624000) {
5559
		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5523
		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5560
		/* Timeout 200us */
5524
		/* Timeout 200us */
5561
		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5525
		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5562
			     1))
5526
			     1))
5563
			DRM_ERROR("timout waiting for DE PLL unlock\n");
5527
			DRM_ERROR("timout waiting for DE PLL unlock\n");
5564
	}
5528
	}
5565
 
5529
 
5566
	if (frequency != 19200) {
5530
	if (frequency != 19200) {
5567
		uint32_t val;
5531
		uint32_t val;
5568
 
5532
 
5569
		val = I915_READ(BXT_DE_PLL_CTL);
5533
		val = I915_READ(BXT_DE_PLL_CTL);
5570
		val &= ~BXT_DE_PLL_RATIO_MASK;
5534
		val &= ~BXT_DE_PLL_RATIO_MASK;
5571
		val |= ratio;
5535
		val |= ratio;
5572
		I915_WRITE(BXT_DE_PLL_CTL, val);
5536
		I915_WRITE(BXT_DE_PLL_CTL, val);
5573
 
5537
 
5574
		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5538
		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5575
		/* Timeout 200us */
5539
		/* Timeout 200us */
5576
		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5540
		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5577
			DRM_ERROR("timeout waiting for DE PLL lock\n");
5541
			DRM_ERROR("timeout waiting for DE PLL lock\n");
5578
 
5542
 
5579
		val = I915_READ(CDCLK_CTL);
5543
		val = I915_READ(CDCLK_CTL);
5580
		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5544
		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5581
		val |= divider;
5545
		val |= divider;
5582
		/*
5546
		/*
5583
		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5547
		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5584
		 * enable otherwise.
5548
		 * enable otherwise.
5585
		 */
5549
		 */
5586
		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5550
		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5587
		if (frequency >= 500000)
5551
		if (frequency >= 500000)
5588
			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5552
			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5589
 
5553
 
5590
		val &= ~CDCLK_FREQ_DECIMAL_MASK;
5554
		val &= ~CDCLK_FREQ_DECIMAL_MASK;
5591
		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5555
		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5592
		val |= (frequency - 1000) / 500;
5556
		val |= (frequency - 1000) / 500;
5593
		I915_WRITE(CDCLK_CTL, val);
5557
		I915_WRITE(CDCLK_CTL, val);
5594
	}
5558
	}
5595
 
5559
 
5596
	mutex_lock(&dev_priv->rps.hw_lock);
5560
	mutex_lock(&dev_priv->rps.hw_lock);
5597
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5561
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5598
				      DIV_ROUND_UP(frequency, 25000));
5562
				      DIV_ROUND_UP(frequency, 25000));
5599
	mutex_unlock(&dev_priv->rps.hw_lock);
5563
	mutex_unlock(&dev_priv->rps.hw_lock);
5600
 
5564
 
5601
	if (ret) {
5565
	if (ret) {
5602
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5566
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5603
			  ret, frequency);
5567
			  ret, frequency);
5604
		return;
5568
		return;
5605
	}
5569
	}
5606
 
5570
 
5607
	intel_update_cdclk(dev);
5571
	intel_update_cdclk(dev);
5608
}
5572
}
5609
 
5573
 
5610
void broxton_init_cdclk(struct drm_device *dev)
5574
void broxton_init_cdclk(struct drm_device *dev)
5611
{
5575
{
5612
	struct drm_i915_private *dev_priv = dev->dev_private;
5576
	struct drm_i915_private *dev_priv = dev->dev_private;
5613
	uint32_t val;
5577
	uint32_t val;
5614
 
5578
 
5615
	/*
5579
	/*
5616
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5580
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5617
	 * or else the reset will hang because there is no PCH to respond.
5581
	 * or else the reset will hang because there is no PCH to respond.
5618
	 * Move the handshake programming to initialization sequence.
5582
	 * Move the handshake programming to initialization sequence.
5619
	 * Previously was left up to BIOS.
5583
	 * Previously was left up to BIOS.
5620
	 */
5584
	 */
5621
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
5585
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
5622
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
5586
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
5623
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
5587
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
5624
 
5588
 
5625
	/* Enable PG1 for cdclk */
5589
	/* Enable PG1 for cdclk */
5626
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
5590
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
5627
 
5591
 
5628
	/* check if cd clock is enabled */
5592
	/* check if cd clock is enabled */
5629
	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
5593
	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
5630
		DRM_DEBUG_KMS("Display already initialized\n");
5594
		DRM_DEBUG_KMS("Display already initialized\n");
5631
		return;
5595
		return;
5632
	}
5596
	}
5633
 
5597
 
5634
	/*
5598
	/*
5635
	 * FIXME:
5599
	 * FIXME:
5636
	 * - The initial CDCLK needs to be read from VBT.
5600
	 * - The initial CDCLK needs to be read from VBT.
5637
	 *   Need to make this change after VBT has changes for BXT.
5601
	 *   Need to make this change after VBT has changes for BXT.
5638
	 * - check if setting the max (or any) cdclk freq is really necessary
5602
	 * - check if setting the max (or any) cdclk freq is really necessary
5639
	 *   here, it belongs to modeset time
5603
	 *   here, it belongs to modeset time
5640
	 */
5604
	 */
5641
	broxton_set_cdclk(dev, 624000);
5605
	broxton_set_cdclk(dev, 624000);
5642
 
5606
 
5643
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5607
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5644
	POSTING_READ(DBUF_CTL);
5608
	POSTING_READ(DBUF_CTL);
5645
 
5609
 
5646
	udelay(10);
5610
	udelay(10);
5647
 
5611
 
5648
	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5612
	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5649
		DRM_ERROR("DBuf power enable timeout!\n");
5613
		DRM_ERROR("DBuf power enable timeout!\n");
5650
}
5614
}
5651
 
5615
 
5652
void broxton_uninit_cdclk(struct drm_device *dev)
5616
void broxton_uninit_cdclk(struct drm_device *dev)
5653
{
5617
{
5654
	struct drm_i915_private *dev_priv = dev->dev_private;
5618
	struct drm_i915_private *dev_priv = dev->dev_private;
5655
 
5619
 
5656
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5620
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5657
	POSTING_READ(DBUF_CTL);
5621
	POSTING_READ(DBUF_CTL);
5658
 
5622
 
5659
	udelay(10);
5623
	udelay(10);
5660
 
5624
 
5661
	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5625
	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5662
		DRM_ERROR("DBuf power disable timeout!\n");
5626
		DRM_ERROR("DBuf power disable timeout!\n");
5663
 
5627
 
5664
	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
5628
	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
5665
	broxton_set_cdclk(dev, 19200);
5629
	broxton_set_cdclk(dev, 19200);
5666
 
5630
 
5667
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
5631
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
5668
}
5632
}
5669
 
5633
 
5670
static const struct skl_cdclk_entry {
5634
static const struct skl_cdclk_entry {
5671
	unsigned int freq;
5635
	unsigned int freq;
5672
	unsigned int vco;
5636
	unsigned int vco;
5673
} skl_cdclk_frequencies[] = {
5637
} skl_cdclk_frequencies[] = {
5674
	{ .freq = 308570, .vco = 8640 },
5638
	{ .freq = 308570, .vco = 8640 },
5675
	{ .freq = 337500, .vco = 8100 },
5639
	{ .freq = 337500, .vco = 8100 },
5676
	{ .freq = 432000, .vco = 8640 },
5640
	{ .freq = 432000, .vco = 8640 },
5677
	{ .freq = 450000, .vco = 8100 },
5641
	{ .freq = 450000, .vco = 8100 },
5678
	{ .freq = 540000, .vco = 8100 },
5642
	{ .freq = 540000, .vco = 8100 },
5679
	{ .freq = 617140, .vco = 8640 },
5643
	{ .freq = 617140, .vco = 8640 },
5680
	{ .freq = 675000, .vco = 8100 },
5644
	{ .freq = 675000, .vco = 8100 },
5681
};
5645
};
5682
 
5646
 
5683
static unsigned int skl_cdclk_decimal(unsigned int freq)
5647
static unsigned int skl_cdclk_decimal(unsigned int freq)
5684
{
5648
{
5685
	return (freq - 1000) / 500;
5649
	return (freq - 1000) / 500;
5686
}
5650
}
5687
 
5651
 
5688
static unsigned int skl_cdclk_get_vco(unsigned int freq)
5652
static unsigned int skl_cdclk_get_vco(unsigned int freq)
5689
{
5653
{
5690
	unsigned int i;
5654
	unsigned int i;
5691
 
5655
 
5692
	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5656
	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5693
		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5657
		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5694
 
5658
 
5695
		if (e->freq == freq)
5659
		if (e->freq == freq)
5696
			return e->vco;
5660
			return e->vco;
5697
	}
5661
	}
5698
 
5662
 
5699
	return 8100;
5663
	return 8100;
5700
}
5664
}
5701
 
5665
 
5702
static void
5666
static void
5703
skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5667
skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5704
{
5668
{
5705
	unsigned int min_freq;
5669
	unsigned int min_freq;
5706
	u32 val;
5670
	u32 val;
5707
 
5671
 
5708
	/* select the minimum CDCLK before enabling DPLL 0 */
5672
	/* select the minimum CDCLK before enabling DPLL 0 */
5709
	val = I915_READ(CDCLK_CTL);
5673
	val = I915_READ(CDCLK_CTL);
5710
	val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5674
	val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5711
	val |= CDCLK_FREQ_337_308;
5675
	val |= CDCLK_FREQ_337_308;
5712
 
5676
 
5713
	if (required_vco == 8640)
5677
	if (required_vco == 8640)
5714
		min_freq = 308570;
5678
		min_freq = 308570;
5715
	else
5679
	else
5716
		min_freq = 337500;
5680
		min_freq = 337500;
5717
 
5681
 
5718
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5682
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5719
 
5683
 
5720
	I915_WRITE(CDCLK_CTL, val);
5684
	I915_WRITE(CDCLK_CTL, val);
5721
	POSTING_READ(CDCLK_CTL);
5685
	POSTING_READ(CDCLK_CTL);
5722
 
5686
 
5723
	/*
5687
	/*
5724
	 * We always enable DPLL0 with the lowest link rate possible, but still
5688
	 * We always enable DPLL0 with the lowest link rate possible, but still
5725
	 * taking into account the VCO required to operate the eDP panel at the
5689
	 * taking into account the VCO required to operate the eDP panel at the
5726
	 * desired frequency. The usual DP link rates operate with a VCO of
5690
	 * desired frequency. The usual DP link rates operate with a VCO of
5727
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5691
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5728
	 * The modeset code is responsible for the selection of the exact link
5692
	 * The modeset code is responsible for the selection of the exact link
5729
	 * rate later on, with the constraint of choosing a frequency that
5693
	 * rate later on, with the constraint of choosing a frequency that
5730
	 * works with required_vco.
5694
	 * works with required_vco.
5731
	 */
5695
	 */
5732
	val = I915_READ(DPLL_CTRL1);
5696
	val = I915_READ(DPLL_CTRL1);
5733
 
5697
 
5734
	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5698
	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5735
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5699
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5736
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5700
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5737
	if (required_vco == 8640)
5701
	if (required_vco == 8640)
5738
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5702
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5739
					    SKL_DPLL0);
5703
					    SKL_DPLL0);
5740
	else
5704
	else
5741
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5705
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5742
					    SKL_DPLL0);
5706
					    SKL_DPLL0);
5743
 
5707
 
5744
	I915_WRITE(DPLL_CTRL1, val);
5708
	I915_WRITE(DPLL_CTRL1, val);
5745
	POSTING_READ(DPLL_CTRL1);
5709
	POSTING_READ(DPLL_CTRL1);
5746
 
5710
 
5747
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5711
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5748
 
5712
 
5749
	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5713
	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5750
		DRM_ERROR("DPLL0 not locked\n");
5714
		DRM_ERROR("DPLL0 not locked\n");
5751
}
5715
}
5752
 
5716
 
5753
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5717
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5754
{
5718
{
5755
	int ret;
5719
	int ret;
5756
	u32 val;
5720
	u32 val;
5757
 
5721
 
5758
	/* inform PCU we want to change CDCLK */
5722
	/* inform PCU we want to change CDCLK */
5759
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5723
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5760
	mutex_lock(&dev_priv->rps.hw_lock);
5724
	mutex_lock(&dev_priv->rps.hw_lock);
5761
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5725
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5762
	mutex_unlock(&dev_priv->rps.hw_lock);
5726
	mutex_unlock(&dev_priv->rps.hw_lock);
5763
 
5727
 
5764
	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5728
	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5765
}
5729
}
5766
 
5730
 
5767
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5731
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5768
{
5732
{
5769
	unsigned int i;
5733
	unsigned int i;
5770
 
5734
 
5771
	for (i = 0; i < 15; i++) {
5735
	for (i = 0; i < 15; i++) {
5772
		if (skl_cdclk_pcu_ready(dev_priv))
5736
		if (skl_cdclk_pcu_ready(dev_priv))
5773
			return true;
5737
			return true;
5774
		udelay(10);
5738
		udelay(10);
5775
	}
5739
	}
5776
 
5740
 
5777
	return false;
5741
	return false;
5778
}
5742
}
5779
 
5743
 
5780
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5744
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5781
{
5745
{
5782
	struct drm_device *dev = dev_priv->dev;
5746
	struct drm_device *dev = dev_priv->dev;
5783
	u32 freq_select, pcu_ack;
5747
	u32 freq_select, pcu_ack;
5784
 
5748
 
5785
	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
5749
	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
5786
 
5750
 
5787
	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5751
	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5788
		DRM_ERROR("failed to inform PCU about cdclk change\n");
5752
		DRM_ERROR("failed to inform PCU about cdclk change\n");
5789
		return;
5753
		return;
5790
	}
5754
	}
5791
 
5755
 
5792
	/* set CDCLK_CTL */
5756
	/* set CDCLK_CTL */
5793
	switch(freq) {
5757
	switch(freq) {
5794
	case 450000:
5758
	case 450000:
5795
	case 432000:
5759
	case 432000:
5796
		freq_select = CDCLK_FREQ_450_432;
5760
		freq_select = CDCLK_FREQ_450_432;
5797
		pcu_ack = 1;
5761
		pcu_ack = 1;
5798
		break;
5762
		break;
5799
	case 540000:
5763
	case 540000:
5800
		freq_select = CDCLK_FREQ_540;
5764
		freq_select = CDCLK_FREQ_540;
5801
		pcu_ack = 2;
5765
		pcu_ack = 2;
5802
		break;
5766
		break;
5803
	case 308570:
5767
	case 308570:
5804
	case 337500:
5768
	case 337500:
5805
	default:
5769
	default:
5806
		freq_select = CDCLK_FREQ_337_308;
5770
		freq_select = CDCLK_FREQ_337_308;
5807
		pcu_ack = 0;
5771
		pcu_ack = 0;
5808
		break;
5772
		break;
5809
	case 617140:
5773
	case 617140:
5810
	case 675000:
5774
	case 675000:
5811
		freq_select = CDCLK_FREQ_675_617;
5775
		freq_select = CDCLK_FREQ_675_617;
5812
		pcu_ack = 3;
5776
		pcu_ack = 3;
5813
		break;
5777
		break;
5814
	}
5778
	}
5815
 
5779
 
5816
	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5780
	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5817
	POSTING_READ(CDCLK_CTL);
5781
	POSTING_READ(CDCLK_CTL);
5818
 
5782
 
5819
	/* inform PCU of the change */
5783
	/* inform PCU of the change */
5820
	mutex_lock(&dev_priv->rps.hw_lock);
5784
	mutex_lock(&dev_priv->rps.hw_lock);
5821
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5785
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5822
	mutex_unlock(&dev_priv->rps.hw_lock);
5786
	mutex_unlock(&dev_priv->rps.hw_lock);
5823
 
5787
 
5824
	intel_update_cdclk(dev);
5788
	intel_update_cdclk(dev);
5825
}
5789
}
5826
 
5790
 
5827
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5791
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5828
{
5792
{
5829
	/* disable DBUF power */
5793
	/* disable DBUF power */
5830
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5794
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5831
	POSTING_READ(DBUF_CTL);
5795
	POSTING_READ(DBUF_CTL);
5832
 
5796
 
5833
	udelay(10);
5797
	udelay(10);
5834
 
5798
 
5835
	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5799
	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5836
		DRM_ERROR("DBuf power disable timeout\n");
5800
		DRM_ERROR("DBuf power disable timeout\n");
5837
 
5801
 
5838
		/* disable DPLL0 */
5802
	/* disable DPLL0 */
5839
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5803
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5840
		if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5804
	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5841
			DRM_ERROR("Couldn't disable DPLL0\n");
5805
		DRM_ERROR("Couldn't disable DPLL0\n");
5842
	}
5806
}
5843
 
5807
 
5844
void skl_init_cdclk(struct drm_i915_private *dev_priv)
5808
void skl_init_cdclk(struct drm_i915_private *dev_priv)
5845
{
5809
{
5846
	unsigned int required_vco;
5810
	unsigned int required_vco;
5847
 
5811
 
5848
	/* DPLL0 not enabled (happens on early BIOS versions) */
5812
	/* DPLL0 not enabled (happens on early BIOS versions) */
5849
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5813
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5850
		/* enable DPLL0 */
5814
		/* enable DPLL0 */
5851
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5815
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5852
		skl_dpll0_enable(dev_priv, required_vco);
5816
		skl_dpll0_enable(dev_priv, required_vco);
5853
	}
5817
	}
5854
 
5818
 
5855
	/* set CDCLK to the frequency the BIOS chose */
5819
	/* set CDCLK to the frequency the BIOS chose */
5856
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5820
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5857
 
5821
 
5858
	/* enable DBUF power */
5822
	/* enable DBUF power */
5859
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5823
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5860
	POSTING_READ(DBUF_CTL);
5824
	POSTING_READ(DBUF_CTL);
5861
 
5825
 
5862
	udelay(10);
5826
	udelay(10);
5863
 
5827
 
5864
	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5828
	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5865
		DRM_ERROR("DBuf power enable timeout\n");
5829
		DRM_ERROR("DBuf power enable timeout\n");
5866
}
5830
}
5867
 
5831
 
5868
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5832
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5869
{
5833
{
5870
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5834
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5871
	uint32_t cdctl = I915_READ(CDCLK_CTL);
5835
	uint32_t cdctl = I915_READ(CDCLK_CTL);
5872
	int freq = dev_priv->skl_boot_cdclk;
5836
	int freq = dev_priv->skl_boot_cdclk;
5873
 
5837
 
5874
	/*
5838
	/*
5875
	 * check if the pre-os intialized the display
5839
	 * check if the pre-os intialized the display
5876
	 * There is SWF18 scratchpad register defined which is set by the
5840
	 * There is SWF18 scratchpad register defined which is set by the
5877
	 * pre-os which can be used by the OS drivers to check the status
5841
	 * pre-os which can be used by the OS drivers to check the status
5878
	 */
5842
	 */
5879
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5843
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5880
		goto sanitize;
5844
		goto sanitize;
5881
 
5845
 
5882
	/* Is PLL enabled and locked ? */
5846
	/* Is PLL enabled and locked ? */
5883
	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5847
	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5884
		goto sanitize;
5848
		goto sanitize;
5885
 
5849
 
5886
	/* DPLL okay; verify the cdclock
5850
	/* DPLL okay; verify the cdclock
5887
	 *
5851
	 *
5888
	 * Noticed in some instances that the freq selection is correct but
5852
	 * Noticed in some instances that the freq selection is correct but
5889
	 * decimal part is programmed wrong from BIOS where pre-os does not
5853
	 * decimal part is programmed wrong from BIOS where pre-os does not
5890
	 * enable display. Verify the same as well.
5854
	 * enable display. Verify the same as well.
5891
	 */
5855
	 */
5892
	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5856
	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5893
		/* All well; nothing to sanitize */
5857
		/* All well; nothing to sanitize */
5894
		return false;
5858
		return false;
5895
sanitize:
5859
sanitize:
5896
	/*
5860
	/*
5897
	 * As of now initialize with max cdclk till
5861
	 * As of now initialize with max cdclk till
5898
	 * we get dynamic cdclk support
5862
	 * we get dynamic cdclk support
5899
	 * */
5863
	 * */
5900
	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5864
	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5901
	skl_init_cdclk(dev_priv);
5865
	skl_init_cdclk(dev_priv);
5902
 
5866
 
5903
	/* we did have to sanitize */
5867
	/* we did have to sanitize */
5904
	return true;
5868
	return true;
5905
}
5869
}
5906
 
5870
 
5907
/* Adjust CDclk dividers to allow high res or save power if possible */
5871
/* Adjust CDclk dividers to allow high res or save power if possible */
5908
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5872
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5909
{
5873
{
5910
	struct drm_i915_private *dev_priv = dev->dev_private;
5874
	struct drm_i915_private *dev_priv = dev->dev_private;
5911
	u32 val, cmd;
5875
	u32 val, cmd;
5912
 
5876
 
5913
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5877
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5914
					!= dev_priv->cdclk_freq);
5878
					!= dev_priv->cdclk_freq);
5915
 
5879
 
5916
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5880
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5917
		cmd = 2;
5881
		cmd = 2;
5918
	else if (cdclk == 266667)
5882
	else if (cdclk == 266667)
5919
		cmd = 1;
5883
		cmd = 1;
5920
	else
5884
	else
5921
		cmd = 0;
5885
		cmd = 0;
5922
 
5886
 
5923
	mutex_lock(&dev_priv->rps.hw_lock);
5887
	mutex_lock(&dev_priv->rps.hw_lock);
5924
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5888
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5925
	val &= ~DSPFREQGUAR_MASK;
5889
	val &= ~DSPFREQGUAR_MASK;
5926
	val |= (cmd << DSPFREQGUAR_SHIFT);
5890
	val |= (cmd << DSPFREQGUAR_SHIFT);
5927
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5891
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5928
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5892
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5929
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5893
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5930
		     50)) {
5894
		     50)) {
5931
		DRM_ERROR("timed out waiting for CDclk change\n");
5895
		DRM_ERROR("timed out waiting for CDclk change\n");
5932
	}
5896
	}
5933
	mutex_unlock(&dev_priv->rps.hw_lock);
5897
	mutex_unlock(&dev_priv->rps.hw_lock);
5934
 
5898
 
5935
	mutex_lock(&dev_priv->sb_lock);
5899
	mutex_lock(&dev_priv->sb_lock);
5936
 
5900
 
5937
	if (cdclk == 400000) {
5901
	if (cdclk == 400000) {
5938
		u32 divider;
5902
		u32 divider;
5939
 
5903
 
5940
		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5904
		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5941
 
5905
 
5942
		/* adjust cdclk divider */
5906
		/* adjust cdclk divider */
5943
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5907
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5944
		val &= ~CCK_FREQUENCY_VALUES;
5908
		val &= ~CCK_FREQUENCY_VALUES;
5945
		val |= divider;
5909
		val |= divider;
5946
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5910
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5947
 
5911
 
5948
		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5912
		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5949
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5913
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5950
			     50))
5914
			     50))
5951
			DRM_ERROR("timed out waiting for CDclk change\n");
5915
			DRM_ERROR("timed out waiting for CDclk change\n");
5952
	}
5916
	}
5953
 
5917
 
5954
	/* adjust self-refresh exit latency value */
5918
	/* adjust self-refresh exit latency value */
5955
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5919
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5956
	val &= ~0x7f;
5920
	val &= ~0x7f;
5957
 
5921
 
5958
	/*
5922
	/*
5959
	 * For high bandwidth configs, we set a higher latency in the bunit
5923
	 * For high bandwidth configs, we set a higher latency in the bunit
5960
	 * so that the core display fetch happens in time to avoid underruns.
5924
	 * so that the core display fetch happens in time to avoid underruns.
5961
	 */
5925
	 */
5962
	if (cdclk == 400000)
5926
	if (cdclk == 400000)
5963
		val |= 4500 / 250; /* 4.5 usec */
5927
		val |= 4500 / 250; /* 4.5 usec */
5964
	else
5928
	else
5965
		val |= 3000 / 250; /* 3.0 usec */
5929
		val |= 3000 / 250; /* 3.0 usec */
5966
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5930
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5967
 
5931
 
5968
	mutex_unlock(&dev_priv->sb_lock);
5932
	mutex_unlock(&dev_priv->sb_lock);
5969
 
5933
 
5970
	intel_update_cdclk(dev);
5934
	intel_update_cdclk(dev);
5971
}
5935
}
5972
 
5936
 
5973
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5937
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5974
{
5938
{
5975
	struct drm_i915_private *dev_priv = dev->dev_private;
5939
	struct drm_i915_private *dev_priv = dev->dev_private;
5976
	u32 val, cmd;
5940
	u32 val, cmd;
5977
 
5941
 
5978
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5942
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5979
						!= dev_priv->cdclk_freq);
5943
						!= dev_priv->cdclk_freq);
5980
 
5944
 
5981
	switch (cdclk) {
5945
	switch (cdclk) {
5982
	case 333333:
5946
	case 333333:
5983
	case 320000:
5947
	case 320000:
5984
	case 266667:
5948
	case 266667:
5985
	case 200000:
5949
	case 200000:
5986
		break;
5950
		break;
5987
	default:
5951
	default:
5988
		MISSING_CASE(cdclk);
5952
		MISSING_CASE(cdclk);
5989
		return;
5953
		return;
5990
	}
5954
	}
5991
 
5955
 
5992
	/*
5956
	/*
5993
	 * Specs are full of misinformation, but testing on actual
5957
	 * Specs are full of misinformation, but testing on actual
5994
	 * hardware has shown that we just need to write the desired
5958
	 * hardware has shown that we just need to write the desired
5995
	 * CCK divider into the Punit register.
5959
	 * CCK divider into the Punit register.
5996
	 */
5960
	 */
5997
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5961
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5998
 
5962
 
5999
	mutex_lock(&dev_priv->rps.hw_lock);
5963
	mutex_lock(&dev_priv->rps.hw_lock);
6000
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5964
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
6001
	val &= ~DSPFREQGUAR_MASK_CHV;
5965
	val &= ~DSPFREQGUAR_MASK_CHV;
6002
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
5966
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
6003
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5967
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
6004
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5968
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
6005
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
5969
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
6006
		     50)) {
5970
		     50)) {
6007
		DRM_ERROR("timed out waiting for CDclk change\n");
5971
		DRM_ERROR("timed out waiting for CDclk change\n");
6008
	}
5972
	}
6009
	mutex_unlock(&dev_priv->rps.hw_lock);
5973
	mutex_unlock(&dev_priv->rps.hw_lock);
6010
 
5974
 
6011
	intel_update_cdclk(dev);
5975
	intel_update_cdclk(dev);
6012
}
5976
}
6013
 
5977
 
6014
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5978
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
6015
				 int max_pixclk)
5979
				 int max_pixclk)
6016
{
5980
{
6017
	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5981
	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
6018
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5982
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
6019
 
5983
 
6020
	/*
5984
	/*
6021
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
5985
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
6022
	 *   200MHz
5986
	 *   200MHz
6023
	 *   267MHz
5987
	 *   267MHz
6024
	 *   320/333MHz (depends on HPLL freq)
5988
	 *   320/333MHz (depends on HPLL freq)
6025
	 *   400MHz (VLV only)
5989
	 *   400MHz (VLV only)
6026
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5990
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
6027
	 * of the lower bin and adjust if needed.
5991
	 * of the lower bin and adjust if needed.
6028
	 *
5992
	 *
6029
	 * We seem to get an unstable or solid color picture at 200MHz.
5993
	 * We seem to get an unstable or solid color picture at 200MHz.
6030
	 * Not sure what's wrong. For now use 200MHz only when all pipes
5994
	 * Not sure what's wrong. For now use 200MHz only when all pipes
6031
	 * are off.
5995
	 * are off.
6032
	 */
5996
	 */
6033
	if (!IS_CHERRYVIEW(dev_priv) &&
5997
	if (!IS_CHERRYVIEW(dev_priv) &&
6034
	    max_pixclk > freq_320*limit/100)
5998
	    max_pixclk > freq_320*limit/100)
6035
		return 400000;
5999
		return 400000;
6036
	else if (max_pixclk > 266667*limit/100)
6000
	else if (max_pixclk > 266667*limit/100)
6037
		return freq_320;
6001
		return freq_320;
6038
	else if (max_pixclk > 0)
6002
	else if (max_pixclk > 0)
6039
		return 266667;
6003
		return 266667;
6040
	else
6004
	else
6041
		return 200000;
6005
		return 200000;
6042
}
6006
}
6043
 
6007
 
6044
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
6008
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
6045
			      int max_pixclk)
6009
			      int max_pixclk)
6046
{
6010
{
6047
	/*
6011
	/*
6048
	 * FIXME:
6012
	 * FIXME:
6049
	 * - remove the guardband, it's not needed on BXT
6013
	 * - remove the guardband, it's not needed on BXT
6050
	 * - set 19.2MHz bypass frequency if there are no active pipes
6014
	 * - set 19.2MHz bypass frequency if there are no active pipes
6051
	 */
6015
	 */
6052
	if (max_pixclk > 576000*9/10)
6016
	if (max_pixclk > 576000*9/10)
6053
		return 624000;
6017
		return 624000;
6054
	else if (max_pixclk > 384000*9/10)
6018
	else if (max_pixclk > 384000*9/10)
6055
		return 576000;
6019
		return 576000;
6056
	else if (max_pixclk > 288000*9/10)
6020
	else if (max_pixclk > 288000*9/10)
6057
		return 384000;
6021
		return 384000;
6058
	else if (max_pixclk > 144000*9/10)
6022
	else if (max_pixclk > 144000*9/10)
6059
		return 288000;
6023
		return 288000;
6060
	else
6024
	else
6061
		return 144000;
6025
		return 144000;
6062
}
6026
}
6063
 
6027
 
6064
/* Compute the max pixel clock for new configuration. Uses atomic state if
-
 
6065
 * that's non-NULL, look at current state otherwise. */
6028
/* Compute the max pixel clock for new configuration. */
6066
static int intel_mode_max_pixclk(struct drm_device *dev,
6029
static int intel_mode_max_pixclk(struct drm_device *dev,
6067
				 struct drm_atomic_state *state)
6030
				 struct drm_atomic_state *state)
6068
{
6031
{
-
 
6032
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-
 
6033
	struct drm_i915_private *dev_priv = dev->dev_private;
6069
	struct intel_crtc *intel_crtc;
6034
	struct drm_crtc *crtc;
6070
	struct intel_crtc_state *crtc_state;
6035
	struct drm_crtc_state *crtc_state;
6071
	int max_pixclk = 0;
6036
	unsigned max_pixclk = 0, i;
-
 
6037
	enum pipe pipe;
6072
 
-
 
6073
	for_each_intel_crtc(dev, intel_crtc) {
6038
 
6074
		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6039
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
6075
		if (IS_ERR(crtc_state))
-
 
6076
			return PTR_ERR(crtc_state);
6040
	       sizeof(intel_state->min_pixclk));
6077
 
6041
 
6078
		if (!crtc_state->base.enable)
6042
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
6079
			continue;
6043
		int pixclk = 0;
-
 
6044
 
-
 
6045
		if (crtc_state->enable)
6080
 
6046
			pixclk = crtc_state->adjusted_mode.crtc_clock;
-
 
6047
 
-
 
6048
		intel_state->min_pixclk[i] = pixclk;
-
 
6049
	}
6081
		max_pixclk = max(max_pixclk,
6050
 
6082
				 crtc_state->base.adjusted_mode.crtc_clock);
6051
	for_each_pipe(dev_priv, pipe)
6083
	}
6052
		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
6084
 
6053
 
6085
	return max_pixclk;
6054
	return max_pixclk;
6086
}
6055
}
6087
 
6056
 
6088
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6057
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6089
{
6058
{
6090
	struct drm_device *dev = state->dev;
6059
	struct drm_device *dev = state->dev;
6091
	struct drm_i915_private *dev_priv = dev->dev_private;
6060
	struct drm_i915_private *dev_priv = dev->dev_private;
6092
	int max_pixclk = intel_mode_max_pixclk(dev, state);
6061
	int max_pixclk = intel_mode_max_pixclk(dev, state);
-
 
6062
	struct intel_atomic_state *intel_state =
-
 
6063
		to_intel_atomic_state(state);
6093
 
6064
 
6094
	if (max_pixclk < 0)
6065
	if (max_pixclk < 0)
6095
		return max_pixclk;
6066
		return max_pixclk;
6096
 
6067
 
6097
	to_intel_atomic_state(state)->cdclk =
6068
	intel_state->cdclk = intel_state->dev_cdclk =
-
 
6069
		valleyview_calc_cdclk(dev_priv, max_pixclk);
-
 
6070
 
-
 
6071
	if (!intel_state->active_crtcs)
6098
		valleyview_calc_cdclk(dev_priv, max_pixclk);
6072
		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
6099
 
6073
 
6100
	return 0;
6074
	return 0;
6101
}
6075
}
6102
 
6076
 
6103
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
6077
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
6104
{
6078
{
6105
	struct drm_device *dev = state->dev;
6079
	struct drm_device *dev = state->dev;
6106
	struct drm_i915_private *dev_priv = dev->dev_private;
6080
	struct drm_i915_private *dev_priv = dev->dev_private;
6107
	int max_pixclk = intel_mode_max_pixclk(dev, state);
6081
	int max_pixclk = intel_mode_max_pixclk(dev, state);
-
 
6082
	struct intel_atomic_state *intel_state =
-
 
6083
		to_intel_atomic_state(state);
6108
 
6084
 
6109
	if (max_pixclk < 0)
6085
	if (max_pixclk < 0)
6110
		return max_pixclk;
6086
		return max_pixclk;
6111
 
6087
 
6112
	to_intel_atomic_state(state)->cdclk =
6088
	intel_state->cdclk = intel_state->dev_cdclk =
-
 
6089
		broxton_calc_cdclk(dev_priv, max_pixclk);
-
 
6090
 
-
 
6091
	if (!intel_state->active_crtcs)
6113
		broxton_calc_cdclk(dev_priv, max_pixclk);
6092
		intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
6114
 
6093
 
6115
	return 0;
6094
	return 0;
6116
}
6095
}
6117
 
6096
 
6118
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
6097
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
6119
{
6098
{
6120
	unsigned int credits, default_credits;
6099
	unsigned int credits, default_credits;
6121
 
6100
 
6122
	if (IS_CHERRYVIEW(dev_priv))
6101
	if (IS_CHERRYVIEW(dev_priv))
6123
		default_credits = PFI_CREDIT(12);
6102
		default_credits = PFI_CREDIT(12);
6124
	else
6103
	else
6125
		default_credits = PFI_CREDIT(8);
6104
		default_credits = PFI_CREDIT(8);
6126
 
6105
 
6127
	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
6106
	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
6128
		/* CHV suggested value is 31 or 63 */
6107
		/* CHV suggested value is 31 or 63 */
6129
		if (IS_CHERRYVIEW(dev_priv))
6108
		if (IS_CHERRYVIEW(dev_priv))
6130
			credits = PFI_CREDIT_63;
6109
			credits = PFI_CREDIT_63;
6131
		else
6110
		else
6132
			credits = PFI_CREDIT(15);
6111
			credits = PFI_CREDIT(15);
6133
	} else {
6112
	} else {
6134
		credits = default_credits;
6113
		credits = default_credits;
6135
	}
6114
	}
6136
 
6115
 
6137
	/*
6116
	/*
6138
	 * WA - write default credits before re-programming
6117
	 * WA - write default credits before re-programming
6139
	 * FIXME: should we also set the resend bit here?
6118
	 * FIXME: should we also set the resend bit here?
6140
	 */
6119
	 */
6141
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6120
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6142
		   default_credits);
6121
		   default_credits);
6143
 
6122
 
6144
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6123
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6145
		   credits | PFI_CREDIT_RESEND);
6124
		   credits | PFI_CREDIT_RESEND);
6146
 
6125
 
6147
	/*
6126
	/*
6148
	 * FIXME is this guaranteed to clear
6127
	 * FIXME is this guaranteed to clear
6149
	 * immediately or should we poll for it?
6128
	 * immediately or should we poll for it?
6150
	 */
6129
	 */
6151
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6130
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6152
}
6131
}
6153
 
6132
 
6154
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
6133
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
6155
{
6134
{
6156
	struct drm_device *dev = old_state->dev;
6135
	struct drm_device *dev = old_state->dev;
6157
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
-
 
6158
	struct drm_i915_private *dev_priv = dev->dev_private;
6136
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
6137
	struct intel_atomic_state *old_intel_state =
-
 
6138
		to_intel_atomic_state(old_state);
-
 
6139
	unsigned req_cdclk = old_intel_state->dev_cdclk;
6159
 
6140
 
6160
	/*
6141
	/*
6161
	 * FIXME: We can end up here with all power domains off, yet
6142
	 * FIXME: We can end up here with all power domains off, yet
6162
	 * with a CDCLK frequency other than the minimum. To account
6143
	 * with a CDCLK frequency other than the minimum. To account
6163
	 * for this take the PIPE-A power domain, which covers the HW
6144
	 * for this take the PIPE-A power domain, which covers the HW
6164
	 * blocks needed for the following programming. This can be
6145
	 * blocks needed for the following programming. This can be
6165
	 * removed once it's guaranteed that we get here either with
6146
	 * removed once it's guaranteed that we get here either with
6166
	 * the minimum CDCLK set, or the required power domains
6147
	 * the minimum CDCLK set, or the required power domains
6167
	 * enabled.
6148
	 * enabled.
6168
	 */
6149
	 */
6169
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
6150
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
6170
 
6151
 
6171
	if (IS_CHERRYVIEW(dev))
6152
	if (IS_CHERRYVIEW(dev))
6172
		cherryview_set_cdclk(dev, req_cdclk);
6153
		cherryview_set_cdclk(dev, req_cdclk);
6173
	else
6154
	else
6174
		valleyview_set_cdclk(dev, req_cdclk);
6155
		valleyview_set_cdclk(dev, req_cdclk);
6175
 
6156
 
6176
	vlv_program_pfi_credits(dev_priv);
6157
	vlv_program_pfi_credits(dev_priv);
6177
 
6158
 
6178
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
6159
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
6179
}
6160
}
6180
 
6161
 
6181
static void valleyview_crtc_enable(struct drm_crtc *crtc)
6162
static void valleyview_crtc_enable(struct drm_crtc *crtc)
6182
{
6163
{
6183
	struct drm_device *dev = crtc->dev;
6164
	struct drm_device *dev = crtc->dev;
6184
	struct drm_i915_private *dev_priv = to_i915(dev);
6165
	struct drm_i915_private *dev_priv = to_i915(dev);
6185
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6166
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6186
	struct intel_encoder *encoder;
6167
	struct intel_encoder *encoder;
6187
	int pipe = intel_crtc->pipe;
6168
	int pipe = intel_crtc->pipe;
6188
 
6169
 
6189
	if (WARN_ON(intel_crtc->active))
6170
	if (WARN_ON(intel_crtc->active))
6190
		return;
6171
		return;
6191
 
6172
 
6192
	if (intel_crtc->config->has_dp_encoder)
6173
	if (intel_crtc->config->has_dp_encoder)
6193
		intel_dp_set_m_n(intel_crtc, M1_N1);
6174
		intel_dp_set_m_n(intel_crtc, M1_N1);
6194
 
6175
 
6195
	intel_set_pipe_timings(intel_crtc);
6176
	intel_set_pipe_timings(intel_crtc);
6196
 
6177
 
6197
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6178
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6198
		struct drm_i915_private *dev_priv = dev->dev_private;
6179
		struct drm_i915_private *dev_priv = dev->dev_private;
6199
 
6180
 
6200
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6181
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6201
		I915_WRITE(CHV_CANVAS(pipe), 0);
6182
		I915_WRITE(CHV_CANVAS(pipe), 0);
6202
	}
6183
	}
6203
 
6184
 
6204
	i9xx_set_pipeconf(intel_crtc);
6185
	i9xx_set_pipeconf(intel_crtc);
6205
 
6186
 
6206
	intel_crtc->active = true;
6187
	intel_crtc->active = true;
6207
 
6188
 
6208
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6189
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6209
 
6190
 
6210
	for_each_encoder_on_crtc(dev, crtc, encoder)
6191
	for_each_encoder_on_crtc(dev, crtc, encoder)
6211
		if (encoder->pre_pll_enable)
6192
		if (encoder->pre_pll_enable)
6212
			encoder->pre_pll_enable(encoder);
6193
			encoder->pre_pll_enable(encoder);
6213
 
6194
 
6214
	if (!intel_crtc->config->has_dsi_encoder) {
6195
	if (!intel_crtc->config->has_dsi_encoder) {
6215
		if (IS_CHERRYVIEW(dev)) {
6196
		if (IS_CHERRYVIEW(dev)) {
6216
			chv_prepare_pll(intel_crtc, intel_crtc->config);
6197
			chv_prepare_pll(intel_crtc, intel_crtc->config);
6217
			chv_enable_pll(intel_crtc, intel_crtc->config);
6198
			chv_enable_pll(intel_crtc, intel_crtc->config);
6218
		} else {
6199
		} else {
6219
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
6200
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
6220
			vlv_enable_pll(intel_crtc, intel_crtc->config);
6201
			vlv_enable_pll(intel_crtc, intel_crtc->config);
6221
		}
6202
		}
6222
	}
6203
	}
6223
 
6204
 
6224
	for_each_encoder_on_crtc(dev, crtc, encoder)
6205
	for_each_encoder_on_crtc(dev, crtc, encoder)
6225
		if (encoder->pre_enable)
6206
		if (encoder->pre_enable)
6226
			encoder->pre_enable(encoder);
6207
			encoder->pre_enable(encoder);
6227
 
6208
 
6228
	i9xx_pfit_enable(intel_crtc);
6209
	i9xx_pfit_enable(intel_crtc);
6229
 
6210
 
6230
	intel_crtc_load_lut(crtc);
6211
	intel_crtc_load_lut(crtc);
6231
 
6212
 
6232
	intel_update_watermarks(crtc);
6213
	intel_update_watermarks(crtc);
6233
	intel_enable_pipe(intel_crtc);
6214
	intel_enable_pipe(intel_crtc);
6234
 
6215
 
6235
	assert_vblank_disabled(crtc);
6216
	assert_vblank_disabled(crtc);
6236
	drm_crtc_vblank_on(crtc);
6217
	drm_crtc_vblank_on(crtc);
6237
 
6218
 
6238
	for_each_encoder_on_crtc(dev, crtc, encoder)
6219
	for_each_encoder_on_crtc(dev, crtc, encoder)
6239
		encoder->enable(encoder);
6220
		encoder->enable(encoder);
6240
}
6221
}
6241
 
6222
 
6242
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6223
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6243
{
6224
{
6244
	struct drm_device *dev = crtc->base.dev;
6225
	struct drm_device *dev = crtc->base.dev;
6245
	struct drm_i915_private *dev_priv = dev->dev_private;
6226
	struct drm_i915_private *dev_priv = dev->dev_private;
6246
 
6227
 
6247
	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6228
	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6248
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6229
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6249
}
6230
}
6250
 
6231
 
6251
static void i9xx_crtc_enable(struct drm_crtc *crtc)
6232
static void i9xx_crtc_enable(struct drm_crtc *crtc)
6252
{
6233
{
6253
	struct drm_device *dev = crtc->dev;
6234
	struct drm_device *dev = crtc->dev;
6254
	struct drm_i915_private *dev_priv = to_i915(dev);
6235
	struct drm_i915_private *dev_priv = to_i915(dev);
6255
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6236
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6256
	struct intel_encoder *encoder;
6237
	struct intel_encoder *encoder;
6257
	int pipe = intel_crtc->pipe;
6238
	int pipe = intel_crtc->pipe;
6258
 
6239
 
6259
	if (WARN_ON(intel_crtc->active))
6240
	if (WARN_ON(intel_crtc->active))
6260
		return;
6241
		return;
6261
 
6242
 
6262
	i9xx_set_pll_dividers(intel_crtc);
6243
	i9xx_set_pll_dividers(intel_crtc);
6263
 
6244
 
6264
	if (intel_crtc->config->has_dp_encoder)
6245
	if (intel_crtc->config->has_dp_encoder)
6265
		intel_dp_set_m_n(intel_crtc, M1_N1);
6246
		intel_dp_set_m_n(intel_crtc, M1_N1);
6266
 
6247
 
6267
	intel_set_pipe_timings(intel_crtc);
6248
	intel_set_pipe_timings(intel_crtc);
6268
 
6249
 
6269
	i9xx_set_pipeconf(intel_crtc);
6250
	i9xx_set_pipeconf(intel_crtc);
6270
 
6251
 
6271
	intel_crtc->active = true;
6252
	intel_crtc->active = true;
6272
 
6253
 
6273
	if (!IS_GEN2(dev))
6254
	if (!IS_GEN2(dev))
6274
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6255
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6275
 
6256
 
6276
	for_each_encoder_on_crtc(dev, crtc, encoder)
6257
	for_each_encoder_on_crtc(dev, crtc, encoder)
6277
		if (encoder->pre_enable)
6258
		if (encoder->pre_enable)
6278
			encoder->pre_enable(encoder);
6259
			encoder->pre_enable(encoder);
6279
 
6260
 
6280
	i9xx_enable_pll(intel_crtc);
6261
	i9xx_enable_pll(intel_crtc);
6281
 
6262
 
6282
	i9xx_pfit_enable(intel_crtc);
6263
	i9xx_pfit_enable(intel_crtc);
6283
 
6264
 
6284
	intel_crtc_load_lut(crtc);
6265
	intel_crtc_load_lut(crtc);
6285
 
6266
 
6286
	intel_update_watermarks(crtc);
6267
	intel_update_watermarks(crtc);
6287
	intel_enable_pipe(intel_crtc);
6268
	intel_enable_pipe(intel_crtc);
6288
 
6269
 
6289
	assert_vblank_disabled(crtc);
6270
	assert_vblank_disabled(crtc);
6290
	drm_crtc_vblank_on(crtc);
6271
	drm_crtc_vblank_on(crtc);
6291
 
6272
 
6292
	for_each_encoder_on_crtc(dev, crtc, encoder)
6273
	for_each_encoder_on_crtc(dev, crtc, encoder)
6293
		encoder->enable(encoder);
6274
		encoder->enable(encoder);
6294
 
-
 
6295
	intel_fbc_enable(intel_crtc);
-
 
6296
}
6275
}
6297
 
6276
 
6298
static void i9xx_pfit_disable(struct intel_crtc *crtc)
6277
static void i9xx_pfit_disable(struct intel_crtc *crtc)
6299
{
6278
{
6300
	struct drm_device *dev = crtc->base.dev;
6279
	struct drm_device *dev = crtc->base.dev;
6301
	struct drm_i915_private *dev_priv = dev->dev_private;
6280
	struct drm_i915_private *dev_priv = dev->dev_private;
6302
 
6281
 
6303
	if (!crtc->config->gmch_pfit.control)
6282
	if (!crtc->config->gmch_pfit.control)
6304
		return;
6283
		return;
6305
 
6284
 
6306
	assert_pipe_disabled(dev_priv, crtc->pipe);
6285
	assert_pipe_disabled(dev_priv, crtc->pipe);
6307
 
6286
 
6308
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6287
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6309
			 I915_READ(PFIT_CONTROL));
6288
			 I915_READ(PFIT_CONTROL));
6310
	I915_WRITE(PFIT_CONTROL, 0);
6289
	I915_WRITE(PFIT_CONTROL, 0);
6311
}
6290
}
6312
 
6291
 
6313
static void i9xx_crtc_disable(struct drm_crtc *crtc)
6292
static void i9xx_crtc_disable(struct drm_crtc *crtc)
6314
{
6293
{
6315
	struct drm_device *dev = crtc->dev;
6294
	struct drm_device *dev = crtc->dev;
6316
	struct drm_i915_private *dev_priv = dev->dev_private;
6295
	struct drm_i915_private *dev_priv = dev->dev_private;
6317
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6296
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6318
	struct intel_encoder *encoder;
6297
	struct intel_encoder *encoder;
6319
	int pipe = intel_crtc->pipe;
6298
	int pipe = intel_crtc->pipe;
6320
 
6299
 
6321
	/*
6300
	/*
6322
	 * On gen2 planes are double buffered but the pipe isn't, so we must
6301
	 * On gen2 planes are double buffered but the pipe isn't, so we must
6323
	 * wait for planes to fully turn off before disabling the pipe.
6302
	 * wait for planes to fully turn off before disabling the pipe.
6324
	 * We also need to wait on all gmch platforms because of the
6303
	 * We also need to wait on all gmch platforms because of the
6325
	 * self-refresh mode constraint explained above.
6304
	 * self-refresh mode constraint explained above.
6326
	 */
6305
	 */
6327
	intel_wait_for_vblank(dev, pipe);
6306
	intel_wait_for_vblank(dev, pipe);
6328
 
6307
 
6329
	for_each_encoder_on_crtc(dev, crtc, encoder)
6308
	for_each_encoder_on_crtc(dev, crtc, encoder)
6330
		encoder->disable(encoder);
6309
		encoder->disable(encoder);
6331
 
6310
 
6332
	drm_crtc_vblank_off(crtc);
6311
	drm_crtc_vblank_off(crtc);
6333
	assert_vblank_disabled(crtc);
6312
	assert_vblank_disabled(crtc);
6334
 
6313
 
6335
	intel_disable_pipe(intel_crtc);
6314
	intel_disable_pipe(intel_crtc);
6336
 
6315
 
6337
	i9xx_pfit_disable(intel_crtc);
6316
	i9xx_pfit_disable(intel_crtc);
6338
 
6317
 
6339
	for_each_encoder_on_crtc(dev, crtc, encoder)
6318
	for_each_encoder_on_crtc(dev, crtc, encoder)
6340
		if (encoder->post_disable)
6319
		if (encoder->post_disable)
6341
			encoder->post_disable(encoder);
6320
			encoder->post_disable(encoder);
6342
 
6321
 
6343
	if (!intel_crtc->config->has_dsi_encoder) {
6322
	if (!intel_crtc->config->has_dsi_encoder) {
6344
		if (IS_CHERRYVIEW(dev))
6323
		if (IS_CHERRYVIEW(dev))
6345
			chv_disable_pll(dev_priv, pipe);
6324
			chv_disable_pll(dev_priv, pipe);
6346
		else if (IS_VALLEYVIEW(dev))
6325
		else if (IS_VALLEYVIEW(dev))
6347
			vlv_disable_pll(dev_priv, pipe);
6326
			vlv_disable_pll(dev_priv, pipe);
6348
		else
6327
		else
6349
			i9xx_disable_pll(intel_crtc);
6328
			i9xx_disable_pll(intel_crtc);
6350
	}
6329
	}
6351
 
6330
 
6352
	for_each_encoder_on_crtc(dev, crtc, encoder)
6331
	for_each_encoder_on_crtc(dev, crtc, encoder)
6353
		if (encoder->post_pll_disable)
6332
		if (encoder->post_pll_disable)
6354
			encoder->post_pll_disable(encoder);
6333
			encoder->post_pll_disable(encoder);
6355
 
6334
 
6356
	if (!IS_GEN2(dev))
6335
	if (!IS_GEN2(dev))
6357
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6336
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6358
 
-
 
6359
	intel_fbc_disable_crtc(intel_crtc);
-
 
6360
}
6337
}
6361
 
6338
 
6362
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6339
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6363
{
6340
{
6364
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6341
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6365
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6342
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6366
	enum intel_display_power_domain domain;
6343
	enum intel_display_power_domain domain;
6367
	unsigned long domains;
6344
	unsigned long domains;
6368
 
6345
 
6369
	if (!intel_crtc->active)
6346
	if (!intel_crtc->active)
6370
		return;
6347
		return;
6371
 
6348
 
6372
	if (to_intel_plane_state(crtc->primary->state)->visible) {
6349
	if (to_intel_plane_state(crtc->primary->state)->visible) {
6373
		WARN_ON(intel_crtc->unpin_work);
6350
		WARN_ON(intel_crtc->unpin_work);
6374
 
6351
 
6375
		intel_pre_disable_primary(crtc);
6352
		intel_pre_disable_primary(crtc);
6376
 
6353
 
6377
		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6354
		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6378
		to_intel_plane_state(crtc->primary->state)->visible = false;
6355
		to_intel_plane_state(crtc->primary->state)->visible = false;
6379
	}
6356
	}
6380
 
6357
 
6381
	dev_priv->display.crtc_disable(crtc);
6358
	dev_priv->display.crtc_disable(crtc);
6382
	intel_crtc->active = false;
6359
	intel_crtc->active = false;
-
 
6360
	intel_fbc_disable(intel_crtc);
6383
	intel_update_watermarks(crtc);
6361
	intel_update_watermarks(crtc);
6384
	intel_disable_shared_dpll(intel_crtc);
6362
	intel_disable_shared_dpll(intel_crtc);
6385
 
6363
 
6386
	domains = intel_crtc->enabled_power_domains;
6364
	domains = intel_crtc->enabled_power_domains;
6387
	for_each_power_domain(domain, domains)
6365
	for_each_power_domain(domain, domains)
6388
		intel_display_power_put(dev_priv, domain);
6366
		intel_display_power_put(dev_priv, domain);
6389
	intel_crtc->enabled_power_domains = 0;
6367
	intel_crtc->enabled_power_domains = 0;
-
 
6368
 
-
 
6369
	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
-
 
6370
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
6390
}
6371
}
6391
 
6372
 
6392
/*
6373
/*
6393
 * turn all crtc's off, but do not adjust state
6374
 * turn all crtc's off, but do not adjust state
6394
 * This has to be paired with a call to intel_modeset_setup_hw_state.
6375
 * This has to be paired with a call to intel_modeset_setup_hw_state.
6395
 */
6376
 */
6396
int intel_display_suspend(struct drm_device *dev)
6377
int intel_display_suspend(struct drm_device *dev)
6397
{
6378
{
6398
	struct drm_mode_config *config = &dev->mode_config;
6379
	struct drm_i915_private *dev_priv = to_i915(dev);
6399
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
-
 
6400
	struct drm_atomic_state *state;
6380
	struct drm_atomic_state *state;
6401
	struct drm_crtc *crtc;
-
 
6402
	unsigned crtc_mask = 0;
-
 
6403
	int ret = 0;
6381
	int ret;
6404
 
-
 
6405
	if (WARN_ON(!ctx))
-
 
6406
		return 0;
-
 
6407
 
-
 
6408
	lockdep_assert_held(&ctx->ww_ctx);
-
 
6409
	state = drm_atomic_state_alloc(dev);
-
 
6410
	if (WARN_ON(!state))
-
 
6411
		return -ENOMEM;
-
 
6412
 
-
 
6413
	state->acquire_ctx = ctx;
-
 
6414
	state->allow_modeset = true;
-
 
6415
 
-
 
6416
	for_each_crtc(dev, crtc) {
-
 
6417
		struct drm_crtc_state *crtc_state =
-
 
6418
			drm_atomic_get_crtc_state(state, crtc);
-
 
6419
 
-
 
6420
		ret = PTR_ERR_OR_ZERO(crtc_state);
-
 
6421
		if (ret)
-
 
6422
			goto free;
-
 
6423
 
-
 
6424
		if (!crtc_state->active)
-
 
6425
			continue;
-
 
6426
 
-
 
6427
		crtc_state->active = false;
-
 
6428
		crtc_mask |= 1 << drm_crtc_index(crtc);
-
 
6429
	}
-
 
6430
 
-
 
6431
	if (crtc_mask) {
-
 
6432
		ret = drm_atomic_commit(state);
-
 
6433
 
-
 
6434
		if (!ret) {
-
 
6435
			for_each_crtc(dev, crtc)
-
 
6436
				if (crtc_mask & (1 << drm_crtc_index(crtc)))
-
 
6437
					crtc->state->active = true;
-
 
6438
 
-
 
6439
			return ret;
-
 
6440
		}
-
 
6441
	}
-
 
-
 
6382
 
6442
 
6383
	state = drm_atomic_helper_suspend(dev);
6443
free:
6384
	ret = PTR_ERR_OR_ZERO(state);
6444
	if (ret)
6385
	if (ret)
-
 
6386
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6445
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6387
	else
6446
	drm_atomic_state_free(state);
6388
		dev_priv->modeset_restore_state = state;
6447
	return ret;
6389
	return ret;
6448
}
6390
}
6449
 
6391
 
6450
void intel_encoder_destroy(struct drm_encoder *encoder)
6392
void intel_encoder_destroy(struct drm_encoder *encoder)
6451
{
6393
{
6452
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6394
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6453
 
6395
 
6454
	drm_encoder_cleanup(encoder);
6396
	drm_encoder_cleanup(encoder);
6455
	kfree(intel_encoder);
6397
	kfree(intel_encoder);
6456
}
6398
}
6457
 
6399
 
6458
/* Cross check the actual hw state with our own modeset state tracking (and it's
6400
/* Cross check the actual hw state with our own modeset state tracking (and it's
6459
 * internal consistency). */
6401
 * internal consistency). */
6460
static void intel_connector_check_state(struct intel_connector *connector)
6402
static void intel_connector_check_state(struct intel_connector *connector)
6461
{
6403
{
6462
	struct drm_crtc *crtc = connector->base.state->crtc;
6404
	struct drm_crtc *crtc = connector->base.state->crtc;
6463
 
6405
 
6464
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6406
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6465
		      connector->base.base.id,
6407
		      connector->base.base.id,
6466
		      connector->base.name);
6408
		      connector->base.name);
6467
 
6409
 
6468
	if (connector->get_hw_state(connector)) {
6410
	if (connector->get_hw_state(connector)) {
6469
		struct intel_encoder *encoder = connector->encoder;
6411
		struct intel_encoder *encoder = connector->encoder;
6470
		struct drm_connector_state *conn_state = connector->base.state;
6412
		struct drm_connector_state *conn_state = connector->base.state;
6471
 
6413
 
6472
		I915_STATE_WARN(!crtc,
6414
		I915_STATE_WARN(!crtc,
6473
			 "connector enabled without attached crtc\n");
6415
			 "connector enabled without attached crtc\n");
6474
 
6416
 
6475
		if (!crtc)
6417
		if (!crtc)
6476
			return;
6418
			return;
6477
 
6419
 
6478
		I915_STATE_WARN(!crtc->state->active,
6420
		I915_STATE_WARN(!crtc->state->active,
6479
		      "connector is active, but attached crtc isn't\n");
6421
		      "connector is active, but attached crtc isn't\n");
6480
 
6422
 
6481
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6423
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6482
			return;
6424
			return;
6483
 
6425
 
6484
		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6426
		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6485
			"atomic encoder doesn't match attached encoder\n");
6427
			"atomic encoder doesn't match attached encoder\n");
6486
 
6428
 
6487
		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6429
		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6488
			"attached encoder crtc differs from connector crtc\n");
6430
			"attached encoder crtc differs from connector crtc\n");
6489
	} else {
6431
	} else {
6490
		I915_STATE_WARN(crtc && crtc->state->active,
6432
		I915_STATE_WARN(crtc && crtc->state->active,
6491
			"attached crtc is active, but connector isn't\n");
6433
			"attached crtc is active, but connector isn't\n");
6492
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6434
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6493
			"best encoder set without crtc!\n");
6435
			"best encoder set without crtc!\n");
6494
	}
6436
	}
6495
}
6437
}
6496
 
6438
 
6497
int intel_connector_init(struct intel_connector *connector)
6439
int intel_connector_init(struct intel_connector *connector)
6498
{
6440
{
6499
	drm_atomic_helper_connector_reset(&connector->base);
6441
	drm_atomic_helper_connector_reset(&connector->base);
6500
 
6442
 
6501
	if (!connector->base.state)
6443
	if (!connector->base.state)
6502
		return -ENOMEM;
6444
		return -ENOMEM;
6503
 
6445
 
6504
	return 0;
6446
	return 0;
6505
}
6447
}
6506
 
6448
 
6507
struct intel_connector *intel_connector_alloc(void)
6449
struct intel_connector *intel_connector_alloc(void)
6508
{
6450
{
6509
	struct intel_connector *connector;
6451
	struct intel_connector *connector;
6510
 
6452
 
6511
	connector = kzalloc(sizeof *connector, GFP_KERNEL);
6453
	connector = kzalloc(sizeof *connector, GFP_KERNEL);
6512
	if (!connector)
6454
	if (!connector)
6513
		return NULL;
6455
		return NULL;
6514
 
6456
 
6515
	if (intel_connector_init(connector) < 0) {
6457
	if (intel_connector_init(connector) < 0) {
6516
		kfree(connector);
6458
		kfree(connector);
6517
		return NULL;
6459
		return NULL;
6518
	}
6460
	}
6519
 
6461
 
6520
	return connector;
6462
	return connector;
6521
}
6463
}
6522
 
6464
 
6523
/* Simple connector->get_hw_state implementation for encoders that support only
6465
/* Simple connector->get_hw_state implementation for encoders that support only
6524
 * one connector and no cloning and hence the encoder state determines the state
6466
 * one connector and no cloning and hence the encoder state determines the state
6525
 * of the connector. */
6467
 * of the connector. */
6526
bool intel_connector_get_hw_state(struct intel_connector *connector)
6468
bool intel_connector_get_hw_state(struct intel_connector *connector)
6527
{
6469
{
6528
	enum pipe pipe = 0;
6470
	enum pipe pipe = 0;
6529
	struct intel_encoder *encoder = connector->encoder;
6471
	struct intel_encoder *encoder = connector->encoder;
6530
 
6472
 
6531
	return encoder->get_hw_state(encoder, &pipe);
6473
	return encoder->get_hw_state(encoder, &pipe);
6532
}
6474
}
6533
 
6475
 
6534
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6476
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6535
{
6477
{
6536
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6478
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6537
		return crtc_state->fdi_lanes;
6479
		return crtc_state->fdi_lanes;
6538
 
6480
 
6539
	return 0;
6481
	return 0;
6540
}
6482
}
6541
 
6483
 
6542
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
6484
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
6543
				     struct intel_crtc_state *pipe_config)
6485
				     struct intel_crtc_state *pipe_config)
6544
{
6486
{
6545
	struct drm_atomic_state *state = pipe_config->base.state;
6487
	struct drm_atomic_state *state = pipe_config->base.state;
6546
	struct intel_crtc *other_crtc;
6488
	struct intel_crtc *other_crtc;
6547
	struct intel_crtc_state *other_crtc_state;
6489
	struct intel_crtc_state *other_crtc_state;
6548
 
6490
 
6549
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6491
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6550
		      pipe_name(pipe), pipe_config->fdi_lanes);
6492
		      pipe_name(pipe), pipe_config->fdi_lanes);
6551
	if (pipe_config->fdi_lanes > 4) {
6493
	if (pipe_config->fdi_lanes > 4) {
6552
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6494
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6553
			      pipe_name(pipe), pipe_config->fdi_lanes);
6495
			      pipe_name(pipe), pipe_config->fdi_lanes);
6554
		return -EINVAL;
6496
		return -EINVAL;
6555
	}
6497
	}
6556
 
6498
 
6557
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6499
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6558
		if (pipe_config->fdi_lanes > 2) {
6500
		if (pipe_config->fdi_lanes > 2) {
6559
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6501
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6560
				      pipe_config->fdi_lanes);
6502
				      pipe_config->fdi_lanes);
6561
			return -EINVAL;
6503
			return -EINVAL;
6562
		} else {
6504
		} else {
6563
			return 0;
6505
			return 0;
6564
		}
6506
		}
6565
	}
6507
	}
6566
 
6508
 
6567
	if (INTEL_INFO(dev)->num_pipes == 2)
6509
	if (INTEL_INFO(dev)->num_pipes == 2)
6568
		return 0;
6510
		return 0;
6569
 
6511
 
6570
	/* Ivybridge 3 pipe is really complicated */
6512
	/* Ivybridge 3 pipe is really complicated */
6571
	switch (pipe) {
6513
	switch (pipe) {
6572
	case PIPE_A:
6514
	case PIPE_A:
6573
		return 0;
6515
		return 0;
6574
	case PIPE_B:
6516
	case PIPE_B:
6575
		if (pipe_config->fdi_lanes <= 2)
6517
		if (pipe_config->fdi_lanes <= 2)
6576
			return 0;
6518
			return 0;
6577
 
6519
 
6578
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6520
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6579
		other_crtc_state =
6521
		other_crtc_state =
6580
			intel_atomic_get_crtc_state(state, other_crtc);
6522
			intel_atomic_get_crtc_state(state, other_crtc);
6581
		if (IS_ERR(other_crtc_state))
6523
		if (IS_ERR(other_crtc_state))
6582
			return PTR_ERR(other_crtc_state);
6524
			return PTR_ERR(other_crtc_state);
6583
 
6525
 
6584
		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6526
		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6585
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6527
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6586
				      pipe_name(pipe), pipe_config->fdi_lanes);
6528
				      pipe_name(pipe), pipe_config->fdi_lanes);
6587
			return -EINVAL;
6529
			return -EINVAL;
6588
		}
6530
		}
6589
		return 0;
6531
		return 0;
6590
	case PIPE_C:
6532
	case PIPE_C:
6591
		if (pipe_config->fdi_lanes > 2) {
6533
		if (pipe_config->fdi_lanes > 2) {
6592
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6534
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6593
				      pipe_name(pipe), pipe_config->fdi_lanes);
6535
				      pipe_name(pipe), pipe_config->fdi_lanes);
6594
			return -EINVAL;
6536
			return -EINVAL;
6595
		}
6537
		}
6596
 
6538
 
6597
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6539
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6598
		other_crtc_state =
6540
		other_crtc_state =
6599
			intel_atomic_get_crtc_state(state, other_crtc);
6541
			intel_atomic_get_crtc_state(state, other_crtc);
6600
		if (IS_ERR(other_crtc_state))
6542
		if (IS_ERR(other_crtc_state))
6601
			return PTR_ERR(other_crtc_state);
6543
			return PTR_ERR(other_crtc_state);
6602
 
6544
 
6603
		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6545
		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6604
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6546
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6605
			return -EINVAL;
6547
			return -EINVAL;
6606
		}
6548
		}
6607
		return 0;
6549
		return 0;
6608
	default:
6550
	default:
6609
		BUG();
6551
		BUG();
6610
	}
6552
	}
6611
}
6553
}
6612
 
6554
 
6613
#define RETRY 1
6555
#define RETRY 1
6614
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6556
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6615
				       struct intel_crtc_state *pipe_config)
6557
				       struct intel_crtc_state *pipe_config)
6616
{
6558
{
6617
	struct drm_device *dev = intel_crtc->base.dev;
6559
	struct drm_device *dev = intel_crtc->base.dev;
6618
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6560
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6619
	int lane, link_bw, fdi_dotclock, ret;
6561
	int lane, link_bw, fdi_dotclock, ret;
6620
	bool needs_recompute = false;
6562
	bool needs_recompute = false;
6621
 
6563
 
6622
retry:
6564
retry:
6623
	/* FDI is a binary signal running at ~2.7GHz, encoding
6565
	/* FDI is a binary signal running at ~2.7GHz, encoding
6624
	 * each output octet as 10 bits. The actual frequency
6566
	 * each output octet as 10 bits. The actual frequency
6625
	 * is stored as a divider into a 100MHz clock, and the
6567
	 * is stored as a divider into a 100MHz clock, and the
6626
	 * mode pixel clock is stored in units of 1KHz.
6568
	 * mode pixel clock is stored in units of 1KHz.
6627
	 * Hence the bw of each lane in terms of the mode signal
6569
	 * Hence the bw of each lane in terms of the mode signal
6628
	 * is:
6570
	 * is:
6629
	 */
6571
	 */
6630
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
6572
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
6631
 
6573
 
6632
	fdi_dotclock = adjusted_mode->crtc_clock;
6574
	fdi_dotclock = adjusted_mode->crtc_clock;
6633
 
6575
 
6634
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6576
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6635
					   pipe_config->pipe_bpp);
6577
					   pipe_config->pipe_bpp);
6636
 
6578
 
6637
	pipe_config->fdi_lanes = lane;
6579
	pipe_config->fdi_lanes = lane;
6638
 
6580
 
6639
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6581
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6640
			       link_bw, &pipe_config->fdi_m_n);
6582
			       link_bw, &pipe_config->fdi_m_n);
6641
 
6583
 
6642
	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
6584
	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
6643
				       intel_crtc->pipe, pipe_config);
6585
				       intel_crtc->pipe, pipe_config);
6644
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6586
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6645
		pipe_config->pipe_bpp -= 2*3;
6587
		pipe_config->pipe_bpp -= 2*3;
6646
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6588
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6647
			      pipe_config->pipe_bpp);
6589
			      pipe_config->pipe_bpp);
6648
		needs_recompute = true;
6590
		needs_recompute = true;
6649
		pipe_config->bw_constrained = true;
6591
		pipe_config->bw_constrained = true;
6650
 
6592
 
6651
		goto retry;
6593
		goto retry;
6652
	}
6594
	}
6653
 
6595
 
6654
	if (needs_recompute)
6596
	if (needs_recompute)
6655
		return RETRY;
6597
		return RETRY;
6656
 
6598
 
6657
	return ret;
6599
	return ret;
6658
}
6600
}
6659
 
6601
 
6660
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6602
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6661
				     struct intel_crtc_state *pipe_config)
6603
				     struct intel_crtc_state *pipe_config)
6662
{
6604
{
6663
	if (pipe_config->pipe_bpp > 24)
6605
	if (pipe_config->pipe_bpp > 24)
6664
		return false;
6606
		return false;
6665
 
6607
 
6666
	/* HSW can handle pixel rate up to cdclk? */
6608
	/* HSW can handle pixel rate up to cdclk? */
6667
	if (IS_HASWELL(dev_priv->dev))
6609
	if (IS_HASWELL(dev_priv->dev))
6668
		return true;
6610
		return true;
6669
 
6611
 
6670
	/*
6612
	/*
6671
	 * We compare against max which means we must take
6613
	 * We compare against max which means we must take
6672
	 * the increased cdclk requirement into account when
6614
	 * the increased cdclk requirement into account when
6673
	 * calculating the new cdclk.
6615
	 * calculating the new cdclk.
6674
	 *
6616
	 *
6675
	 * Should measure whether using a lower cdclk w/o IPS
6617
	 * Should measure whether using a lower cdclk w/o IPS
6676
	 */
6618
	 */
6677
	return ilk_pipe_pixel_rate(pipe_config) <=
6619
	return ilk_pipe_pixel_rate(pipe_config) <=
6678
		dev_priv->max_cdclk_freq * 95 / 100;
6620
		dev_priv->max_cdclk_freq * 95 / 100;
6679
}
6621
}
6680
 
6622
 
6681
static void hsw_compute_ips_config(struct intel_crtc *crtc,
6623
static void hsw_compute_ips_config(struct intel_crtc *crtc,
6682
				   struct intel_crtc_state *pipe_config)
6624
				   struct intel_crtc_state *pipe_config)
6683
{
6625
{
6684
	struct drm_device *dev = crtc->base.dev;
6626
	struct drm_device *dev = crtc->base.dev;
6685
	struct drm_i915_private *dev_priv = dev->dev_private;
6627
	struct drm_i915_private *dev_priv = dev->dev_private;
6686
 
6628
 
6687
	pipe_config->ips_enabled = i915.enable_ips &&
6629
	pipe_config->ips_enabled = i915.enable_ips &&
6688
		hsw_crtc_supports_ips(crtc) &&
6630
		hsw_crtc_supports_ips(crtc) &&
6689
		pipe_config_supports_ips(dev_priv, pipe_config);
6631
		pipe_config_supports_ips(dev_priv, pipe_config);
6690
}
6632
}
6691
 
6633
 
6692
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6634
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6693
{
6635
{
6694
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6636
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6695
 
6637
 
6696
	/* GDG double wide on either pipe, otherwise pipe A only */
6638
	/* GDG double wide on either pipe, otherwise pipe A only */
6697
	return INTEL_INFO(dev_priv)->gen < 4 &&
6639
	return INTEL_INFO(dev_priv)->gen < 4 &&
6698
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6640
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6699
}
6641
}
6700
 
6642
 
6701
static int intel_crtc_compute_config(struct intel_crtc *crtc,
6643
static int intel_crtc_compute_config(struct intel_crtc *crtc,
6702
				     struct intel_crtc_state *pipe_config)
6644
				     struct intel_crtc_state *pipe_config)
6703
{
6645
{
6704
	struct drm_device *dev = crtc->base.dev;
6646
	struct drm_device *dev = crtc->base.dev;
6705
	struct drm_i915_private *dev_priv = dev->dev_private;
6647
	struct drm_i915_private *dev_priv = dev->dev_private;
6706
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6648
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6707
 
6649
 
6708
	/* FIXME should check pixel clock limits on all platforms */
6650
	/* FIXME should check pixel clock limits on all platforms */
6709
	if (INTEL_INFO(dev)->gen < 4) {
6651
	if (INTEL_INFO(dev)->gen < 4) {
6710
		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6652
		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6711
 
6653
 
6712
		/*
6654
		/*
6713
		 * Enable double wide mode when the dot clock
6655
		 * Enable double wide mode when the dot clock
6714
		 * is > 90% of the (display) core speed.
6656
		 * is > 90% of the (display) core speed.
6715
		 */
6657
		 */
6716
		if (intel_crtc_supports_double_wide(crtc) &&
6658
		if (intel_crtc_supports_double_wide(crtc) &&
6717
		    adjusted_mode->crtc_clock > clock_limit) {
6659
		    adjusted_mode->crtc_clock > clock_limit) {
6718
			clock_limit *= 2;
6660
			clock_limit *= 2;
6719
			pipe_config->double_wide = true;
6661
			pipe_config->double_wide = true;
6720
		}
6662
		}
6721
 
6663
 
6722
		if (adjusted_mode->crtc_clock > clock_limit) {
6664
		if (adjusted_mode->crtc_clock > clock_limit) {
6723
			DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6665
			DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6724
				      adjusted_mode->crtc_clock, clock_limit,
6666
				      adjusted_mode->crtc_clock, clock_limit,
6725
				      yesno(pipe_config->double_wide));
6667
				      yesno(pipe_config->double_wide));
6726
			return -EINVAL;
6668
			return -EINVAL;
6727
	}
6669
		}
6728
	}
6670
	}
6729
 
6671
 
6730
	/*
6672
	/*
6731
	 * Pipe horizontal size must be even in:
6673
	 * Pipe horizontal size must be even in:
6732
	 * - DVO ganged mode
6674
	 * - DVO ganged mode
6733
	 * - LVDS dual channel mode
6675
	 * - LVDS dual channel mode
6734
	 * - Double wide pipe
6676
	 * - Double wide pipe
6735
	 */
6677
	 */
6736
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6678
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6737
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6679
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6738
		pipe_config->pipe_src_w &= ~1;
6680
		pipe_config->pipe_src_w &= ~1;
6739
 
6681
 
6740
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
6682
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
6741
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6683
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6742
	 */
6684
	 */
6743
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6685
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6744
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6686
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6745
		return -EINVAL;
6687
		return -EINVAL;
6746
 
6688
 
6747
	if (HAS_IPS(dev))
6689
	if (HAS_IPS(dev))
6748
		hsw_compute_ips_config(crtc, pipe_config);
6690
		hsw_compute_ips_config(crtc, pipe_config);
6749
 
6691
 
6750
	if (pipe_config->has_pch_encoder)
6692
	if (pipe_config->has_pch_encoder)
6751
		return ironlake_fdi_compute_config(crtc, pipe_config);
6693
		return ironlake_fdi_compute_config(crtc, pipe_config);
6752
 
6694
 
6753
	return 0;
6695
	return 0;
6754
}
6696
}
6755
 
6697
 
6756
static int skylake_get_display_clock_speed(struct drm_device *dev)
6698
static int skylake_get_display_clock_speed(struct drm_device *dev)
6757
{
6699
{
6758
	struct drm_i915_private *dev_priv = to_i915(dev);
6700
	struct drm_i915_private *dev_priv = to_i915(dev);
6759
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6701
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6760
	uint32_t cdctl = I915_READ(CDCLK_CTL);
6702
	uint32_t cdctl = I915_READ(CDCLK_CTL);
6761
	uint32_t linkrate;
6703
	uint32_t linkrate;
6762
 
6704
 
6763
	if (!(lcpll1 & LCPLL_PLL_ENABLE))
6705
	if (!(lcpll1 & LCPLL_PLL_ENABLE))
6764
		return 24000; /* 24MHz is the cd freq with NSSC ref */
6706
		return 24000; /* 24MHz is the cd freq with NSSC ref */
6765
 
6707
 
6766
	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6708
	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6767
		return 540000;
6709
		return 540000;
6768
 
6710
 
6769
	linkrate = (I915_READ(DPLL_CTRL1) &
6711
	linkrate = (I915_READ(DPLL_CTRL1) &
6770
		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6712
		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6771
 
6713
 
6772
	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6714
	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6773
	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6715
	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6774
		/* vco 8640 */
6716
		/* vco 8640 */
6775
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6717
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6776
		case CDCLK_FREQ_450_432:
6718
		case CDCLK_FREQ_450_432:
6777
			return 432000;
6719
			return 432000;
6778
		case CDCLK_FREQ_337_308:
6720
		case CDCLK_FREQ_337_308:
6779
			return 308570;
6721
			return 308570;
6780
		case CDCLK_FREQ_675_617:
6722
		case CDCLK_FREQ_675_617:
6781
			return 617140;
6723
			return 617140;
6782
		default:
6724
		default:
6783
			WARN(1, "Unknown cd freq selection\n");
6725
			WARN(1, "Unknown cd freq selection\n");
6784
		}
6726
		}
6785
	} else {
6727
	} else {
6786
		/* vco 8100 */
6728
		/* vco 8100 */
6787
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6729
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6788
		case CDCLK_FREQ_450_432:
6730
		case CDCLK_FREQ_450_432:
6789
			return 450000;
6731
			return 450000;
6790
		case CDCLK_FREQ_337_308:
6732
		case CDCLK_FREQ_337_308:
6791
			return 337500;
6733
			return 337500;
6792
		case CDCLK_FREQ_675_617:
6734
		case CDCLK_FREQ_675_617:
6793
			return 675000;
6735
			return 675000;
6794
		default:
6736
		default:
6795
			WARN(1, "Unknown cd freq selection\n");
6737
			WARN(1, "Unknown cd freq selection\n");
6796
		}
6738
		}
6797
	}
6739
	}
6798
 
6740
 
6799
	/* error case, do as if DPLL0 isn't enabled */
6741
	/* error case, do as if DPLL0 isn't enabled */
6800
	return 24000;
6742
	return 24000;
6801
}
6743
}
6802
 
6744
 
6803
static int broxton_get_display_clock_speed(struct drm_device *dev)
6745
static int broxton_get_display_clock_speed(struct drm_device *dev)
6804
{
6746
{
6805
	struct drm_i915_private *dev_priv = to_i915(dev);
6747
	struct drm_i915_private *dev_priv = to_i915(dev);
6806
	uint32_t cdctl = I915_READ(CDCLK_CTL);
6748
	uint32_t cdctl = I915_READ(CDCLK_CTL);
6807
	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6749
	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6808
	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6750
	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6809
	int cdclk;
6751
	int cdclk;
6810
 
6752
 
6811
	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6753
	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6812
		return 19200;
6754
		return 19200;
6813
 
6755
 
6814
	cdclk = 19200 * pll_ratio / 2;
6756
	cdclk = 19200 * pll_ratio / 2;
6815
 
6757
 
6816
	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6758
	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6817
	case BXT_CDCLK_CD2X_DIV_SEL_1:
6759
	case BXT_CDCLK_CD2X_DIV_SEL_1:
6818
		return cdclk;  /* 576MHz or 624MHz */
6760
		return cdclk;  /* 576MHz or 624MHz */
6819
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6761
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6820
		return cdclk * 2 / 3; /* 384MHz */
6762
		return cdclk * 2 / 3; /* 384MHz */
6821
	case BXT_CDCLK_CD2X_DIV_SEL_2:
6763
	case BXT_CDCLK_CD2X_DIV_SEL_2:
6822
		return cdclk / 2; /* 288MHz */
6764
		return cdclk / 2; /* 288MHz */
6823
	case BXT_CDCLK_CD2X_DIV_SEL_4:
6765
	case BXT_CDCLK_CD2X_DIV_SEL_4:
6824
		return cdclk / 4; /* 144MHz */
6766
		return cdclk / 4; /* 144MHz */
6825
	}
6767
	}
6826
 
6768
 
6827
	/* error case, do as if DE PLL isn't enabled */
6769
	/* error case, do as if DE PLL isn't enabled */
6828
	return 19200;
6770
	return 19200;
6829
}
6771
}
6830
 
6772
 
6831
static int broadwell_get_display_clock_speed(struct drm_device *dev)
6773
static int broadwell_get_display_clock_speed(struct drm_device *dev)
6832
{
6774
{
6833
	struct drm_i915_private *dev_priv = dev->dev_private;
6775
	struct drm_i915_private *dev_priv = dev->dev_private;
6834
	uint32_t lcpll = I915_READ(LCPLL_CTL);
6776
	uint32_t lcpll = I915_READ(LCPLL_CTL);
6835
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6777
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6836
 
6778
 
6837
	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6779
	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6838
		return 800000;
6780
		return 800000;
6839
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6781
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6840
		return 450000;
6782
		return 450000;
6841
	else if (freq == LCPLL_CLK_FREQ_450)
6783
	else if (freq == LCPLL_CLK_FREQ_450)
6842
		return 450000;
6784
		return 450000;
6843
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6785
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6844
		return 540000;
6786
		return 540000;
6845
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6787
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6846
		return 337500;
6788
		return 337500;
6847
	else
6789
	else
6848
		return 675000;
6790
		return 675000;
6849
}
6791
}
6850
 
6792
 
6851
static int haswell_get_display_clock_speed(struct drm_device *dev)
6793
static int haswell_get_display_clock_speed(struct drm_device *dev)
6852
{
6794
{
6853
	struct drm_i915_private *dev_priv = dev->dev_private;
6795
	struct drm_i915_private *dev_priv = dev->dev_private;
6854
	uint32_t lcpll = I915_READ(LCPLL_CTL);
6796
	uint32_t lcpll = I915_READ(LCPLL_CTL);
6855
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6797
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6856
 
6798
 
6857
	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6799
	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6858
		return 800000;
6800
		return 800000;
6859
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6801
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6860
		return 450000;
6802
		return 450000;
6861
	else if (freq == LCPLL_CLK_FREQ_450)
6803
	else if (freq == LCPLL_CLK_FREQ_450)
6862
		return 450000;
6804
		return 450000;
6863
	else if (IS_HSW_ULT(dev))
6805
	else if (IS_HSW_ULT(dev))
6864
		return 337500;
6806
		return 337500;
6865
	else
6807
	else
6866
		return 540000;
6808
		return 540000;
6867
}
6809
}
6868
 
6810
 
6869
static int valleyview_get_display_clock_speed(struct drm_device *dev)
6811
static int valleyview_get_display_clock_speed(struct drm_device *dev)
6870
{
6812
{
6871
	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6813
	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6872
				      CCK_DISPLAY_CLOCK_CONTROL);
6814
				      CCK_DISPLAY_CLOCK_CONTROL);
6873
}
6815
}
6874
 
6816
 
6875
static int ilk_get_display_clock_speed(struct drm_device *dev)
6817
static int ilk_get_display_clock_speed(struct drm_device *dev)
6876
{
6818
{
6877
	return 450000;
6819
	return 450000;
6878
}
6820
}
6879
 
6821
 
6880
static int i945_get_display_clock_speed(struct drm_device *dev)
6822
static int i945_get_display_clock_speed(struct drm_device *dev)
6881
{
6823
{
6882
	return 400000;
6824
	return 400000;
6883
}
6825
}
6884
 
6826
 
6885
static int i915_get_display_clock_speed(struct drm_device *dev)
6827
static int i915_get_display_clock_speed(struct drm_device *dev)
6886
{
6828
{
6887
	return 333333;
6829
	return 333333;
6888
}
6830
}
6889
 
6831
 
6890
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6832
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6891
{
6833
{
6892
	return 200000;
6834
	return 200000;
6893
}
6835
}
6894
 
6836
 
6895
static int pnv_get_display_clock_speed(struct drm_device *dev)
6837
static int pnv_get_display_clock_speed(struct drm_device *dev)
6896
{
6838
{
6897
	u16 gcfgc = 0;
6839
	u16 gcfgc = 0;
6898
 
6840
 
6899
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6841
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6900
 
6842
 
6901
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6843
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6902
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
6844
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
6903
		return 266667;
6845
		return 266667;
6904
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
6846
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
6905
		return 333333;
6847
		return 333333;
6906
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
6848
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
6907
		return 444444;
6849
		return 444444;
6908
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6850
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6909
		return 200000;
6851
		return 200000;
6910
	default:
6852
	default:
6911
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6853
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6912
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
6854
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
6913
		return 133333;
6855
		return 133333;
6914
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
6856
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
6915
		return 166667;
6857
		return 166667;
6916
	}
6858
	}
6917
}
6859
}
6918
 
6860
 
6919
static int i915gm_get_display_clock_speed(struct drm_device *dev)
6861
static int i915gm_get_display_clock_speed(struct drm_device *dev)
6920
{
6862
{
6921
	u16 gcfgc = 0;
6863
	u16 gcfgc = 0;
6922
 
6864
 
6923
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6865
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6924
 
6866
 
6925
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6867
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6926
		return 133333;
6868
		return 133333;
6927
	else {
6869
	else {
6928
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6870
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6929
		case GC_DISPLAY_CLOCK_333_MHZ:
6871
		case GC_DISPLAY_CLOCK_333_MHZ:
6930
			return 333333;
6872
			return 333333;
6931
		default:
6873
		default:
6932
		case GC_DISPLAY_CLOCK_190_200_MHZ:
6874
		case GC_DISPLAY_CLOCK_190_200_MHZ:
6933
			return 190000;
6875
			return 190000;
6934
		}
6876
		}
6935
	}
6877
	}
6936
}
6878
}
6937
 
6879
 
6938
static int i865_get_display_clock_speed(struct drm_device *dev)
6880
static int i865_get_display_clock_speed(struct drm_device *dev)
6939
{
6881
{
6940
	return 266667;
6882
	return 266667;
6941
}
6883
}
6942
 
6884
 
6943
static int i85x_get_display_clock_speed(struct drm_device *dev)
6885
static int i85x_get_display_clock_speed(struct drm_device *dev)
6944
{
6886
{
6945
	u16 hpllcc = 0;
6887
	u16 hpllcc = 0;
6946
 
6888
 
6947
	/*
6889
	/*
6948
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6890
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6949
	 * encoding is different :(
6891
	 * encoding is different :(
6950
	 * FIXME is this the right way to detect 852GM/852GMV?
6892
	 * FIXME is this the right way to detect 852GM/852GMV?
6951
	 */
6893
	 */
6952
	if (dev->pdev->revision == 0x1)
6894
	if (dev->pdev->revision == 0x1)
6953
		return 133333;
6895
		return 133333;
6954
 
6896
 
6955
//   pci_bus_read_config_word(dev->pdev->bus,
6897
//   pci_bus_read_config_word(dev->pdev->bus,
6956
//                PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6898
//                PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6957
 
6899
 
6958
	/* Assume that the hardware is in the high speed state.  This
6900
	/* Assume that the hardware is in the high speed state.  This
6959
	 * should be the default.
6901
	 * should be the default.
6960
	 */
6902
	 */
6961
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6903
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6962
	case GC_CLOCK_133_200:
6904
	case GC_CLOCK_133_200:
6963
	case GC_CLOCK_133_200_2:
6905
	case GC_CLOCK_133_200_2:
6964
	case GC_CLOCK_100_200:
6906
	case GC_CLOCK_100_200:
6965
		return 200000;
6907
		return 200000;
6966
	case GC_CLOCK_166_250:
6908
	case GC_CLOCK_166_250:
6967
		return 250000;
6909
		return 250000;
6968
	case GC_CLOCK_100_133:
6910
	case GC_CLOCK_100_133:
6969
		return 133333;
6911
		return 133333;
6970
	case GC_CLOCK_133_266:
6912
	case GC_CLOCK_133_266:
6971
	case GC_CLOCK_133_266_2:
6913
	case GC_CLOCK_133_266_2:
6972
	case GC_CLOCK_166_266:
6914
	case GC_CLOCK_166_266:
6973
		return 266667;
6915
		return 266667;
6974
	}
6916
	}
6975
 
6917
 
6976
	/* Shouldn't happen */
6918
	/* Shouldn't happen */
6977
	return 0;
6919
	return 0;
6978
}
6920
}
6979
 
6921
 
6980
static int i830_get_display_clock_speed(struct drm_device *dev)
6922
static int i830_get_display_clock_speed(struct drm_device *dev)
6981
{
6923
{
6982
	return 133333;
6924
	return 133333;
6983
}
6925
}
6984
 
6926
 
6985
static unsigned int intel_hpll_vco(struct drm_device *dev)
6927
static unsigned int intel_hpll_vco(struct drm_device *dev)
6986
{
6928
{
6987
	struct drm_i915_private *dev_priv = dev->dev_private;
6929
	struct drm_i915_private *dev_priv = dev->dev_private;
6988
	static const unsigned int blb_vco[8] = {
6930
	static const unsigned int blb_vco[8] = {
6989
		[0] = 3200000,
6931
		[0] = 3200000,
6990
		[1] = 4000000,
6932
		[1] = 4000000,
6991
		[2] = 5333333,
6933
		[2] = 5333333,
6992
		[3] = 4800000,
6934
		[3] = 4800000,
6993
		[4] = 6400000,
6935
		[4] = 6400000,
6994
	};
6936
	};
6995
	static const unsigned int pnv_vco[8] = {
6937
	static const unsigned int pnv_vco[8] = {
6996
		[0] = 3200000,
6938
		[0] = 3200000,
6997
		[1] = 4000000,
6939
		[1] = 4000000,
6998
		[2] = 5333333,
6940
		[2] = 5333333,
6999
		[3] = 4800000,
6941
		[3] = 4800000,
7000
		[4] = 2666667,
6942
		[4] = 2666667,
7001
	};
6943
	};
7002
	static const unsigned int cl_vco[8] = {
6944
	static const unsigned int cl_vco[8] = {
7003
		[0] = 3200000,
6945
		[0] = 3200000,
7004
		[1] = 4000000,
6946
		[1] = 4000000,
7005
		[2] = 5333333,
6947
		[2] = 5333333,
7006
		[3] = 6400000,
6948
		[3] = 6400000,
7007
		[4] = 3333333,
6949
		[4] = 3333333,
7008
		[5] = 3566667,
6950
		[5] = 3566667,
7009
		[6] = 4266667,
6951
		[6] = 4266667,
7010
	};
6952
	};
7011
	static const unsigned int elk_vco[8] = {
6953
	static const unsigned int elk_vco[8] = {
7012
		[0] = 3200000,
6954
		[0] = 3200000,
7013
		[1] = 4000000,
6955
		[1] = 4000000,
7014
		[2] = 5333333,
6956
		[2] = 5333333,
7015
		[3] = 4800000,
6957
		[3] = 4800000,
7016
	};
6958
	};
7017
	static const unsigned int ctg_vco[8] = {
6959
	static const unsigned int ctg_vco[8] = {
7018
		[0] = 3200000,
6960
		[0] = 3200000,
7019
		[1] = 4000000,
6961
		[1] = 4000000,
7020
		[2] = 5333333,
6962
		[2] = 5333333,
7021
		[3] = 6400000,
6963
		[3] = 6400000,
7022
		[4] = 2666667,
6964
		[4] = 2666667,
7023
		[5] = 4266667,
6965
		[5] = 4266667,
7024
	};
6966
	};
7025
	const unsigned int *vco_table;
6967
	const unsigned int *vco_table;
7026
	unsigned int vco;
6968
	unsigned int vco;
7027
	uint8_t tmp = 0;
6969
	uint8_t tmp = 0;
7028
 
6970
 
7029
	/* FIXME other chipsets? */
6971
	/* FIXME other chipsets? */
7030
	if (IS_GM45(dev))
6972
	if (IS_GM45(dev))
7031
		vco_table = ctg_vco;
6973
		vco_table = ctg_vco;
7032
	else if (IS_G4X(dev))
6974
	else if (IS_G4X(dev))
7033
		vco_table = elk_vco;
6975
		vco_table = elk_vco;
7034
	else if (IS_CRESTLINE(dev))
6976
	else if (IS_CRESTLINE(dev))
7035
		vco_table = cl_vco;
6977
		vco_table = cl_vco;
7036
	else if (IS_PINEVIEW(dev))
6978
	else if (IS_PINEVIEW(dev))
7037
		vco_table = pnv_vco;
6979
		vco_table = pnv_vco;
7038
	else if (IS_G33(dev))
6980
	else if (IS_G33(dev))
7039
		vco_table = blb_vco;
6981
		vco_table = blb_vco;
7040
	else
6982
	else
7041
		return 0;
6983
		return 0;
7042
 
6984
 
7043
	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
6985
	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
7044
 
6986
 
7045
	vco = vco_table[tmp & 0x7];
6987
	vco = vco_table[tmp & 0x7];
7046
	if (vco == 0)
6988
	if (vco == 0)
7047
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
6989
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
7048
	else
6990
	else
7049
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
6991
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
7050
 
6992
 
7051
	return vco;
6993
	return vco;
7052
}
6994
}
7053
 
6995
 
7054
static int gm45_get_display_clock_speed(struct drm_device *dev)
6996
static int gm45_get_display_clock_speed(struct drm_device *dev)
7055
{
6997
{
7056
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6998
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7057
	uint16_t tmp = 0;
6999
	uint16_t tmp = 0;
7058
 
7000
 
7059
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7001
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7060
 
7002
 
7061
	cdclk_sel = (tmp >> 12) & 0x1;
7003
	cdclk_sel = (tmp >> 12) & 0x1;
7062
 
7004
 
7063
	switch (vco) {
7005
	switch (vco) {
7064
	case 2666667:
7006
	case 2666667:
7065
	case 4000000:
7007
	case 4000000:
7066
	case 5333333:
7008
	case 5333333:
7067
		return cdclk_sel ? 333333 : 222222;
7009
		return cdclk_sel ? 333333 : 222222;
7068
	case 3200000:
7010
	case 3200000:
7069
		return cdclk_sel ? 320000 : 228571;
7011
		return cdclk_sel ? 320000 : 228571;
7070
	default:
7012
	default:
7071
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7013
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7072
		return 222222;
7014
		return 222222;
7073
	}
7015
	}
7074
}
7016
}
7075
 
7017
 
7076
static int i965gm_get_display_clock_speed(struct drm_device *dev)
7018
static int i965gm_get_display_clock_speed(struct drm_device *dev)
7077
{
7019
{
7078
	static const uint8_t div_3200[] = { 16, 10,  8 };
7020
	static const uint8_t div_3200[] = { 16, 10,  8 };
7079
	static const uint8_t div_4000[] = { 20, 12, 10 };
7021
	static const uint8_t div_4000[] = { 20, 12, 10 };
7080
	static const uint8_t div_5333[] = { 24, 16, 14 };
7022
	static const uint8_t div_5333[] = { 24, 16, 14 };
7081
	const uint8_t *div_table;
7023
	const uint8_t *div_table;
7082
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7024
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7083
	uint16_t tmp = 0;
7025
	uint16_t tmp = 0;
7084
 
7026
 
7085
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7027
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7086
 
7028
 
7087
	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
7029
	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
7088
 
7030
 
7089
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7031
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7090
		goto fail;
7032
		goto fail;
7091
 
7033
 
7092
	switch (vco) {
7034
	switch (vco) {
7093
	case 3200000:
7035
	case 3200000:
7094
		div_table = div_3200;
7036
		div_table = div_3200;
7095
		break;
7037
		break;
7096
	case 4000000:
7038
	case 4000000:
7097
		div_table = div_4000;
7039
		div_table = div_4000;
7098
		break;
7040
		break;
7099
	case 5333333:
7041
	case 5333333:
7100
		div_table = div_5333;
7042
		div_table = div_5333;
7101
		break;
7043
		break;
7102
	default:
7044
	default:
7103
		goto fail;
7045
		goto fail;
7104
	}
7046
	}
7105
 
7047
 
7106
	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7048
	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7107
 
7049
 
7108
fail:
7050
fail:
7109
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7051
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7110
	return 200000;
7052
	return 200000;
7111
}
7053
}
7112
 
7054
 
7113
static int g33_get_display_clock_speed(struct drm_device *dev)
7055
static int g33_get_display_clock_speed(struct drm_device *dev)
7114
{
7056
{
7115
	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7057
	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7116
	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7058
	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7117
	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7059
	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7118
	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7060
	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7119
	const uint8_t *div_table;
7061
	const uint8_t *div_table;
7120
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7062
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7121
	uint16_t tmp = 0;
7063
	uint16_t tmp = 0;
7122
 
7064
 
7123
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7065
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7124
 
7066
 
7125
	cdclk_sel = (tmp >> 4) & 0x7;
7067
	cdclk_sel = (tmp >> 4) & 0x7;
7126
 
7068
 
7127
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7069
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7128
		goto fail;
7070
		goto fail;
7129
 
7071
 
7130
	switch (vco) {
7072
	switch (vco) {
7131
	case 3200000:
7073
	case 3200000:
7132
		div_table = div_3200;
7074
		div_table = div_3200;
7133
		break;
7075
		break;
7134
	case 4000000:
7076
	case 4000000:
7135
		div_table = div_4000;
7077
		div_table = div_4000;
7136
		break;
7078
		break;
7137
	case 4800000:
7079
	case 4800000:
7138
		div_table = div_4800;
7080
		div_table = div_4800;
7139
		break;
7081
		break;
7140
	case 5333333:
7082
	case 5333333:
7141
		div_table = div_5333;
7083
		div_table = div_5333;
7142
		break;
7084
		break;
7143
	default:
7085
	default:
7144
		goto fail;
7086
		goto fail;
7145
	}
7087
	}
7146
 
7088
 
7147
	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7089
	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7148
 
7090
 
7149
fail:
7091
fail:
7150
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7092
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7151
	return 190476;
7093
	return 190476;
7152
}
7094
}
7153
 
7095
 
7154
static void
7096
static void
7155
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7097
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7156
{
7098
{
7157
	while (*num > DATA_LINK_M_N_MASK ||
7099
	while (*num > DATA_LINK_M_N_MASK ||
7158
	       *den > DATA_LINK_M_N_MASK) {
7100
	       *den > DATA_LINK_M_N_MASK) {
7159
		*num >>= 1;
7101
		*num >>= 1;
7160
		*den >>= 1;
7102
		*den >>= 1;
7161
	}
7103
	}
7162
}
7104
}
7163
 
7105
 
7164
static void compute_m_n(unsigned int m, unsigned int n,
7106
static void compute_m_n(unsigned int m, unsigned int n,
7165
			uint32_t *ret_m, uint32_t *ret_n)
7107
			uint32_t *ret_m, uint32_t *ret_n)
7166
{
7108
{
7167
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7109
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7168
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
7110
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
7169
	intel_reduce_m_n_ratio(ret_m, ret_n);
7111
	intel_reduce_m_n_ratio(ret_m, ret_n);
7170
}
7112
}
7171
 
7113
 
7172
void
7114
void
7173
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7115
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7174
		       int pixel_clock, int link_clock,
7116
		       int pixel_clock, int link_clock,
7175
		       struct intel_link_m_n *m_n)
7117
		       struct intel_link_m_n *m_n)
7176
{
7118
{
7177
	m_n->tu = 64;
7119
	m_n->tu = 64;
7178
 
7120
 
7179
	compute_m_n(bits_per_pixel * pixel_clock,
7121
	compute_m_n(bits_per_pixel * pixel_clock,
7180
		    link_clock * nlanes * 8,
7122
		    link_clock * nlanes * 8,
7181
		    &m_n->gmch_m, &m_n->gmch_n);
7123
		    &m_n->gmch_m, &m_n->gmch_n);
7182
 
7124
 
7183
	compute_m_n(pixel_clock, link_clock,
7125
	compute_m_n(pixel_clock, link_clock,
7184
		    &m_n->link_m, &m_n->link_n);
7126
		    &m_n->link_m, &m_n->link_n);
7185
}
7127
}
7186
 
7128
 
7187
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7129
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7188
{
7130
{
7189
	if (i915.panel_use_ssc >= 0)
7131
	if (i915.panel_use_ssc >= 0)
7190
		return i915.panel_use_ssc != 0;
7132
		return i915.panel_use_ssc != 0;
7191
	return dev_priv->vbt.lvds_use_ssc
7133
	return dev_priv->vbt.lvds_use_ssc
7192
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7134
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7193
}
7135
}
7194
 
7136
 
7195
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
7137
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
7196
			   int num_connectors)
7138
			   int num_connectors)
7197
{
7139
{
7198
	struct drm_device *dev = crtc_state->base.crtc->dev;
7140
	struct drm_device *dev = crtc_state->base.crtc->dev;
7199
	struct drm_i915_private *dev_priv = dev->dev_private;
7141
	struct drm_i915_private *dev_priv = dev->dev_private;
7200
	int refclk;
7142
	int refclk;
7201
 
7143
 
7202
	WARN_ON(!crtc_state->base.state);
7144
	WARN_ON(!crtc_state->base.state);
7203
 
7145
 
7204
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
7146
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
7205
		refclk = 100000;
7147
		refclk = 100000;
7206
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7148
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7207
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7149
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7208
		refclk = dev_priv->vbt.lvds_ssc_freq;
7150
		refclk = dev_priv->vbt.lvds_ssc_freq;
7209
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7151
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7210
	} else if (!IS_GEN2(dev)) {
7152
	} else if (!IS_GEN2(dev)) {
7211
		refclk = 96000;
7153
		refclk = 96000;
7212
	} else {
7154
	} else {
7213
		refclk = 48000;
7155
		refclk = 48000;
7214
	}
7156
	}
7215
 
7157
 
7216
	return refclk;
7158
	return refclk;
7217
}
7159
}
7218
 
7160
 
7219
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7161
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7220
{
7162
{
7221
	return (1 << dpll->n) << 16 | dpll->m2;
7163
	return (1 << dpll->n) << 16 | dpll->m2;
7222
}
7164
}
7223
 
7165
 
7224
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7166
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7225
{
7167
{
7226
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7168
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7227
}
7169
}
7228
 
7170
 
7229
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7171
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7230
				     struct intel_crtc_state *crtc_state,
7172
				     struct intel_crtc_state *crtc_state,
7231
				     intel_clock_t *reduced_clock)
7173
				     intel_clock_t *reduced_clock)
7232
{
7174
{
7233
	struct drm_device *dev = crtc->base.dev;
7175
	struct drm_device *dev = crtc->base.dev;
7234
	u32 fp, fp2 = 0;
7176
	u32 fp, fp2 = 0;
7235
 
7177
 
7236
	if (IS_PINEVIEW(dev)) {
7178
	if (IS_PINEVIEW(dev)) {
7237
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7179
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7238
		if (reduced_clock)
7180
		if (reduced_clock)
7239
			fp2 = pnv_dpll_compute_fp(reduced_clock);
7181
			fp2 = pnv_dpll_compute_fp(reduced_clock);
7240
	} else {
7182
	} else {
7241
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7183
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7242
		if (reduced_clock)
7184
		if (reduced_clock)
7243
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7185
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7244
	}
7186
	}
7245
 
7187
 
7246
	crtc_state->dpll_hw_state.fp0 = fp;
7188
	crtc_state->dpll_hw_state.fp0 = fp;
7247
 
7189
 
7248
	crtc->lowfreq_avail = false;
7190
	crtc->lowfreq_avail = false;
7249
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7191
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7250
	    reduced_clock) {
7192
	    reduced_clock) {
7251
		crtc_state->dpll_hw_state.fp1 = fp2;
7193
		crtc_state->dpll_hw_state.fp1 = fp2;
7252
		crtc->lowfreq_avail = true;
7194
		crtc->lowfreq_avail = true;
7253
	} else {
7195
	} else {
7254
		crtc_state->dpll_hw_state.fp1 = fp;
7196
		crtc_state->dpll_hw_state.fp1 = fp;
7255
	}
7197
	}
7256
}
7198
}
7257
 
7199
 
7258
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7200
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7259
		pipe)
7201
		pipe)
7260
{
7202
{
7261
	u32 reg_val;
7203
	u32 reg_val;
7262
 
7204
 
7263
	/*
7205
	/*
7264
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7206
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7265
	 * and set it to a reasonable value instead.
7207
	 * and set it to a reasonable value instead.
7266
	 */
7208
	 */
7267
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7209
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7268
	reg_val &= 0xffffff00;
7210
	reg_val &= 0xffffff00;
7269
	reg_val |= 0x00000030;
7211
	reg_val |= 0x00000030;
7270
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7212
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7271
 
7213
 
7272
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7214
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7273
	reg_val &= 0x8cffffff;
7215
	reg_val &= 0x8cffffff;
7274
	reg_val = 0x8c000000;
7216
	reg_val = 0x8c000000;
7275
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7217
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7276
 
7218
 
7277
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7219
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7278
	reg_val &= 0xffffff00;
7220
	reg_val &= 0xffffff00;
7279
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7221
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7280
 
7222
 
7281
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7223
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7282
	reg_val &= 0x00ffffff;
7224
	reg_val &= 0x00ffffff;
7283
	reg_val |= 0xb0000000;
7225
	reg_val |= 0xb0000000;
7284
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7226
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7285
}
7227
}
7286
 
7228
 
7287
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7229
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7288
					 struct intel_link_m_n *m_n)
7230
					 struct intel_link_m_n *m_n)
7289
{
7231
{
7290
	struct drm_device *dev = crtc->base.dev;
7232
	struct drm_device *dev = crtc->base.dev;
7291
	struct drm_i915_private *dev_priv = dev->dev_private;
7233
	struct drm_i915_private *dev_priv = dev->dev_private;
7292
	int pipe = crtc->pipe;
7234
	int pipe = crtc->pipe;
7293
 
7235
 
7294
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7236
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7295
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7237
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7296
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7238
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7297
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7239
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7298
}
7240
}
7299
 
7241
 
7300
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7242
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7301
					 struct intel_link_m_n *m_n,
7243
					 struct intel_link_m_n *m_n,
7302
					 struct intel_link_m_n *m2_n2)
7244
					 struct intel_link_m_n *m2_n2)
7303
{
7245
{
7304
	struct drm_device *dev = crtc->base.dev;
7246
	struct drm_device *dev = crtc->base.dev;
7305
	struct drm_i915_private *dev_priv = dev->dev_private;
7247
	struct drm_i915_private *dev_priv = dev->dev_private;
7306
	int pipe = crtc->pipe;
7248
	int pipe = crtc->pipe;
7307
	enum transcoder transcoder = crtc->config->cpu_transcoder;
7249
	enum transcoder transcoder = crtc->config->cpu_transcoder;
7308
 
7250
 
7309
	if (INTEL_INFO(dev)->gen >= 5) {
7251
	if (INTEL_INFO(dev)->gen >= 5) {
7310
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7252
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7311
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7253
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7312
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7254
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7313
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7255
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7314
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
7256
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
7315
		 * for gen < 8) and if DRRS is supported (to make sure the
7257
		 * for gen < 8) and if DRRS is supported (to make sure the
7316
		 * registers are not unnecessarily accessed).
7258
		 * registers are not unnecessarily accessed).
7317
		 */
7259
		 */
7318
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
7260
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
7319
			crtc->config->has_drrs) {
7261
			crtc->config->has_drrs) {
7320
			I915_WRITE(PIPE_DATA_M2(transcoder),
7262
			I915_WRITE(PIPE_DATA_M2(transcoder),
7321
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7263
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7322
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7264
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7323
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7265
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7324
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7266
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7325
		}
7267
		}
7326
	} else {
7268
	} else {
7327
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7269
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7328
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7270
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7329
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7271
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7330
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7272
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7331
	}
7273
	}
7332
}
7274
}
7333
 
7275
 
7334
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7276
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7335
{
7277
{
7336
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7278
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7337
 
7279
 
7338
	if (m_n == M1_N1) {
7280
	if (m_n == M1_N1) {
7339
		dp_m_n = &crtc->config->dp_m_n;
7281
		dp_m_n = &crtc->config->dp_m_n;
7340
		dp_m2_n2 = &crtc->config->dp_m2_n2;
7282
		dp_m2_n2 = &crtc->config->dp_m2_n2;
7341
	} else if (m_n == M2_N2) {
7283
	} else if (m_n == M2_N2) {
7342
 
7284
 
7343
		/*
7285
		/*
7344
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
7286
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
7345
		 * needs to be programmed into M1_N1.
7287
		 * needs to be programmed into M1_N1.
7346
		 */
7288
		 */
7347
		dp_m_n = &crtc->config->dp_m2_n2;
7289
		dp_m_n = &crtc->config->dp_m2_n2;
7348
	} else {
7290
	} else {
7349
		DRM_ERROR("Unsupported divider value\n");
7291
		DRM_ERROR("Unsupported divider value\n");
7350
		return;
7292
		return;
7351
	}
7293
	}
7352
 
7294
 
7353
	if (crtc->config->has_pch_encoder)
7295
	if (crtc->config->has_pch_encoder)
7354
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7296
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7355
	else
7297
	else
7356
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7298
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7357
}
7299
}
7358
 
7300
 
7359
static void vlv_compute_dpll(struct intel_crtc *crtc,
7301
static void vlv_compute_dpll(struct intel_crtc *crtc,
7360
			     struct intel_crtc_state *pipe_config)
7302
			     struct intel_crtc_state *pipe_config)
7361
{
7303
{
7362
	u32 dpll, dpll_md;
7304
	u32 dpll, dpll_md;
7363
 
7305
 
7364
	/*
7306
	/*
7365
	 * Enable DPIO clock input. We should never disable the reference
7307
	 * Enable DPIO clock input. We should never disable the reference
7366
	 * clock for pipe B, since VGA hotplug / manual detection depends
7308
	 * clock for pipe B, since VGA hotplug / manual detection depends
7367
	 * on it.
7309
	 * on it.
7368
	 */
7310
	 */
7369
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
7311
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
7370
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
7312
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
7371
	/* We should never disable this, set it here for state tracking */
7313
	/* We should never disable this, set it here for state tracking */
7372
	if (crtc->pipe == PIPE_B)
7314
	if (crtc->pipe == PIPE_B)
7373
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7315
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7374
	dpll |= DPLL_VCO_ENABLE;
7316
	dpll |= DPLL_VCO_ENABLE;
7375
	pipe_config->dpll_hw_state.dpll = dpll;
7317
	pipe_config->dpll_hw_state.dpll = dpll;
7376
 
7318
 
7377
	dpll_md = (pipe_config->pixel_multiplier - 1)
7319
	dpll_md = (pipe_config->pixel_multiplier - 1)
7378
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7320
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7379
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
7321
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
7380
}
7322
}
7381
 
7323
 
7382
static void vlv_prepare_pll(struct intel_crtc *crtc,
7324
static void vlv_prepare_pll(struct intel_crtc *crtc,
7383
			    const struct intel_crtc_state *pipe_config)
7325
			    const struct intel_crtc_state *pipe_config)
7384
{
7326
{
7385
	struct drm_device *dev = crtc->base.dev;
7327
	struct drm_device *dev = crtc->base.dev;
7386
	struct drm_i915_private *dev_priv = dev->dev_private;
7328
	struct drm_i915_private *dev_priv = dev->dev_private;
7387
	int pipe = crtc->pipe;
7329
	int pipe = crtc->pipe;
7388
	u32 mdiv;
7330
	u32 mdiv;
7389
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
7331
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
7390
	u32 coreclk, reg_val;
7332
	u32 coreclk, reg_val;
7391
 
7333
 
7392
	mutex_lock(&dev_priv->sb_lock);
7334
	mutex_lock(&dev_priv->sb_lock);
7393
 
7335
 
7394
	bestn = pipe_config->dpll.n;
7336
	bestn = pipe_config->dpll.n;
7395
	bestm1 = pipe_config->dpll.m1;
7337
	bestm1 = pipe_config->dpll.m1;
7396
	bestm2 = pipe_config->dpll.m2;
7338
	bestm2 = pipe_config->dpll.m2;
7397
	bestp1 = pipe_config->dpll.p1;
7339
	bestp1 = pipe_config->dpll.p1;
7398
	bestp2 = pipe_config->dpll.p2;
7340
	bestp2 = pipe_config->dpll.p2;
7399
 
7341
 
7400
	/* See eDP HDMI DPIO driver vbios notes doc */
7342
	/* See eDP HDMI DPIO driver vbios notes doc */
7401
 
7343
 
7402
	/* PLL B needs special handling */
7344
	/* PLL B needs special handling */
7403
	if (pipe == PIPE_B)
7345
	if (pipe == PIPE_B)
7404
		vlv_pllb_recal_opamp(dev_priv, pipe);
7346
		vlv_pllb_recal_opamp(dev_priv, pipe);
7405
 
7347
 
7406
	/* Set up Tx target for periodic Rcomp update */
7348
	/* Set up Tx target for periodic Rcomp update */
7407
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7349
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7408
 
7350
 
7409
	/* Disable target IRef on PLL */
7351
	/* Disable target IRef on PLL */
7410
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7352
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7411
	reg_val &= 0x00ffffff;
7353
	reg_val &= 0x00ffffff;
7412
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7354
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7413
 
7355
 
7414
	/* Disable fast lock */
7356
	/* Disable fast lock */
7415
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7357
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7416
 
7358
 
7417
	/* Set idtafcrecal before PLL is enabled */
7359
	/* Set idtafcrecal before PLL is enabled */
7418
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7360
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7419
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7361
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7420
	mdiv |= ((bestn << DPIO_N_SHIFT));
7362
	mdiv |= ((bestn << DPIO_N_SHIFT));
7421
	mdiv |= (1 << DPIO_K_SHIFT);
7363
	mdiv |= (1 << DPIO_K_SHIFT);
7422
 
7364
 
7423
	/*
7365
	/*
7424
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7366
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7425
	 * but we don't support that).
7367
	 * but we don't support that).
7426
	 * Note: don't use the DAC post divider as it seems unstable.
7368
	 * Note: don't use the DAC post divider as it seems unstable.
7427
	 */
7369
	 */
7428
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7370
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7429
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7371
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7430
 
7372
 
7431
	mdiv |= DPIO_ENABLE_CALIBRATION;
7373
	mdiv |= DPIO_ENABLE_CALIBRATION;
7432
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7374
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7433
 
7375
 
7434
	/* Set HBR and RBR LPF coefficients */
7376
	/* Set HBR and RBR LPF coefficients */
7435
	if (pipe_config->port_clock == 162000 ||
7377
	if (pipe_config->port_clock == 162000 ||
7436
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7378
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7437
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
7379
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
7438
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7380
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7439
				 0x009f0003);
7381
				 0x009f0003);
7440
	else
7382
	else
7441
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7383
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7442
				 0x00d0000f);
7384
				 0x00d0000f);
7443
 
7385
 
7444
	if (pipe_config->has_dp_encoder) {
7386
	if (pipe_config->has_dp_encoder) {
7445
		/* Use SSC source */
7387
		/* Use SSC source */
7446
		if (pipe == PIPE_A)
7388
		if (pipe == PIPE_A)
7447
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7389
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7448
					 0x0df40000);
7390
					 0x0df40000);
7449
		else
7391
		else
7450
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7392
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7451
					 0x0df70000);
7393
					 0x0df70000);
7452
	} else { /* HDMI or VGA */
7394
	} else { /* HDMI or VGA */
7453
		/* Use bend source */
7395
		/* Use bend source */
7454
		if (pipe == PIPE_A)
7396
		if (pipe == PIPE_A)
7455
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7397
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7456
					 0x0df70000);
7398
					 0x0df70000);
7457
		else
7399
		else
7458
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7400
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7459
					 0x0df40000);
7401
					 0x0df40000);
7460
	}
7402
	}
7461
 
7403
 
7462
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7404
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7463
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7405
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7464
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7406
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7465
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7407
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7466
		coreclk |= 0x01000000;
7408
		coreclk |= 0x01000000;
7467
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7409
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7468
 
7410
 
7469
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7411
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7470
	mutex_unlock(&dev_priv->sb_lock);
7412
	mutex_unlock(&dev_priv->sb_lock);
7471
}
7413
}
7472
 
7414
 
7473
static void chv_compute_dpll(struct intel_crtc *crtc,
7415
static void chv_compute_dpll(struct intel_crtc *crtc,
7474
			     struct intel_crtc_state *pipe_config)
7416
			     struct intel_crtc_state *pipe_config)
7475
{
7417
{
7476
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7418
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7477
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
7419
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
7478
		DPLL_VCO_ENABLE;
7420
		DPLL_VCO_ENABLE;
7479
	if (crtc->pipe != PIPE_A)
7421
	if (crtc->pipe != PIPE_A)
7480
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7422
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7481
 
7423
 
7482
	pipe_config->dpll_hw_state.dpll_md =
7424
	pipe_config->dpll_hw_state.dpll_md =
7483
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7425
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7484
}
7426
}
7485
 
7427
 
7486
static void chv_prepare_pll(struct intel_crtc *crtc,
7428
static void chv_prepare_pll(struct intel_crtc *crtc,
7487
			    const struct intel_crtc_state *pipe_config)
7429
			    const struct intel_crtc_state *pipe_config)
7488
{
7430
{
7489
	struct drm_device *dev = crtc->base.dev;
7431
	struct drm_device *dev = crtc->base.dev;
7490
	struct drm_i915_private *dev_priv = dev->dev_private;
7432
	struct drm_i915_private *dev_priv = dev->dev_private;
7491
	int pipe = crtc->pipe;
7433
	int pipe = crtc->pipe;
7492
	i915_reg_t dpll_reg = DPLL(crtc->pipe);
7434
	i915_reg_t dpll_reg = DPLL(crtc->pipe);
7493
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
7435
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
7494
	u32 loopfilter, tribuf_calcntr;
7436
	u32 loopfilter, tribuf_calcntr;
7495
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7437
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7496
	u32 dpio_val;
7438
	u32 dpio_val;
7497
	int vco;
7439
	int vco;
7498
 
7440
 
7499
	bestn = pipe_config->dpll.n;
7441
	bestn = pipe_config->dpll.n;
7500
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7442
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7501
	bestm1 = pipe_config->dpll.m1;
7443
	bestm1 = pipe_config->dpll.m1;
7502
	bestm2 = pipe_config->dpll.m2 >> 22;
7444
	bestm2 = pipe_config->dpll.m2 >> 22;
7503
	bestp1 = pipe_config->dpll.p1;
7445
	bestp1 = pipe_config->dpll.p1;
7504
	bestp2 = pipe_config->dpll.p2;
7446
	bestp2 = pipe_config->dpll.p2;
7505
	vco = pipe_config->dpll.vco;
7447
	vco = pipe_config->dpll.vco;
7506
	dpio_val = 0;
7448
	dpio_val = 0;
7507
	loopfilter = 0;
7449
	loopfilter = 0;
7508
 
7450
 
7509
	/*
7451
	/*
7510
	 * Enable Refclk and SSC
7452
	 * Enable Refclk and SSC
7511
	 */
7453
	 */
7512
	I915_WRITE(dpll_reg,
7454
	I915_WRITE(dpll_reg,
7513
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7455
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7514
 
7456
 
7515
	mutex_lock(&dev_priv->sb_lock);
7457
	mutex_lock(&dev_priv->sb_lock);
7516
 
7458
 
7517
	/* p1 and p2 divider */
7459
	/* p1 and p2 divider */
7518
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7460
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7519
			5 << DPIO_CHV_S1_DIV_SHIFT |
7461
			5 << DPIO_CHV_S1_DIV_SHIFT |
7520
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7462
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7521
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7463
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7522
			1 << DPIO_CHV_K_DIV_SHIFT);
7464
			1 << DPIO_CHV_K_DIV_SHIFT);
7523
 
7465
 
7524
	/* Feedback post-divider - m2 */
7466
	/* Feedback post-divider - m2 */
7525
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7467
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7526
 
7468
 
7527
	/* Feedback refclk divider - n and m1 */
7469
	/* Feedback refclk divider - n and m1 */
7528
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7470
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7529
			DPIO_CHV_M1_DIV_BY_2 |
7471
			DPIO_CHV_M1_DIV_BY_2 |
7530
			1 << DPIO_CHV_N_DIV_SHIFT);
7472
			1 << DPIO_CHV_N_DIV_SHIFT);
7531
 
7473
 
7532
	/* M2 fraction division */
7474
	/* M2 fraction division */
7533
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7475
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7534
 
7476
 
7535
	/* M2 fraction division enable */
7477
	/* M2 fraction division enable */
7536
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7478
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7537
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7479
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7538
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7480
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7539
	if (bestm2_frac)
7481
	if (bestm2_frac)
7540
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7482
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7541
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7483
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7542
 
7484
 
7543
	/* Program digital lock detect threshold */
7485
	/* Program digital lock detect threshold */
7544
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7486
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7545
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7487
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7546
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7488
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7547
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7489
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7548
	if (!bestm2_frac)
7490
	if (!bestm2_frac)
7549
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7491
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7550
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7492
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7551
 
7493
 
7552
	/* Loop filter */
7494
	/* Loop filter */
7553
	if (vco == 5400000) {
7495
	if (vco == 5400000) {
7554
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7496
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7555
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7497
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7556
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7498
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7557
		tribuf_calcntr = 0x9;
7499
		tribuf_calcntr = 0x9;
7558
	} else if (vco <= 6200000) {
7500
	} else if (vco <= 6200000) {
7559
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7501
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7560
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7502
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7561
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7503
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7562
		tribuf_calcntr = 0x9;
7504
		tribuf_calcntr = 0x9;
7563
	} else if (vco <= 6480000) {
7505
	} else if (vco <= 6480000) {
7564
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7506
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7565
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7507
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7566
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7508
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7567
		tribuf_calcntr = 0x8;
7509
		tribuf_calcntr = 0x8;
7568
	} else {
7510
	} else {
7569
		/* Not supported. Apply the same limits as in the max case */
7511
		/* Not supported. Apply the same limits as in the max case */
7570
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7512
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7571
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7513
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7572
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7514
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7573
		tribuf_calcntr = 0;
7515
		tribuf_calcntr = 0;
7574
	}
7516
	}
7575
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7517
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7576
 
7518
 
7577
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7519
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7578
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7520
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7579
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7521
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7580
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7522
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7581
 
7523
 
7582
	/* AFC Recal */
7524
	/* AFC Recal */
7583
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7525
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7584
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7526
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7585
			DPIO_AFC_RECAL);
7527
			DPIO_AFC_RECAL);
7586
 
7528
 
7587
	mutex_unlock(&dev_priv->sb_lock);
7529
	mutex_unlock(&dev_priv->sb_lock);
7588
}
7530
}
7589
 
7531
 
7590
/**
7532
/**
7591
 * vlv_force_pll_on - forcibly enable just the PLL
7533
 * vlv_force_pll_on - forcibly enable just the PLL
7592
 * @dev_priv: i915 private structure
7534
 * @dev_priv: i915 private structure
7593
 * @pipe: pipe PLL to enable
7535
 * @pipe: pipe PLL to enable
7594
 * @dpll: PLL configuration
7536
 * @dpll: PLL configuration
7595
 *
7537
 *
7596
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7538
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7597
 * in cases where we need the PLL enabled even when @pipe is not going to
7539
 * in cases where we need the PLL enabled even when @pipe is not going to
7598
 * be enabled.
7540
 * be enabled.
7599
 */
7541
 */
7600
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7542
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7601
		      const struct dpll *dpll)
7543
		     const struct dpll *dpll)
7602
{
7544
{
7603
	struct intel_crtc *crtc =
7545
	struct intel_crtc *crtc =
7604
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7546
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7605
	struct intel_crtc_state pipe_config = {
7547
	struct intel_crtc_state *pipe_config;
-
 
7548
 
-
 
7549
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
-
 
7550
	if (!pipe_config)
-
 
7551
		return -ENOMEM;
-
 
7552
 
7606
		.base.crtc = &crtc->base,
7553
	pipe_config->base.crtc = &crtc->base;
7607
		.pixel_multiplier = 1,
7554
	pipe_config->pixel_multiplier = 1;
7608
		.dpll = *dpll,
7555
	pipe_config->dpll = *dpll;
7609
	};
-
 
7610
 
7556
 
7611
	if (IS_CHERRYVIEW(dev)) {
7557
	if (IS_CHERRYVIEW(dev)) {
7612
		chv_compute_dpll(crtc, &pipe_config);
7558
		chv_compute_dpll(crtc, pipe_config);
7613
		chv_prepare_pll(crtc, &pipe_config);
7559
		chv_prepare_pll(crtc, pipe_config);
7614
		chv_enable_pll(crtc, &pipe_config);
7560
		chv_enable_pll(crtc, pipe_config);
7615
	} else {
7561
	} else {
7616
		vlv_compute_dpll(crtc, &pipe_config);
7562
		vlv_compute_dpll(crtc, pipe_config);
7617
		vlv_prepare_pll(crtc, &pipe_config);
7563
		vlv_prepare_pll(crtc, pipe_config);
7618
		vlv_enable_pll(crtc, &pipe_config);
7564
		vlv_enable_pll(crtc, pipe_config);
-
 
7565
	}
-
 
7566
 
-
 
7567
	kfree(pipe_config);
-
 
7568
 
7619
	}
7569
	return 0;
7620
}
7570
}
7621
 
7571
 
7622
/**
7572
/**
7623
 * vlv_force_pll_off - forcibly disable just the PLL
7573
 * vlv_force_pll_off - forcibly disable just the PLL
7624
 * @dev_priv: i915 private structure
7574
 * @dev_priv: i915 private structure
7625
 * @pipe: pipe PLL to disable
7575
 * @pipe: pipe PLL to disable
7626
 *
7576
 *
7627
 * Disable the PLL for @pipe. To be used in cases where we need
7577
 * Disable the PLL for @pipe. To be used in cases where we need
7628
 * the PLL enabled even when @pipe is not going to be enabled.
7578
 * the PLL enabled even when @pipe is not going to be enabled.
7629
 */
7579
 */
7630
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7580
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7631
{
7581
{
7632
	if (IS_CHERRYVIEW(dev))
7582
	if (IS_CHERRYVIEW(dev))
7633
		chv_disable_pll(to_i915(dev), pipe);
7583
		chv_disable_pll(to_i915(dev), pipe);
7634
	else
7584
	else
7635
		vlv_disable_pll(to_i915(dev), pipe);
7585
		vlv_disable_pll(to_i915(dev), pipe);
7636
}
7586
}
7637
 
7587
 
7638
static void i9xx_compute_dpll(struct intel_crtc *crtc,
7588
static void i9xx_compute_dpll(struct intel_crtc *crtc,
7639
			      struct intel_crtc_state *crtc_state,
7589
			      struct intel_crtc_state *crtc_state,
7640
			      intel_clock_t *reduced_clock,
7590
			      intel_clock_t *reduced_clock,
7641
			      int num_connectors)
7591
			      int num_connectors)
7642
{
7592
{
7643
	struct drm_device *dev = crtc->base.dev;
7593
	struct drm_device *dev = crtc->base.dev;
7644
	struct drm_i915_private *dev_priv = dev->dev_private;
7594
	struct drm_i915_private *dev_priv = dev->dev_private;
7645
	u32 dpll;
7595
	u32 dpll;
7646
	bool is_sdvo;
7596
	bool is_sdvo;
7647
	struct dpll *clock = &crtc_state->dpll;
7597
	struct dpll *clock = &crtc_state->dpll;
7648
 
7598
 
7649
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7599
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7650
 
7600
 
7651
	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7601
	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7652
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7602
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7653
 
7603
 
7654
	dpll = DPLL_VGA_MODE_DIS;
7604
	dpll = DPLL_VGA_MODE_DIS;
7655
 
7605
 
7656
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7606
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7657
		dpll |= DPLLB_MODE_LVDS;
7607
		dpll |= DPLLB_MODE_LVDS;
7658
	else
7608
	else
7659
		dpll |= DPLLB_MODE_DAC_SERIAL;
7609
		dpll |= DPLLB_MODE_DAC_SERIAL;
7660
 
7610
 
7661
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7611
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7662
		dpll |= (crtc_state->pixel_multiplier - 1)
7612
		dpll |= (crtc_state->pixel_multiplier - 1)
7663
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
7613
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
7664
	}
7614
	}
7665
 
7615
 
7666
	if (is_sdvo)
7616
	if (is_sdvo)
7667
		dpll |= DPLL_SDVO_HIGH_SPEED;
7617
		dpll |= DPLL_SDVO_HIGH_SPEED;
7668
 
7618
 
7669
	if (crtc_state->has_dp_encoder)
7619
	if (crtc_state->has_dp_encoder)
7670
		dpll |= DPLL_SDVO_HIGH_SPEED;
7620
		dpll |= DPLL_SDVO_HIGH_SPEED;
7671
 
7621
 
7672
	/* compute bitmask from p1 value */
7622
	/* compute bitmask from p1 value */
7673
	if (IS_PINEVIEW(dev))
7623
	if (IS_PINEVIEW(dev))
7674
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7624
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7675
	else {
7625
	else {
7676
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7626
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7677
		if (IS_G4X(dev) && reduced_clock)
7627
		if (IS_G4X(dev) && reduced_clock)
7678
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7628
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7679
	}
7629
	}
7680
	switch (clock->p2) {
7630
	switch (clock->p2) {
7681
	case 5:
7631
	case 5:
7682
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7632
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7683
		break;
7633
		break;
7684
	case 7:
7634
	case 7:
7685
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7635
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7686
		break;
7636
		break;
7687
	case 10:
7637
	case 10:
7688
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7638
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7689
		break;
7639
		break;
7690
	case 14:
7640
	case 14:
7691
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7641
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7692
		break;
7642
		break;
7693
	}
7643
	}
7694
	if (INTEL_INFO(dev)->gen >= 4)
7644
	if (INTEL_INFO(dev)->gen >= 4)
7695
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7645
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7696
 
7646
 
7697
	if (crtc_state->sdvo_tv_clock)
7647
	if (crtc_state->sdvo_tv_clock)
7698
		dpll |= PLL_REF_INPUT_TVCLKINBC;
7648
		dpll |= PLL_REF_INPUT_TVCLKINBC;
7699
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7649
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7700
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7650
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7701
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7651
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7702
	else
7652
	else
7703
		dpll |= PLL_REF_INPUT_DREFCLK;
7653
		dpll |= PLL_REF_INPUT_DREFCLK;
7704
 
7654
 
7705
	dpll |= DPLL_VCO_ENABLE;
7655
	dpll |= DPLL_VCO_ENABLE;
7706
	crtc_state->dpll_hw_state.dpll = dpll;
7656
	crtc_state->dpll_hw_state.dpll = dpll;
7707
 
7657
 
7708
	if (INTEL_INFO(dev)->gen >= 4) {
7658
	if (INTEL_INFO(dev)->gen >= 4) {
7709
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7659
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7710
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7660
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7711
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
7661
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
7712
	}
7662
	}
7713
}
7663
}
7714
 
7664
 
7715
static void i8xx_compute_dpll(struct intel_crtc *crtc,
7665
static void i8xx_compute_dpll(struct intel_crtc *crtc,
7716
			      struct intel_crtc_state *crtc_state,
7666
			      struct intel_crtc_state *crtc_state,
7717
			      intel_clock_t *reduced_clock,
7667
			      intel_clock_t *reduced_clock,
7718
			      int num_connectors)
7668
			      int num_connectors)
7719
{
7669
{
7720
	struct drm_device *dev = crtc->base.dev;
7670
	struct drm_device *dev = crtc->base.dev;
7721
	struct drm_i915_private *dev_priv = dev->dev_private;
7671
	struct drm_i915_private *dev_priv = dev->dev_private;
7722
	u32 dpll;
7672
	u32 dpll;
7723
	struct dpll *clock = &crtc_state->dpll;
7673
	struct dpll *clock = &crtc_state->dpll;
7724
 
7674
 
7725
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7675
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7726
 
7676
 
7727
	dpll = DPLL_VGA_MODE_DIS;
7677
	dpll = DPLL_VGA_MODE_DIS;
7728
 
7678
 
7729
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7679
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7730
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7680
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7731
	} else {
7681
	} else {
7732
		if (clock->p1 == 2)
7682
		if (clock->p1 == 2)
7733
			dpll |= PLL_P1_DIVIDE_BY_TWO;
7683
			dpll |= PLL_P1_DIVIDE_BY_TWO;
7734
		else
7684
		else
7735
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7685
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7736
		if (clock->p2 == 4)
7686
		if (clock->p2 == 4)
7737
			dpll |= PLL_P2_DIVIDE_BY_4;
7687
			dpll |= PLL_P2_DIVIDE_BY_4;
7738
	}
7688
	}
7739
 
7689
 
7740
	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7690
	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7741
		dpll |= DPLL_DVO_2X_MODE;
7691
		dpll |= DPLL_DVO_2X_MODE;
7742
 
7692
 
7743
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7693
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7744
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7694
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7745
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7695
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7746
	else
7696
	else
7747
		dpll |= PLL_REF_INPUT_DREFCLK;
7697
		dpll |= PLL_REF_INPUT_DREFCLK;
7748
 
7698
 
7749
	dpll |= DPLL_VCO_ENABLE;
7699
	dpll |= DPLL_VCO_ENABLE;
7750
	crtc_state->dpll_hw_state.dpll = dpll;
7700
	crtc_state->dpll_hw_state.dpll = dpll;
7751
}
7701
}
7752
 
7702
 
7753
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7703
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7754
{
7704
{
7755
	struct drm_device *dev = intel_crtc->base.dev;
7705
	struct drm_device *dev = intel_crtc->base.dev;
7756
	struct drm_i915_private *dev_priv = dev->dev_private;
7706
	struct drm_i915_private *dev_priv = dev->dev_private;
7757
	enum pipe pipe = intel_crtc->pipe;
7707
	enum pipe pipe = intel_crtc->pipe;
7758
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7708
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7759
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7709
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7760
	uint32_t crtc_vtotal, crtc_vblank_end;
7710
	uint32_t crtc_vtotal, crtc_vblank_end;
7761
	int vsyncshift = 0;
7711
	int vsyncshift = 0;
7762
 
7712
 
7763
	/* We need to be careful not to changed the adjusted mode, for otherwise
7713
	/* We need to be careful not to changed the adjusted mode, for otherwise
7764
	 * the hw state checker will get angry at the mismatch. */
7714
	 * the hw state checker will get angry at the mismatch. */
7765
	crtc_vtotal = adjusted_mode->crtc_vtotal;
7715
	crtc_vtotal = adjusted_mode->crtc_vtotal;
7766
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7716
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7767
 
7717
 
7768
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7718
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7769
		/* the chip adds 2 halflines automatically */
7719
		/* the chip adds 2 halflines automatically */
7770
		crtc_vtotal -= 1;
7720
		crtc_vtotal -= 1;
7771
		crtc_vblank_end -= 1;
7721
		crtc_vblank_end -= 1;
7772
 
7722
 
7773
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7723
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7774
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7724
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7775
		else
7725
		else
7776
			vsyncshift = adjusted_mode->crtc_hsync_start -
7726
			vsyncshift = adjusted_mode->crtc_hsync_start -
7777
				adjusted_mode->crtc_htotal / 2;
7727
				adjusted_mode->crtc_htotal / 2;
7778
		if (vsyncshift < 0)
7728
		if (vsyncshift < 0)
7779
			vsyncshift += adjusted_mode->crtc_htotal;
7729
			vsyncshift += adjusted_mode->crtc_htotal;
7780
	}
7730
	}
7781
 
7731
 
7782
	if (INTEL_INFO(dev)->gen > 3)
7732
	if (INTEL_INFO(dev)->gen > 3)
7783
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7733
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7784
 
7734
 
7785
	I915_WRITE(HTOTAL(cpu_transcoder),
7735
	I915_WRITE(HTOTAL(cpu_transcoder),
7786
		   (adjusted_mode->crtc_hdisplay - 1) |
7736
		   (adjusted_mode->crtc_hdisplay - 1) |
7787
		   ((adjusted_mode->crtc_htotal - 1) << 16));
7737
		   ((adjusted_mode->crtc_htotal - 1) << 16));
7788
	I915_WRITE(HBLANK(cpu_transcoder),
7738
	I915_WRITE(HBLANK(cpu_transcoder),
7789
		   (adjusted_mode->crtc_hblank_start - 1) |
7739
		   (adjusted_mode->crtc_hblank_start - 1) |
7790
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
7740
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
7791
	I915_WRITE(HSYNC(cpu_transcoder),
7741
	I915_WRITE(HSYNC(cpu_transcoder),
7792
		   (adjusted_mode->crtc_hsync_start - 1) |
7742
		   (adjusted_mode->crtc_hsync_start - 1) |
7793
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
7743
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
7794
 
7744
 
7795
	I915_WRITE(VTOTAL(cpu_transcoder),
7745
	I915_WRITE(VTOTAL(cpu_transcoder),
7796
		   (adjusted_mode->crtc_vdisplay - 1) |
7746
		   (adjusted_mode->crtc_vdisplay - 1) |
7797
		   ((crtc_vtotal - 1) << 16));
7747
		   ((crtc_vtotal - 1) << 16));
7798
	I915_WRITE(VBLANK(cpu_transcoder),
7748
	I915_WRITE(VBLANK(cpu_transcoder),
7799
		   (adjusted_mode->crtc_vblank_start - 1) |
7749
		   (adjusted_mode->crtc_vblank_start - 1) |
7800
		   ((crtc_vblank_end - 1) << 16));
7750
		   ((crtc_vblank_end - 1) << 16));
7801
	I915_WRITE(VSYNC(cpu_transcoder),
7751
	I915_WRITE(VSYNC(cpu_transcoder),
7802
		   (adjusted_mode->crtc_vsync_start - 1) |
7752
		   (adjusted_mode->crtc_vsync_start - 1) |
7803
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
7753
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
7804
 
7754
 
7805
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7755
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7806
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7756
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7807
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7757
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7808
	 * bits. */
7758
	 * bits. */
7809
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7759
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7810
	    (pipe == PIPE_B || pipe == PIPE_C))
7760
	    (pipe == PIPE_B || pipe == PIPE_C))
7811
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7761
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7812
 
7762
 
7813
	/* pipesrc controls the size that is scaled from, which should
7763
	/* pipesrc controls the size that is scaled from, which should
7814
	 * always be the user's requested size.
7764
	 * always be the user's requested size.
7815
	 */
7765
	 */
7816
	I915_WRITE(PIPESRC(pipe),
7766
	I915_WRITE(PIPESRC(pipe),
7817
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
7767
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
7818
		   (intel_crtc->config->pipe_src_h - 1));
7768
		   (intel_crtc->config->pipe_src_h - 1));
7819
}
7769
}
7820
 
7770
 
7821
static void intel_get_pipe_timings(struct intel_crtc *crtc,
7771
static void intel_get_pipe_timings(struct intel_crtc *crtc,
7822
				   struct intel_crtc_state *pipe_config)
7772
				   struct intel_crtc_state *pipe_config)
7823
{
7773
{
7824
	struct drm_device *dev = crtc->base.dev;
7774
	struct drm_device *dev = crtc->base.dev;
7825
	struct drm_i915_private *dev_priv = dev->dev_private;
7775
	struct drm_i915_private *dev_priv = dev->dev_private;
7826
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7776
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7827
	uint32_t tmp;
7777
	uint32_t tmp;
7828
 
7778
 
7829
	tmp = I915_READ(HTOTAL(cpu_transcoder));
7779
	tmp = I915_READ(HTOTAL(cpu_transcoder));
7830
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7780
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7831
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7781
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7832
	tmp = I915_READ(HBLANK(cpu_transcoder));
7782
	tmp = I915_READ(HBLANK(cpu_transcoder));
7833
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7783
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7834
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7784
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7835
	tmp = I915_READ(HSYNC(cpu_transcoder));
7785
	tmp = I915_READ(HSYNC(cpu_transcoder));
7836
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7786
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7837
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7787
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7838
 
7788
 
7839
	tmp = I915_READ(VTOTAL(cpu_transcoder));
7789
	tmp = I915_READ(VTOTAL(cpu_transcoder));
7840
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7790
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7841
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7791
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7842
	tmp = I915_READ(VBLANK(cpu_transcoder));
7792
	tmp = I915_READ(VBLANK(cpu_transcoder));
7843
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7793
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7844
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7794
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7845
	tmp = I915_READ(VSYNC(cpu_transcoder));
7795
	tmp = I915_READ(VSYNC(cpu_transcoder));
7846
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7796
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7847
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7797
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7848
 
7798
 
7849
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7799
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7850
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7800
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7851
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7801
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7852
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7802
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7853
	}
7803
	}
7854
 
7804
 
7855
	tmp = I915_READ(PIPESRC(crtc->pipe));
7805
	tmp = I915_READ(PIPESRC(crtc->pipe));
7856
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7806
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7857
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7807
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7858
 
7808
 
7859
	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7809
	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7860
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7810
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7861
}
7811
}
7862
 
7812
 
7863
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7813
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7864
				 struct intel_crtc_state *pipe_config)
7814
				 struct intel_crtc_state *pipe_config)
7865
{
7815
{
7866
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7816
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7867
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7817
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7868
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7818
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7869
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7819
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7870
 
7820
 
7871
	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7821
	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7872
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7822
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7873
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7823
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7874
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7824
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7875
 
7825
 
7876
	mode->flags = pipe_config->base.adjusted_mode.flags;
7826
	mode->flags = pipe_config->base.adjusted_mode.flags;
7877
	mode->type = DRM_MODE_TYPE_DRIVER;
7827
	mode->type = DRM_MODE_TYPE_DRIVER;
7878
 
7828
 
7879
	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7829
	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7880
	mode->flags |= pipe_config->base.adjusted_mode.flags;
7830
	mode->flags |= pipe_config->base.adjusted_mode.flags;
7881
 
7831
 
7882
	mode->hsync = drm_mode_hsync(mode);
7832
	mode->hsync = drm_mode_hsync(mode);
7883
	mode->vrefresh = drm_mode_vrefresh(mode);
7833
	mode->vrefresh = drm_mode_vrefresh(mode);
7884
	drm_mode_set_name(mode);
7834
	drm_mode_set_name(mode);
7885
}
7835
}
7886
 
7836
 
7887
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7837
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7888
{
7838
{
7889
	struct drm_device *dev = intel_crtc->base.dev;
7839
	struct drm_device *dev = intel_crtc->base.dev;
7890
	struct drm_i915_private *dev_priv = dev->dev_private;
7840
	struct drm_i915_private *dev_priv = dev->dev_private;
7891
	uint32_t pipeconf;
7841
	uint32_t pipeconf;
7892
 
7842
 
7893
	pipeconf = 0;
7843
	pipeconf = 0;
7894
 
7844
 
7895
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7845
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7896
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7846
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7897
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7847
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7898
 
7848
 
7899
	if (intel_crtc->config->double_wide)
7849
	if (intel_crtc->config->double_wide)
7900
		pipeconf |= PIPECONF_DOUBLE_WIDE;
7850
		pipeconf |= PIPECONF_DOUBLE_WIDE;
7901
 
7851
 
7902
	/* only g4x and later have fancy bpc/dither controls */
7852
	/* only g4x and later have fancy bpc/dither controls */
7903
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
7853
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
7904
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
7854
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
7905
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7855
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7906
			pipeconf |= PIPECONF_DITHER_EN |
7856
			pipeconf |= PIPECONF_DITHER_EN |
7907
				    PIPECONF_DITHER_TYPE_SP;
7857
				    PIPECONF_DITHER_TYPE_SP;
7908
 
7858
 
7909
		switch (intel_crtc->config->pipe_bpp) {
7859
		switch (intel_crtc->config->pipe_bpp) {
7910
		case 18:
7860
		case 18:
7911
			pipeconf |= PIPECONF_6BPC;
7861
			pipeconf |= PIPECONF_6BPC;
7912
			break;
7862
			break;
7913
		case 24:
7863
		case 24:
7914
			pipeconf |= PIPECONF_8BPC;
7864
			pipeconf |= PIPECONF_8BPC;
7915
			break;
7865
			break;
7916
		case 30:
7866
		case 30:
7917
			pipeconf |= PIPECONF_10BPC;
7867
			pipeconf |= PIPECONF_10BPC;
7918
			break;
7868
			break;
7919
		default:
7869
		default:
7920
			/* Case prevented by intel_choose_pipe_bpp_dither. */
7870
			/* Case prevented by intel_choose_pipe_bpp_dither. */
7921
			BUG();
7871
			BUG();
7922
		}
7872
		}
7923
	}
7873
	}
7924
 
7874
 
7925
	if (HAS_PIPE_CXSR(dev)) {
7875
	if (HAS_PIPE_CXSR(dev)) {
7926
		if (intel_crtc->lowfreq_avail) {
7876
		if (intel_crtc->lowfreq_avail) {
7927
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7877
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7928
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7878
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7929
		} else {
7879
		} else {
7930
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
7880
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
7931
		}
7881
		}
7932
	}
7882
	}
7933
 
7883
 
7934
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7884
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7935
		if (INTEL_INFO(dev)->gen < 4 ||
7885
		if (INTEL_INFO(dev)->gen < 4 ||
7936
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7886
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7937
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7887
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7938
		else
7888
		else
7939
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7889
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7940
	} else
7890
	} else
7941
		pipeconf |= PIPECONF_PROGRESSIVE;
7891
		pipeconf |= PIPECONF_PROGRESSIVE;
7942
 
7892
 
7943
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7893
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7944
	     intel_crtc->config->limited_color_range)
7894
	     intel_crtc->config->limited_color_range)
7945
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7895
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7946
 
7896
 
7947
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7897
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7948
	POSTING_READ(PIPECONF(intel_crtc->pipe));
7898
	POSTING_READ(PIPECONF(intel_crtc->pipe));
7949
}
7899
}
7950
 
7900
 
7951
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7901
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7952
				   struct intel_crtc_state *crtc_state)
7902
				   struct intel_crtc_state *crtc_state)
7953
{
7903
{
7954
	struct drm_device *dev = crtc->base.dev;
7904
	struct drm_device *dev = crtc->base.dev;
7955
	struct drm_i915_private *dev_priv = dev->dev_private;
7905
	struct drm_i915_private *dev_priv = dev->dev_private;
7956
	int refclk, num_connectors = 0;
7906
	int refclk, num_connectors = 0;
7957
	intel_clock_t clock;
7907
	intel_clock_t clock;
7958
	bool ok;
7908
	bool ok;
7959
	const intel_limit_t *limit;
7909
	const intel_limit_t *limit;
7960
	struct drm_atomic_state *state = crtc_state->base.state;
7910
	struct drm_atomic_state *state = crtc_state->base.state;
7961
	struct drm_connector *connector;
7911
	struct drm_connector *connector;
7962
	struct drm_connector_state *connector_state;
7912
	struct drm_connector_state *connector_state;
7963
	int i;
7913
	int i;
7964
 
7914
 
7965
	memset(&crtc_state->dpll_hw_state, 0,
7915
	memset(&crtc_state->dpll_hw_state, 0,
7966
	       sizeof(crtc_state->dpll_hw_state));
7916
	       sizeof(crtc_state->dpll_hw_state));
7967
 
7917
 
7968
	if (crtc_state->has_dsi_encoder)
7918
	if (crtc_state->has_dsi_encoder)
7969
		return 0;
7919
		return 0;
7970
 
7920
 
7971
	for_each_connector_in_state(state, connector, connector_state, i) {
7921
	for_each_connector_in_state(state, connector, connector_state, i) {
7972
		if (connector_state->crtc == &crtc->base)
7922
		if (connector_state->crtc == &crtc->base)
7973
		num_connectors++;
7923
			num_connectors++;
7974
	}
7924
	}
7975
 
7925
 
7976
	if (!crtc_state->clock_set) {
7926
	if (!crtc_state->clock_set) {
7977
		refclk = i9xx_get_refclk(crtc_state, num_connectors);
7927
		refclk = i9xx_get_refclk(crtc_state, num_connectors);
7978
 
7928
 
7979
		/*
7929
		/*
7980
		 * Returns a set of divisors for the desired target clock with
7930
		 * Returns a set of divisors for the desired target clock with
7981
		 * the given refclk, or FALSE.  The returned values represent
7931
		 * the given refclk, or FALSE.  The returned values represent
7982
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
7932
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
7983
		 * 2) / p1 / p2.
7933
		 * 2) / p1 / p2.
7984
		 */
7934
		 */
7985
		limit = intel_limit(crtc_state, refclk);
7935
		limit = intel_limit(crtc_state, refclk);
7986
		ok = dev_priv->display.find_dpll(limit, crtc_state,
7936
		ok = dev_priv->display.find_dpll(limit, crtc_state,
7987
						 crtc_state->port_clock,
7937
						 crtc_state->port_clock,
7988
						 refclk, NULL, &clock);
7938
						 refclk, NULL, &clock);
7989
		if (!ok) {
7939
		if (!ok) {
7990
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
7940
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
7991
			return -EINVAL;
7941
			return -EINVAL;
7992
		}
7942
		}
7993
 
7943
 
7994
		/* Compat-code for transition, will disappear. */
7944
		/* Compat-code for transition, will disappear. */
7995
		crtc_state->dpll.n = clock.n;
7945
		crtc_state->dpll.n = clock.n;
7996
		crtc_state->dpll.m1 = clock.m1;
7946
		crtc_state->dpll.m1 = clock.m1;
7997
		crtc_state->dpll.m2 = clock.m2;
7947
		crtc_state->dpll.m2 = clock.m2;
7998
		crtc_state->dpll.p1 = clock.p1;
7948
		crtc_state->dpll.p1 = clock.p1;
7999
		crtc_state->dpll.p2 = clock.p2;
7949
		crtc_state->dpll.p2 = clock.p2;
8000
	}
7950
	}
8001
 
7951
 
8002
	if (IS_GEN2(dev)) {
7952
	if (IS_GEN2(dev)) {
8003
		i8xx_compute_dpll(crtc, crtc_state, NULL,
7953
		i8xx_compute_dpll(crtc, crtc_state, NULL,
8004
				  num_connectors);
7954
				  num_connectors);
8005
	} else if (IS_CHERRYVIEW(dev)) {
7955
	} else if (IS_CHERRYVIEW(dev)) {
8006
		chv_compute_dpll(crtc, crtc_state);
7956
		chv_compute_dpll(crtc, crtc_state);
8007
	} else if (IS_VALLEYVIEW(dev)) {
7957
	} else if (IS_VALLEYVIEW(dev)) {
8008
		vlv_compute_dpll(crtc, crtc_state);
7958
		vlv_compute_dpll(crtc, crtc_state);
8009
	} else {
7959
	} else {
8010
		i9xx_compute_dpll(crtc, crtc_state, NULL,
7960
		i9xx_compute_dpll(crtc, crtc_state, NULL,
8011
				  num_connectors);
7961
				  num_connectors);
8012
	}
7962
	}
8013
 
7963
 
8014
	return 0;
7964
	return 0;
8015
}
7965
}
8016
 
7966
 
8017
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
7967
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8018
				 struct intel_crtc_state *pipe_config)
7968
				 struct intel_crtc_state *pipe_config)
8019
{
7969
{
8020
	struct drm_device *dev = crtc->base.dev;
7970
	struct drm_device *dev = crtc->base.dev;
8021
	struct drm_i915_private *dev_priv = dev->dev_private;
7971
	struct drm_i915_private *dev_priv = dev->dev_private;
8022
	uint32_t tmp;
7972
	uint32_t tmp;
8023
 
7973
 
8024
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
7974
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
8025
		return;
7975
		return;
8026
 
7976
 
8027
	tmp = I915_READ(PFIT_CONTROL);
7977
	tmp = I915_READ(PFIT_CONTROL);
8028
	if (!(tmp & PFIT_ENABLE))
7978
	if (!(tmp & PFIT_ENABLE))
8029
		return;
7979
		return;
8030
 
7980
 
8031
	/* Check whether the pfit is attached to our pipe. */
7981
	/* Check whether the pfit is attached to our pipe. */
8032
	if (INTEL_INFO(dev)->gen < 4) {
7982
	if (INTEL_INFO(dev)->gen < 4) {
8033
		if (crtc->pipe != PIPE_B)
7983
		if (crtc->pipe != PIPE_B)
8034
			return;
7984
			return;
8035
	} else {
7985
	} else {
8036
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7986
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8037
			return;
7987
			return;
8038
	}
7988
	}
8039
 
7989
 
8040
	pipe_config->gmch_pfit.control = tmp;
7990
	pipe_config->gmch_pfit.control = tmp;
8041
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
7991
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8042
	if (INTEL_INFO(dev)->gen < 5)
-
 
8043
		pipe_config->gmch_pfit.lvds_border_bits =
-
 
8044
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
-
 
8045
}
7992
}
8046
 
7993
 
8047
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
7994
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8048
			       struct intel_crtc_state *pipe_config)
7995
			       struct intel_crtc_state *pipe_config)
8049
{
7996
{
8050
	struct drm_device *dev = crtc->base.dev;
7997
	struct drm_device *dev = crtc->base.dev;
8051
	struct drm_i915_private *dev_priv = dev->dev_private;
7998
	struct drm_i915_private *dev_priv = dev->dev_private;
8052
	int pipe = pipe_config->cpu_transcoder;
7999
	int pipe = pipe_config->cpu_transcoder;
8053
	intel_clock_t clock;
8000
	intel_clock_t clock;
8054
	u32 mdiv;
8001
	u32 mdiv;
8055
	int refclk = 100000;
8002
	int refclk = 100000;
8056
 
8003
 
8057
	/* In case of MIPI DPLL will not even be used */
8004
	/* In case of MIPI DPLL will not even be used */
8058
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
8005
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
8059
		return;
8006
		return;
8060
 
8007
 
8061
	mutex_lock(&dev_priv->sb_lock);
8008
	mutex_lock(&dev_priv->sb_lock);
8062
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8009
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8063
	mutex_unlock(&dev_priv->sb_lock);
8010
	mutex_unlock(&dev_priv->sb_lock);
8064
 
8011
 
8065
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8012
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8066
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8013
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8067
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8014
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8068
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8015
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8069
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8016
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8070
 
8017
 
8071
	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8018
	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8072
}
8019
}
8073
 
8020
 
8074
static void
8021
static void
8075
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8022
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8076
			      struct intel_initial_plane_config *plane_config)
8023
			      struct intel_initial_plane_config *plane_config)
8077
{
8024
{
8078
	struct drm_device *dev = crtc->base.dev;
8025
	struct drm_device *dev = crtc->base.dev;
8079
	struct drm_i915_private *dev_priv = dev->dev_private;
8026
	struct drm_i915_private *dev_priv = dev->dev_private;
8080
	u32 val, base, offset;
8027
	u32 val, base, offset;
8081
	int pipe = crtc->pipe, plane = crtc->plane;
8028
	int pipe = crtc->pipe, plane = crtc->plane;
8082
	int fourcc, pixel_format;
8029
	int fourcc, pixel_format;
8083
	unsigned int aligned_height;
8030
	unsigned int aligned_height;
8084
	struct drm_framebuffer *fb;
8031
	struct drm_framebuffer *fb;
8085
	struct intel_framebuffer *intel_fb;
8032
	struct intel_framebuffer *intel_fb;
8086
 
8033
 
8087
	val = I915_READ(DSPCNTR(plane));
8034
	val = I915_READ(DSPCNTR(plane));
8088
	if (!(val & DISPLAY_PLANE_ENABLE))
8035
	if (!(val & DISPLAY_PLANE_ENABLE))
8089
		return;
8036
		return;
8090
 
8037
 
8091
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8038
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8092
	if (!intel_fb) {
8039
	if (!intel_fb) {
8093
		DRM_DEBUG_KMS("failed to alloc fb\n");
8040
		DRM_DEBUG_KMS("failed to alloc fb\n");
8094
		return;
8041
		return;
8095
	}
8042
	}
8096
 
8043
 
8097
	fb = &intel_fb->base;
8044
	fb = &intel_fb->base;
8098
 
8045
 
8099
	if (INTEL_INFO(dev)->gen >= 4) {
8046
	if (INTEL_INFO(dev)->gen >= 4) {
8100
		if (val & DISPPLANE_TILED) {
8047
		if (val & DISPPLANE_TILED) {
8101
			plane_config->tiling = I915_TILING_X;
8048
			plane_config->tiling = I915_TILING_X;
8102
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8049
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8103
		}
8050
		}
8104
	}
8051
	}
8105
 
8052
 
8106
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8053
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8107
	fourcc = i9xx_format_to_fourcc(pixel_format);
8054
	fourcc = i9xx_format_to_fourcc(pixel_format);
8108
	fb->pixel_format = fourcc;
8055
	fb->pixel_format = fourcc;
8109
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8056
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8110
 
8057
 
8111
	if (INTEL_INFO(dev)->gen >= 4) {
8058
	if (INTEL_INFO(dev)->gen >= 4) {
8112
		if (plane_config->tiling)
8059
		if (plane_config->tiling)
8113
			offset = I915_READ(DSPTILEOFF(plane));
8060
			offset = I915_READ(DSPTILEOFF(plane));
8114
		else
8061
		else
8115
			offset = I915_READ(DSPLINOFF(plane));
8062
			offset = I915_READ(DSPLINOFF(plane));
8116
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8063
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8117
	} else {
8064
	} else {
8118
		base = I915_READ(DSPADDR(plane));
8065
		base = I915_READ(DSPADDR(plane));
8119
	}
8066
	}
8120
	plane_config->base = base;
8067
	plane_config->base = base;
8121
 
8068
 
8122
	val = I915_READ(PIPESRC(pipe));
8069
	val = I915_READ(PIPESRC(pipe));
8123
	fb->width = ((val >> 16) & 0xfff) + 1;
8070
	fb->width = ((val >> 16) & 0xfff) + 1;
8124
	fb->height = ((val >> 0) & 0xfff) + 1;
8071
	fb->height = ((val >> 0) & 0xfff) + 1;
8125
 
8072
 
8126
	val = I915_READ(DSPSTRIDE(pipe));
8073
	val = I915_READ(DSPSTRIDE(pipe));
8127
	fb->pitches[0] = val & 0xffffffc0;
8074
	fb->pitches[0] = val & 0xffffffc0;
8128
 
8075
 
8129
	aligned_height = intel_fb_align_height(dev, fb->height,
8076
	aligned_height = intel_fb_align_height(dev, fb->height,
8130
					       fb->pixel_format,
8077
					       fb->pixel_format,
8131
					       fb->modifier[0]);
8078
					       fb->modifier[0]);
8132
 
8079
 
8133
	plane_config->size = fb->pitches[0] * aligned_height;
8080
	plane_config->size = fb->pitches[0] * aligned_height;
8134
 
8081
 
8135
	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8082
	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8136
		      pipe_name(pipe), plane, fb->width, fb->height,
8083
		      pipe_name(pipe), plane, fb->width, fb->height,
8137
		      fb->bits_per_pixel, base, fb->pitches[0],
8084
		      fb->bits_per_pixel, base, fb->pitches[0],
8138
		      plane_config->size);
8085
		      plane_config->size);
8139
 
8086
 
8140
	plane_config->fb = intel_fb;
8087
	plane_config->fb = intel_fb;
8141
}
8088
}
8142
 
8089
 
8143
static void chv_crtc_clock_get(struct intel_crtc *crtc,
8090
static void chv_crtc_clock_get(struct intel_crtc *crtc,
8144
			       struct intel_crtc_state *pipe_config)
8091
			       struct intel_crtc_state *pipe_config)
8145
{
8092
{
8146
	struct drm_device *dev = crtc->base.dev;
8093
	struct drm_device *dev = crtc->base.dev;
8147
	struct drm_i915_private *dev_priv = dev->dev_private;
8094
	struct drm_i915_private *dev_priv = dev->dev_private;
8148
	int pipe = pipe_config->cpu_transcoder;
8095
	int pipe = pipe_config->cpu_transcoder;
8149
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8096
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8150
	intel_clock_t clock;
8097
	intel_clock_t clock;
8151
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8098
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8152
	int refclk = 100000;
8099
	int refclk = 100000;
8153
 
8100
 
8154
	mutex_lock(&dev_priv->sb_lock);
8101
	mutex_lock(&dev_priv->sb_lock);
8155
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8102
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8156
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8103
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8157
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8104
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8158
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8105
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8159
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8106
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8160
	mutex_unlock(&dev_priv->sb_lock);
8107
	mutex_unlock(&dev_priv->sb_lock);
8161
 
8108
 
8162
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8109
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8163
	clock.m2 = (pll_dw0 & 0xff) << 22;
8110
	clock.m2 = (pll_dw0 & 0xff) << 22;
8164
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8111
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8165
		clock.m2 |= pll_dw2 & 0x3fffff;
8112
		clock.m2 |= pll_dw2 & 0x3fffff;
8166
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8113
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8167
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8114
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8168
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8115
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8169
 
8116
 
8170
	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8117
	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8171
}
8118
}
8172
 
8119
 
8173
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8120
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8174
				 struct intel_crtc_state *pipe_config)
8121
				 struct intel_crtc_state *pipe_config)
8175
{
8122
{
8176
	struct drm_device *dev = crtc->base.dev;
8123
	struct drm_device *dev = crtc->base.dev;
8177
	struct drm_i915_private *dev_priv = dev->dev_private;
8124
	struct drm_i915_private *dev_priv = dev->dev_private;
8178
	enum intel_display_power_domain power_domain;
8125
	enum intel_display_power_domain power_domain;
8179
	uint32_t tmp;
8126
	uint32_t tmp;
8180
	bool ret;
8127
	bool ret;
8181
 
8128
 
8182
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8129
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8183
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8130
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8184
		return false;
8131
		return false;
8185
 
8132
 
8186
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8133
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8187
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8134
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8188
 
8135
 
8189
	ret = false;
8136
	ret = false;
8190
 
8137
 
8191
	tmp = I915_READ(PIPECONF(crtc->pipe));
8138
	tmp = I915_READ(PIPECONF(crtc->pipe));
8192
	if (!(tmp & PIPECONF_ENABLE))
8139
	if (!(tmp & PIPECONF_ENABLE))
8193
		goto out;
8140
		goto out;
8194
 
8141
 
8195
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8142
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8196
		switch (tmp & PIPECONF_BPC_MASK) {
8143
		switch (tmp & PIPECONF_BPC_MASK) {
8197
		case PIPECONF_6BPC:
8144
		case PIPECONF_6BPC:
8198
			pipe_config->pipe_bpp = 18;
8145
			pipe_config->pipe_bpp = 18;
8199
			break;
8146
			break;
8200
		case PIPECONF_8BPC:
8147
		case PIPECONF_8BPC:
8201
			pipe_config->pipe_bpp = 24;
8148
			pipe_config->pipe_bpp = 24;
8202
			break;
8149
			break;
8203
		case PIPECONF_10BPC:
8150
		case PIPECONF_10BPC:
8204
			pipe_config->pipe_bpp = 30;
8151
			pipe_config->pipe_bpp = 30;
8205
			break;
8152
			break;
8206
		default:
8153
		default:
8207
			break;
8154
			break;
8208
		}
8155
		}
8209
	}
8156
	}
8210
 
8157
 
8211
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8158
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8212
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
8159
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
8213
		pipe_config->limited_color_range = true;
8160
		pipe_config->limited_color_range = true;
8214
 
8161
 
8215
	if (INTEL_INFO(dev)->gen < 4)
8162
	if (INTEL_INFO(dev)->gen < 4)
8216
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8163
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8217
 
8164
 
8218
	intel_get_pipe_timings(crtc, pipe_config);
8165
	intel_get_pipe_timings(crtc, pipe_config);
8219
 
8166
 
8220
	i9xx_get_pfit_config(crtc, pipe_config);
8167
	i9xx_get_pfit_config(crtc, pipe_config);
8221
 
8168
 
8222
	if (INTEL_INFO(dev)->gen >= 4) {
8169
	if (INTEL_INFO(dev)->gen >= 4) {
8223
		tmp = I915_READ(DPLL_MD(crtc->pipe));
8170
		tmp = I915_READ(DPLL_MD(crtc->pipe));
8224
		pipe_config->pixel_multiplier =
8171
		pipe_config->pixel_multiplier =
8225
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8172
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8226
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8173
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8227
		pipe_config->dpll_hw_state.dpll_md = tmp;
8174
		pipe_config->dpll_hw_state.dpll_md = tmp;
8228
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8175
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8229
		tmp = I915_READ(DPLL(crtc->pipe));
8176
		tmp = I915_READ(DPLL(crtc->pipe));
8230
		pipe_config->pixel_multiplier =
8177
		pipe_config->pixel_multiplier =
8231
			((tmp & SDVO_MULTIPLIER_MASK)
8178
			((tmp & SDVO_MULTIPLIER_MASK)
8232
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8179
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8233
	} else {
8180
	} else {
8234
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
8181
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
8235
		 * port and will be fixed up in the encoder->get_config
8182
		 * port and will be fixed up in the encoder->get_config
8236
		 * function. */
8183
		 * function. */
8237
		pipe_config->pixel_multiplier = 1;
8184
		pipe_config->pixel_multiplier = 1;
8238
	}
8185
	}
8239
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8186
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8240
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
8187
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
8241
		/*
8188
		/*
8242
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8189
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8243
		 * on 830. Filter it out here so that we don't
8190
		 * on 830. Filter it out here so that we don't
8244
		 * report errors due to that.
8191
		 * report errors due to that.
8245
		 */
8192
		 */
8246
		if (IS_I830(dev))
8193
		if (IS_I830(dev))
8247
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8194
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8248
 
8195
 
8249
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8196
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8250
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8197
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8251
	} else {
8198
	} else {
8252
		/* Mask out read-only status bits. */
8199
		/* Mask out read-only status bits. */
8253
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8200
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8254
						     DPLL_PORTC_READY_MASK |
8201
						     DPLL_PORTC_READY_MASK |
8255
						     DPLL_PORTB_READY_MASK);
8202
						     DPLL_PORTB_READY_MASK);
8256
	}
8203
	}
8257
 
8204
 
8258
	if (IS_CHERRYVIEW(dev))
8205
	if (IS_CHERRYVIEW(dev))
8259
		chv_crtc_clock_get(crtc, pipe_config);
8206
		chv_crtc_clock_get(crtc, pipe_config);
8260
	else if (IS_VALLEYVIEW(dev))
8207
	else if (IS_VALLEYVIEW(dev))
8261
		vlv_crtc_clock_get(crtc, pipe_config);
8208
		vlv_crtc_clock_get(crtc, pipe_config);
8262
	else
8209
	else
8263
		i9xx_crtc_clock_get(crtc, pipe_config);
8210
		i9xx_crtc_clock_get(crtc, pipe_config);
8264
 
8211
 
8265
	/*
8212
	/*
8266
	 * Normally the dotclock is filled in by the encoder .get_config()
8213
	 * Normally the dotclock is filled in by the encoder .get_config()
8267
	 * but in case the pipe is enabled w/o any ports we need a sane
8214
	 * but in case the pipe is enabled w/o any ports we need a sane
8268
	 * default.
8215
	 * default.
8269
	 */
8216
	 */
8270
	pipe_config->base.adjusted_mode.crtc_clock =
8217
	pipe_config->base.adjusted_mode.crtc_clock =
8271
		pipe_config->port_clock / pipe_config->pixel_multiplier;
8218
		pipe_config->port_clock / pipe_config->pixel_multiplier;
8272
 
8219
 
8273
	ret = true;
8220
	ret = true;
8274
 
8221
 
8275
out:
8222
out:
8276
	intel_display_power_put(dev_priv, power_domain);
8223
	intel_display_power_put(dev_priv, power_domain);
8277
 
8224
 
8278
	return ret;
8225
	return ret;
8279
}
8226
}
8280
 
8227
 
8281
static void ironlake_init_pch_refclk(struct drm_device *dev)
8228
static void ironlake_init_pch_refclk(struct drm_device *dev)
8282
{
8229
{
8283
	struct drm_i915_private *dev_priv = dev->dev_private;
8230
	struct drm_i915_private *dev_priv = dev->dev_private;
8284
	struct intel_encoder *encoder;
8231
	struct intel_encoder *encoder;
-
 
8232
	int i;
8285
	u32 val, final;
8233
	u32 val, final;
8286
	bool has_lvds = false;
8234
	bool has_lvds = false;
8287
	bool has_cpu_edp = false;
8235
	bool has_cpu_edp = false;
8288
	bool has_panel = false;
8236
	bool has_panel = false;
8289
	bool has_ck505 = false;
8237
	bool has_ck505 = false;
8290
	bool can_ssc = false;
8238
	bool can_ssc = false;
-
 
8239
	bool using_ssc_source = false;
8291
 
8240
 
8292
	/* We need to take the global config into account */
8241
	/* We need to take the global config into account */
8293
	for_each_intel_encoder(dev, encoder) {
8242
	for_each_intel_encoder(dev, encoder) {
8294
		switch (encoder->type) {
8243
		switch (encoder->type) {
8295
		case INTEL_OUTPUT_LVDS:
8244
		case INTEL_OUTPUT_LVDS:
8296
			has_panel = true;
8245
			has_panel = true;
8297
			has_lvds = true;
8246
			has_lvds = true;
8298
			break;
8247
			break;
8299
		case INTEL_OUTPUT_EDP:
8248
		case INTEL_OUTPUT_EDP:
8300
			has_panel = true;
8249
			has_panel = true;
8301
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
8250
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
8302
				has_cpu_edp = true;
8251
				has_cpu_edp = true;
8303
			break;
8252
			break;
8304
		default:
8253
		default:
8305
			break;
8254
			break;
8306
		}
8255
		}
8307
	}
8256
	}
8308
 
8257
 
8309
	if (HAS_PCH_IBX(dev)) {
8258
	if (HAS_PCH_IBX(dev)) {
8310
		has_ck505 = dev_priv->vbt.display_clock_mode;
8259
		has_ck505 = dev_priv->vbt.display_clock_mode;
8311
		can_ssc = has_ck505;
8260
		can_ssc = has_ck505;
8312
	} else {
8261
	} else {
8313
		has_ck505 = false;
8262
		has_ck505 = false;
8314
		can_ssc = true;
8263
		can_ssc = true;
8315
	}
8264
	}
-
 
8265
 
-
 
8266
	/* Check if any DPLLs are using the SSC source */
-
 
8267
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-
 
8268
		u32 temp = I915_READ(PCH_DPLL(i));
-
 
8269
 
-
 
8270
		if (!(temp & DPLL_VCO_ENABLE))
-
 
8271
			continue;
-
 
8272
 
-
 
8273
		if ((temp & PLL_REF_INPUT_MASK) ==
-
 
8274
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
-
 
8275
			using_ssc_source = true;
-
 
8276
			break;
-
 
8277
		}
-
 
8278
	}
8316
 
8279
 
8317
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
8280
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8318
		      has_panel, has_lvds, has_ck505);
8281
		      has_panel, has_lvds, has_ck505, using_ssc_source);
8319
 
8282
 
8320
	/* Ironlake: try to setup display ref clock before DPLL
8283
	/* Ironlake: try to setup display ref clock before DPLL
8321
	 * enabling. This is only under driver's control after
8284
	 * enabling. This is only under driver's control after
8322
	 * PCH B stepping, previous chipset stepping should be
8285
	 * PCH B stepping, previous chipset stepping should be
8323
	 * ignoring this setting.
8286
	 * ignoring this setting.
8324
	 */
8287
	 */
8325
	val = I915_READ(PCH_DREF_CONTROL);
8288
	val = I915_READ(PCH_DREF_CONTROL);
8326
 
8289
 
8327
	/* As we must carefully and slowly disable/enable each source in turn,
8290
	/* As we must carefully and slowly disable/enable each source in turn,
8328
	 * compute the final state we want first and check if we need to
8291
	 * compute the final state we want first and check if we need to
8329
	 * make any changes at all.
8292
	 * make any changes at all.
8330
	 */
8293
	 */
8331
	final = val;
8294
	final = val;
8332
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
8295
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
8333
	if (has_ck505)
8296
	if (has_ck505)
8334
		final |= DREF_NONSPREAD_CK505_ENABLE;
8297
		final |= DREF_NONSPREAD_CK505_ENABLE;
8335
	else
8298
	else
8336
		final |= DREF_NONSPREAD_SOURCE_ENABLE;
8299
		final |= DREF_NONSPREAD_SOURCE_ENABLE;
8337
 
8300
 
8338
	final &= ~DREF_SSC_SOURCE_MASK;
8301
	final &= ~DREF_SSC_SOURCE_MASK;
8339
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8302
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8340
	final &= ~DREF_SSC1_ENABLE;
8303
	final &= ~DREF_SSC1_ENABLE;
8341
 
8304
 
8342
	if (has_panel) {
8305
	if (has_panel) {
8343
		final |= DREF_SSC_SOURCE_ENABLE;
8306
		final |= DREF_SSC_SOURCE_ENABLE;
8344
 
8307
 
8345
		if (intel_panel_use_ssc(dev_priv) && can_ssc)
8308
		if (intel_panel_use_ssc(dev_priv) && can_ssc)
8346
			final |= DREF_SSC1_ENABLE;
8309
			final |= DREF_SSC1_ENABLE;
8347
 
8310
 
8348
		if (has_cpu_edp) {
8311
		if (has_cpu_edp) {
8349
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
8312
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
8350
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8313
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8351
			else
8314
			else
8352
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8315
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8353
		} else
8316
		} else
8354
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8317
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8355
	} else {
8318
	} else if (using_ssc_source) {
8356
		final |= DREF_SSC_SOURCE_DISABLE;
8319
		final |= DREF_SSC_SOURCE_ENABLE;
8357
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8320
		final |= DREF_SSC1_ENABLE;
8358
	}
8321
	}
8359
 
8322
 
8360
	if (final == val)
8323
	if (final == val)
8361
		return;
8324
		return;
8362
 
8325
 
8363
	/* Always enable nonspread source */
8326
	/* Always enable nonspread source */
8364
	val &= ~DREF_NONSPREAD_SOURCE_MASK;
8327
	val &= ~DREF_NONSPREAD_SOURCE_MASK;
8365
 
8328
 
8366
	if (has_ck505)
8329
	if (has_ck505)
8367
		val |= DREF_NONSPREAD_CK505_ENABLE;
8330
		val |= DREF_NONSPREAD_CK505_ENABLE;
8368
	else
8331
	else
8369
		val |= DREF_NONSPREAD_SOURCE_ENABLE;
8332
		val |= DREF_NONSPREAD_SOURCE_ENABLE;
8370
 
8333
 
8371
	if (has_panel) {
8334
	if (has_panel) {
8372
		val &= ~DREF_SSC_SOURCE_MASK;
8335
		val &= ~DREF_SSC_SOURCE_MASK;
8373
		val |= DREF_SSC_SOURCE_ENABLE;
8336
		val |= DREF_SSC_SOURCE_ENABLE;
8374
 
8337
 
8375
		/* SSC must be turned on before enabling the CPU output  */
8338
		/* SSC must be turned on before enabling the CPU output  */
8376
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8339
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8377
			DRM_DEBUG_KMS("Using SSC on panel\n");
8340
			DRM_DEBUG_KMS("Using SSC on panel\n");
8378
			val |= DREF_SSC1_ENABLE;
8341
			val |= DREF_SSC1_ENABLE;
8379
		} else
8342
		} else
8380
			val &= ~DREF_SSC1_ENABLE;
8343
			val &= ~DREF_SSC1_ENABLE;
8381
 
8344
 
8382
		/* Get SSC going before enabling the outputs */
8345
		/* Get SSC going before enabling the outputs */
8383
		I915_WRITE(PCH_DREF_CONTROL, val);
8346
		I915_WRITE(PCH_DREF_CONTROL, val);
8384
		POSTING_READ(PCH_DREF_CONTROL);
8347
		POSTING_READ(PCH_DREF_CONTROL);
8385
		udelay(200);
8348
		udelay(200);
8386
 
8349
 
8387
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8350
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8388
 
8351
 
8389
		/* Enable CPU source on CPU attached eDP */
8352
		/* Enable CPU source on CPU attached eDP */
8390
		if (has_cpu_edp) {
8353
		if (has_cpu_edp) {
8391
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8354
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8392
				DRM_DEBUG_KMS("Using SSC on eDP\n");
8355
				DRM_DEBUG_KMS("Using SSC on eDP\n");
8393
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8356
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8394
			} else
8357
			} else
8395
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8358
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8396
		} else
8359
		} else
8397
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8360
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8398
 
8361
 
8399
		I915_WRITE(PCH_DREF_CONTROL, val);
8362
		I915_WRITE(PCH_DREF_CONTROL, val);
8400
		POSTING_READ(PCH_DREF_CONTROL);
8363
		POSTING_READ(PCH_DREF_CONTROL);
8401
		udelay(200);
8364
		udelay(200);
8402
	} else {
8365
	} else {
8403
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
8366
		DRM_DEBUG_KMS("Disabling CPU source output\n");
8404
 
8367
 
8405
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8368
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8406
 
8369
 
8407
		/* Turn off CPU output */
8370
		/* Turn off CPU output */
8408
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8371
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8409
 
8372
 
8410
		I915_WRITE(PCH_DREF_CONTROL, val);
8373
		I915_WRITE(PCH_DREF_CONTROL, val);
8411
		POSTING_READ(PCH_DREF_CONTROL);
8374
		POSTING_READ(PCH_DREF_CONTROL);
8412
		udelay(200);
8375
		udelay(200);
-
 
8376
 
-
 
8377
		if (!using_ssc_source) {
-
 
8378
			DRM_DEBUG_KMS("Disabling SSC source\n");
8413
 
8379
 
8414
		/* Turn off the SSC source */
8380
			/* Turn off the SSC source */
8415
		val &= ~DREF_SSC_SOURCE_MASK;
8381
			val &= ~DREF_SSC_SOURCE_MASK;
8416
		val |= DREF_SSC_SOURCE_DISABLE;
8382
			val |= DREF_SSC_SOURCE_DISABLE;
8417
 
8383
 
8418
		/* Turn off SSC1 */
8384
			/* Turn off SSC1 */
8419
		val &= ~DREF_SSC1_ENABLE;
8385
			val &= ~DREF_SSC1_ENABLE;
8420
 
8386
 
8421
		I915_WRITE(PCH_DREF_CONTROL, val);
8387
			I915_WRITE(PCH_DREF_CONTROL, val);
8422
		POSTING_READ(PCH_DREF_CONTROL);
8388
			POSTING_READ(PCH_DREF_CONTROL);
8423
		udelay(200);
8389
			udelay(200);
8424
	}
8390
		}
-
 
8391
	}
8425
 
8392
 
8426
	BUG_ON(val != final);
8393
	BUG_ON(val != final);
8427
}
8394
}
8428
 
8395
 
8429
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8396
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8430
{
8397
{
8431
	uint32_t tmp;
8398
	uint32_t tmp;
8432
 
8399
 
8433
	tmp = I915_READ(SOUTH_CHICKEN2);
8400
	tmp = I915_READ(SOUTH_CHICKEN2);
8434
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8401
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8435
	I915_WRITE(SOUTH_CHICKEN2, tmp);
8402
	I915_WRITE(SOUTH_CHICKEN2, tmp);
8436
 
8403
 
8437
	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
8404
	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
8438
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8405
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8439
		DRM_ERROR("FDI mPHY reset assert timeout\n");
8406
		DRM_ERROR("FDI mPHY reset assert timeout\n");
8440
 
8407
 
8441
	tmp = I915_READ(SOUTH_CHICKEN2);
8408
	tmp = I915_READ(SOUTH_CHICKEN2);
8442
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8409
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8443
	I915_WRITE(SOUTH_CHICKEN2, tmp);
8410
	I915_WRITE(SOUTH_CHICKEN2, tmp);
8444
 
8411
 
8445
	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
8412
	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
8446
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8413
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8447
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8414
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8448
}
8415
}
8449
 
8416
 
8450
/* WaMPhyProgramming:hsw */
8417
/* WaMPhyProgramming:hsw */
8451
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8418
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8452
{
8419
{
8453
	uint32_t tmp;
8420
	uint32_t tmp;
8454
 
8421
 
8455
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8422
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8456
	tmp &= ~(0xFF << 24);
8423
	tmp &= ~(0xFF << 24);
8457
	tmp |= (0x12 << 24);
8424
	tmp |= (0x12 << 24);
8458
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8425
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8459
 
8426
 
8460
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8427
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8461
	tmp |= (1 << 11);
8428
	tmp |= (1 << 11);
8462
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8429
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8463
 
8430
 
8464
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8431
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8465
	tmp |= (1 << 11);
8432
	tmp |= (1 << 11);
8466
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8433
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8467
 
8434
 
8468
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8435
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8469
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8436
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8470
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8437
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8471
 
8438
 
8472
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8439
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8473
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8440
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8474
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8441
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8475
 
8442
 
8476
	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8443
	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8477
	tmp &= ~(7 << 13);
8444
	tmp &= ~(7 << 13);
8478
	tmp |= (5 << 13);
8445
	tmp |= (5 << 13);
8479
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8446
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8480
 
8447
 
8481
	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8448
	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8482
	tmp &= ~(7 << 13);
8449
	tmp &= ~(7 << 13);
8483
	tmp |= (5 << 13);
8450
	tmp |= (5 << 13);
8484
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8451
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8485
 
8452
 
8486
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8453
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8487
	tmp &= ~0xFF;
8454
	tmp &= ~0xFF;
8488
	tmp |= 0x1C;
8455
	tmp |= 0x1C;
8489
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8456
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8490
 
8457
 
8491
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8458
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8492
	tmp &= ~0xFF;
8459
	tmp &= ~0xFF;
8493
	tmp |= 0x1C;
8460
	tmp |= 0x1C;
8494
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8461
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8495
 
8462
 
8496
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8463
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8497
	tmp &= ~(0xFF << 16);
8464
	tmp &= ~(0xFF << 16);
8498
	tmp |= (0x1C << 16);
8465
	tmp |= (0x1C << 16);
8499
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8466
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8500
 
8467
 
8501
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8468
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8502
	tmp &= ~(0xFF << 16);
8469
	tmp &= ~(0xFF << 16);
8503
	tmp |= (0x1C << 16);
8470
	tmp |= (0x1C << 16);
8504
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8471
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8505
 
8472
 
8506
	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8473
	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8507
	tmp |= (1 << 27);
8474
	tmp |= (1 << 27);
8508
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8475
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8509
 
8476
 
8510
	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8477
	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8511
	tmp |= (1 << 27);
8478
	tmp |= (1 << 27);
8512
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8479
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8513
 
8480
 
8514
	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8481
	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8515
	tmp &= ~(0xF << 28);
8482
	tmp &= ~(0xF << 28);
8516
	tmp |= (4 << 28);
8483
	tmp |= (4 << 28);
8517
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8484
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8518
 
8485
 
8519
	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8486
	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8520
	tmp &= ~(0xF << 28);
8487
	tmp &= ~(0xF << 28);
8521
	tmp |= (4 << 28);
8488
	tmp |= (4 << 28);
8522
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8489
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8523
}
8490
}
8524
 
8491
 
8525
/* Implements 3 different sequences from BSpec chapter "Display iCLK
8492
/* Implements 3 different sequences from BSpec chapter "Display iCLK
8526
 * Programming" based on the parameters passed:
8493
 * Programming" based on the parameters passed:
8527
 * - Sequence to enable CLKOUT_DP
8494
 * - Sequence to enable CLKOUT_DP
8528
 * - Sequence to enable CLKOUT_DP without spread
8495
 * - Sequence to enable CLKOUT_DP without spread
8529
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8496
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8530
 */
8497
 */
8531
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8498
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8532
				 bool with_fdi)
8499
				 bool with_fdi)
8533
{
8500
{
8534
	struct drm_i915_private *dev_priv = dev->dev_private;
8501
	struct drm_i915_private *dev_priv = dev->dev_private;
8535
	uint32_t reg, tmp;
8502
	uint32_t reg, tmp;
8536
 
8503
 
8537
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8504
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8538
		with_spread = true;
8505
		with_spread = true;
8539
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8506
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8540
		with_fdi = false;
8507
		with_fdi = false;
8541
 
8508
 
8542
	mutex_lock(&dev_priv->sb_lock);
8509
	mutex_lock(&dev_priv->sb_lock);
8543
 
8510
 
8544
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8511
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8545
	tmp &= ~SBI_SSCCTL_DISABLE;
8512
	tmp &= ~SBI_SSCCTL_DISABLE;
8546
	tmp |= SBI_SSCCTL_PATHALT;
8513
	tmp |= SBI_SSCCTL_PATHALT;
8547
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8514
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8548
 
8515
 
8549
	udelay(24);
8516
	udelay(24);
8550
 
8517
 
8551
	if (with_spread) {
8518
	if (with_spread) {
8552
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8519
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8553
		tmp &= ~SBI_SSCCTL_PATHALT;
8520
		tmp &= ~SBI_SSCCTL_PATHALT;
8554
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8521
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8555
 
8522
 
8556
		if (with_fdi) {
8523
		if (with_fdi) {
8557
			lpt_reset_fdi_mphy(dev_priv);
8524
			lpt_reset_fdi_mphy(dev_priv);
8558
			lpt_program_fdi_mphy(dev_priv);
8525
			lpt_program_fdi_mphy(dev_priv);
8559
		}
8526
		}
8560
	}
8527
	}
8561
 
8528
 
8562
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8529
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8563
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8530
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8564
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8531
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8565
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8532
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8566
 
8533
 
8567
	mutex_unlock(&dev_priv->sb_lock);
8534
	mutex_unlock(&dev_priv->sb_lock);
8568
}
8535
}
8569
 
8536
 
8570
/* Sequence to disable CLKOUT_DP */
8537
/* Sequence to disable CLKOUT_DP */
8571
static void lpt_disable_clkout_dp(struct drm_device *dev)
8538
static void lpt_disable_clkout_dp(struct drm_device *dev)
8572
{
8539
{
8573
	struct drm_i915_private *dev_priv = dev->dev_private;
8540
	struct drm_i915_private *dev_priv = dev->dev_private;
8574
	uint32_t reg, tmp;
8541
	uint32_t reg, tmp;
8575
 
8542
 
8576
	mutex_lock(&dev_priv->sb_lock);
8543
	mutex_lock(&dev_priv->sb_lock);
8577
 
8544
 
8578
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8545
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8579
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8546
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8580
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8547
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8581
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8548
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8582
 
8549
 
8583
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8550
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8584
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
8551
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
8585
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
8552
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
8586
			tmp |= SBI_SSCCTL_PATHALT;
8553
			tmp |= SBI_SSCCTL_PATHALT;
8587
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8554
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8588
			udelay(32);
8555
			udelay(32);
8589
		}
8556
		}
8590
		tmp |= SBI_SSCCTL_DISABLE;
8557
		tmp |= SBI_SSCCTL_DISABLE;
8591
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8558
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8592
	}
8559
	}
8593
 
8560
 
8594
	mutex_unlock(&dev_priv->sb_lock);
8561
	mutex_unlock(&dev_priv->sb_lock);
8595
}
8562
}
8596
 
8563
 
8597
#define BEND_IDX(steps) ((50 + (steps)) / 5)
8564
#define BEND_IDX(steps) ((50 + (steps)) / 5)
8598
 
8565
 
8599
static const uint16_t sscdivintphase[] = {
8566
static const uint16_t sscdivintphase[] = {
8600
	[BEND_IDX( 50)] = 0x3B23,
8567
	[BEND_IDX( 50)] = 0x3B23,
8601
	[BEND_IDX( 45)] = 0x3B23,
8568
	[BEND_IDX( 45)] = 0x3B23,
8602
	[BEND_IDX( 40)] = 0x3C23,
8569
	[BEND_IDX( 40)] = 0x3C23,
8603
	[BEND_IDX( 35)] = 0x3C23,
8570
	[BEND_IDX( 35)] = 0x3C23,
8604
	[BEND_IDX( 30)] = 0x3D23,
8571
	[BEND_IDX( 30)] = 0x3D23,
8605
	[BEND_IDX( 25)] = 0x3D23,
8572
	[BEND_IDX( 25)] = 0x3D23,
8606
	[BEND_IDX( 20)] = 0x3E23,
8573
	[BEND_IDX( 20)] = 0x3E23,
8607
	[BEND_IDX( 15)] = 0x3E23,
8574
	[BEND_IDX( 15)] = 0x3E23,
8608
	[BEND_IDX( 10)] = 0x3F23,
8575
	[BEND_IDX( 10)] = 0x3F23,
8609
	[BEND_IDX(  5)] = 0x3F23,
8576
	[BEND_IDX(  5)] = 0x3F23,
8610
	[BEND_IDX(  0)] = 0x0025,
8577
	[BEND_IDX(  0)] = 0x0025,
8611
	[BEND_IDX( -5)] = 0x0025,
8578
	[BEND_IDX( -5)] = 0x0025,
8612
	[BEND_IDX(-10)] = 0x0125,
8579
	[BEND_IDX(-10)] = 0x0125,
8613
	[BEND_IDX(-15)] = 0x0125,
8580
	[BEND_IDX(-15)] = 0x0125,
8614
	[BEND_IDX(-20)] = 0x0225,
8581
	[BEND_IDX(-20)] = 0x0225,
8615
	[BEND_IDX(-25)] = 0x0225,
8582
	[BEND_IDX(-25)] = 0x0225,
8616
	[BEND_IDX(-30)] = 0x0325,
8583
	[BEND_IDX(-30)] = 0x0325,
8617
	[BEND_IDX(-35)] = 0x0325,
8584
	[BEND_IDX(-35)] = 0x0325,
8618
	[BEND_IDX(-40)] = 0x0425,
8585
	[BEND_IDX(-40)] = 0x0425,
8619
	[BEND_IDX(-45)] = 0x0425,
8586
	[BEND_IDX(-45)] = 0x0425,
8620
	[BEND_IDX(-50)] = 0x0525,
8587
	[BEND_IDX(-50)] = 0x0525,
8621
};
8588
};
8622
 
8589
 
8623
/*
8590
/*
8624
 * Bend CLKOUT_DP
8591
 * Bend CLKOUT_DP
8625
 * steps -50 to 50 inclusive, in steps of 5
8592
 * steps -50 to 50 inclusive, in steps of 5
8626
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8593
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8627
 * change in clock period = -(steps / 10) * 5.787 ps
8594
 * change in clock period = -(steps / 10) * 5.787 ps
8628
 */
8595
 */
8629
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8596
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8630
{
8597
{
8631
	uint32_t tmp;
8598
	uint32_t tmp;
8632
	int idx = BEND_IDX(steps);
8599
	int idx = BEND_IDX(steps);
8633
 
8600
 
8634
	if (WARN_ON(steps % 5 != 0))
8601
	if (WARN_ON(steps % 5 != 0))
8635
		return;
8602
		return;
8636
 
8603
 
8637
	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8604
	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8638
		return;
8605
		return;
8639
 
8606
 
8640
	mutex_lock(&dev_priv->sb_lock);
8607
	mutex_lock(&dev_priv->sb_lock);
8641
 
8608
 
8642
	if (steps % 10 != 0)
8609
	if (steps % 10 != 0)
8643
		tmp = 0xAAAAAAAB;
8610
		tmp = 0xAAAAAAAB;
8644
	else
8611
	else
8645
		tmp = 0x00000000;
8612
		tmp = 0x00000000;
8646
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8613
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8647
 
8614
 
8648
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8615
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8649
	tmp &= 0xffff0000;
8616
	tmp &= 0xffff0000;
8650
	tmp |= sscdivintphase[idx];
8617
	tmp |= sscdivintphase[idx];
8651
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8618
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8652
 
8619
 
8653
	mutex_unlock(&dev_priv->sb_lock);
8620
	mutex_unlock(&dev_priv->sb_lock);
8654
}
8621
}
8655
 
8622
 
8656
#undef BEND_IDX
8623
#undef BEND_IDX
8657
 
8624
 
8658
static void lpt_init_pch_refclk(struct drm_device *dev)
8625
static void lpt_init_pch_refclk(struct drm_device *dev)
8659
{
8626
{
8660
	struct intel_encoder *encoder;
8627
	struct intel_encoder *encoder;
8661
	bool has_vga = false;
8628
	bool has_vga = false;
8662
 
8629
 
8663
	for_each_intel_encoder(dev, encoder) {
8630
	for_each_intel_encoder(dev, encoder) {
8664
		switch (encoder->type) {
8631
		switch (encoder->type) {
8665
		case INTEL_OUTPUT_ANALOG:
8632
		case INTEL_OUTPUT_ANALOG:
8666
			has_vga = true;
8633
			has_vga = true;
8667
			break;
8634
			break;
8668
		default:
8635
		default:
8669
			break;
8636
			break;
8670
		}
8637
		}
8671
	}
8638
	}
8672
 
8639
 
8673
	if (has_vga) {
8640
	if (has_vga) {
8674
		lpt_bend_clkout_dp(to_i915(dev), 0);
8641
		lpt_bend_clkout_dp(to_i915(dev), 0);
8675
		lpt_enable_clkout_dp(dev, true, true);
8642
		lpt_enable_clkout_dp(dev, true, true);
8676
	} else {
8643
	} else {
8677
		lpt_disable_clkout_dp(dev);
8644
		lpt_disable_clkout_dp(dev);
8678
}
8645
	}
8679
}
8646
}
8680
 
8647
 
8681
/*
 * Initialize reference clocks when the driver loads, dispatching on
 * the detected PCH generation.
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
8691
 
8658
 
8692
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
8659
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
8693
{
8660
{
8694
	struct drm_device *dev = crtc_state->base.crtc->dev;
8661
	struct drm_device *dev = crtc_state->base.crtc->dev;
8695
	struct drm_i915_private *dev_priv = dev->dev_private;
8662
	struct drm_i915_private *dev_priv = dev->dev_private;
8696
	struct drm_atomic_state *state = crtc_state->base.state;
8663
	struct drm_atomic_state *state = crtc_state->base.state;
8697
	struct drm_connector *connector;
8664
	struct drm_connector *connector;
8698
	struct drm_connector_state *connector_state;
8665
	struct drm_connector_state *connector_state;
8699
	struct intel_encoder *encoder;
8666
	struct intel_encoder *encoder;
8700
	int num_connectors = 0, i;
8667
	int num_connectors = 0, i;
8701
	bool is_lvds = false;
8668
	bool is_lvds = false;
8702
 
8669
 
8703
	for_each_connector_in_state(state, connector, connector_state, i) {
8670
	for_each_connector_in_state(state, connector, connector_state, i) {
8704
		if (connector_state->crtc != crtc_state->base.crtc)
8671
		if (connector_state->crtc != crtc_state->base.crtc)
8705
			continue;
8672
			continue;
8706
 
8673
 
8707
		encoder = to_intel_encoder(connector_state->best_encoder);
8674
		encoder = to_intel_encoder(connector_state->best_encoder);
8708
 
8675
 
8709
		switch (encoder->type) {
8676
		switch (encoder->type) {
8710
		case INTEL_OUTPUT_LVDS:
8677
		case INTEL_OUTPUT_LVDS:
8711
			is_lvds = true;
8678
			is_lvds = true;
8712
			break;
8679
			break;
8713
		default:
8680
		default:
8714
			break;
8681
			break;
8715
		}
8682
		}
8716
		num_connectors++;
8683
		num_connectors++;
8717
	}
8684
	}
8718
 
8685
 
8719
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
8686
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
8720
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8687
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8721
			      dev_priv->vbt.lvds_ssc_freq);
8688
			      dev_priv->vbt.lvds_ssc_freq);
8722
		return dev_priv->vbt.lvds_ssc_freq;
8689
		return dev_priv->vbt.lvds_ssc_freq;
8723
	}
8690
	}
8724
 
8691
 
8725
	return 120000;
8692
	return 120000;
8726
}
8693
}
8727
 
8694
 
8728
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8695
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8729
{
8696
{
8730
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8697
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8731
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8698
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8732
	int pipe = intel_crtc->pipe;
8699
	int pipe = intel_crtc->pipe;
8733
	uint32_t val;
8700
	uint32_t val;
8734
 
8701
 
8735
	val = 0;
8702
	val = 0;
8736
 
8703
 
8737
	switch (intel_crtc->config->pipe_bpp) {
8704
	switch (intel_crtc->config->pipe_bpp) {
8738
	case 18:
8705
	case 18:
8739
		val |= PIPECONF_6BPC;
8706
		val |= PIPECONF_6BPC;
8740
		break;
8707
		break;
8741
	case 24:
8708
	case 24:
8742
		val |= PIPECONF_8BPC;
8709
		val |= PIPECONF_8BPC;
8743
		break;
8710
		break;
8744
	case 30:
8711
	case 30:
8745
		val |= PIPECONF_10BPC;
8712
		val |= PIPECONF_10BPC;
8746
		break;
8713
		break;
8747
	case 36:
8714
	case 36:
8748
		val |= PIPECONF_12BPC;
8715
		val |= PIPECONF_12BPC;
8749
		break;
8716
		break;
8750
	default:
8717
	default:
8751
		/* Case prevented by intel_choose_pipe_bpp_dither. */
8718
		/* Case prevented by intel_choose_pipe_bpp_dither. */
8752
		BUG();
8719
		BUG();
8753
	}
8720
	}
8754
 
8721
 
8755
	if (intel_crtc->config->dither)
8722
	if (intel_crtc->config->dither)
8756
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8723
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8757
 
8724
 
8758
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8725
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8759
		val |= PIPECONF_INTERLACED_ILK;
8726
		val |= PIPECONF_INTERLACED_ILK;
8760
	else
8727
	else
8761
		val |= PIPECONF_PROGRESSIVE;
8728
		val |= PIPECONF_PROGRESSIVE;
8762
 
8729
 
8763
	if (intel_crtc->config->limited_color_range)
8730
	if (intel_crtc->config->limited_color_range)
8764
		val |= PIPECONF_COLOR_RANGE_SELECT;
8731
		val |= PIPECONF_COLOR_RANGE_SELECT;
8765
 
8732
 
8766
	I915_WRITE(PIPECONF(pipe), val);
8733
	I915_WRITE(PIPECONF(pipe), val);
8767
	POSTING_READ(PIPECONF(pipe));
8734
	POSTING_READ(PIPECONF(pipe));
8768
}
8735
}
8769
 
8736
 
8770
/*
8737
/*
8771
 * Set up the pipe CSC unit.
8738
 * Set up the pipe CSC unit.
8772
 *
8739
 *
8773
 * Currently only full range RGB to limited range RGB conversion
8740
 * Currently only full range RGB to limited range RGB conversion
8774
 * is supported, but eventually this should handle various
8741
 * is supported, but eventually this should handle various
8775
 * RGB<->YCbCr scenarios as well.
8742
 * RGB<->YCbCr scenarios as well.
8776
 */
8743
 */
8777
static void intel_set_pipe_csc(struct drm_crtc *crtc)
8744
static void intel_set_pipe_csc(struct drm_crtc *crtc)
8778
{
8745
{
8779
	struct drm_device *dev = crtc->dev;
8746
	struct drm_device *dev = crtc->dev;
8780
	struct drm_i915_private *dev_priv = dev->dev_private;
8747
	struct drm_i915_private *dev_priv = dev->dev_private;
8781
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8748
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8782
	int pipe = intel_crtc->pipe;
8749
	int pipe = intel_crtc->pipe;
8783
	uint16_t coeff = 0x7800; /* 1.0 */
8750
	uint16_t coeff = 0x7800; /* 1.0 */
8784
 
8751
 
8785
	/*
8752
	/*
8786
	 * TODO: Check what kind of values actually come out of the pipe
8753
	 * TODO: Check what kind of values actually come out of the pipe
8787
	 * with these coeff/postoff values and adjust to get the best
8754
	 * with these coeff/postoff values and adjust to get the best
8788
	 * accuracy. Perhaps we even need to take the bpc value into
8755
	 * accuracy. Perhaps we even need to take the bpc value into
8789
	 * consideration.
8756
	 * consideration.
8790
	 */
8757
	 */
8791
 
8758
 
8792
	if (intel_crtc->config->limited_color_range)
8759
	if (intel_crtc->config->limited_color_range)
8793
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
8760
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
8794
 
8761
 
8795
	/*
8762
	/*
8796
	 * GY/GU and RY/RU should be the other way around according
8763
	 * GY/GU and RY/RU should be the other way around according
8797
	 * to BSpec, but reality doesn't agree. Just set them up in
8764
	 * to BSpec, but reality doesn't agree. Just set them up in
8798
	 * a way that results in the correct picture.
8765
	 * a way that results in the correct picture.
8799
	 */
8766
	 */
8800
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
8767
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
8801
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
8768
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
8802
 
8769
 
8803
	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
8770
	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
8804
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
8771
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
8805
 
8772
 
8806
	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
8773
	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
8807
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
8774
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
8808
 
8775
 
8809
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
8776
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
8810
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
8777
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
8811
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
8778
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
8812
 
8779
 
8813
	if (INTEL_INFO(dev)->gen > 6) {
8780
	if (INTEL_INFO(dev)->gen > 6) {
8814
		uint16_t postoff = 0;
8781
		uint16_t postoff = 0;
8815
 
8782
 
8816
		if (intel_crtc->config->limited_color_range)
8783
		if (intel_crtc->config->limited_color_range)
8817
			postoff = (16 * (1 << 12) / 255) & 0x1fff;
8784
			postoff = (16 * (1 << 12) / 255) & 0x1fff;
8818
 
8785
 
8819
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
8786
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
8820
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
8787
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
8821
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
8788
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
8822
 
8789
 
8823
		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
8790
		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
8824
	} else {
8791
	} else {
8825
		uint32_t mode = CSC_MODE_YUV_TO_RGB;
8792
		uint32_t mode = CSC_MODE_YUV_TO_RGB;
8826
 
8793
 
8827
		if (intel_crtc->config->limited_color_range)
8794
		if (intel_crtc->config->limited_color_range)
8828
			mode |= CSC_BLACK_SCREEN_OFFSET;
8795
			mode |= CSC_BLACK_SCREEN_OFFSET;
8829
 
8796
 
8830
		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
8797
		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
8831
	}
8798
	}
8832
}
8799
}
8833
 
8800
 
8834
static void haswell_set_pipeconf(struct drm_crtc *crtc)
8801
static void haswell_set_pipeconf(struct drm_crtc *crtc)
8835
{
8802
{
8836
	struct drm_device *dev = crtc->dev;
8803
	struct drm_device *dev = crtc->dev;
8837
	struct drm_i915_private *dev_priv = dev->dev_private;
8804
	struct drm_i915_private *dev_priv = dev->dev_private;
8838
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8805
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8839
	enum pipe pipe = intel_crtc->pipe;
8806
	enum pipe pipe = intel_crtc->pipe;
8840
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8807
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8841
	uint32_t val;
8808
	uint32_t val;
8842
 
8809
 
8843
	val = 0;
8810
	val = 0;
8844
 
8811
 
8845
	if (IS_HASWELL(dev) && intel_crtc->config->dither)
8812
	if (IS_HASWELL(dev) && intel_crtc->config->dither)
8846
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8813
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8847
 
8814
 
8848
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8815
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8849
		val |= PIPECONF_INTERLACED_ILK;
8816
		val |= PIPECONF_INTERLACED_ILK;
8850
	else
8817
	else
8851
		val |= PIPECONF_PROGRESSIVE;
8818
		val |= PIPECONF_PROGRESSIVE;
8852
 
8819
 
8853
	I915_WRITE(PIPECONF(cpu_transcoder), val);
8820
	I915_WRITE(PIPECONF(cpu_transcoder), val);
8854
	POSTING_READ(PIPECONF(cpu_transcoder));
8821
	POSTING_READ(PIPECONF(cpu_transcoder));
8855
 
8822
 
8856
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
8823
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
8857
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
8824
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
8858
 
8825
 
8859
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
8826
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
8860
		val = 0;
8827
		val = 0;
8861
 
8828
 
8862
		switch (intel_crtc->config->pipe_bpp) {
8829
		switch (intel_crtc->config->pipe_bpp) {
8863
		case 18:
8830
		case 18:
8864
			val |= PIPEMISC_DITHER_6_BPC;
8831
			val |= PIPEMISC_DITHER_6_BPC;
8865
			break;
8832
			break;
8866
		case 24:
8833
		case 24:
8867
			val |= PIPEMISC_DITHER_8_BPC;
8834
			val |= PIPEMISC_DITHER_8_BPC;
8868
			break;
8835
			break;
8869
		case 30:
8836
		case 30:
8870
			val |= PIPEMISC_DITHER_10_BPC;
8837
			val |= PIPEMISC_DITHER_10_BPC;
8871
			break;
8838
			break;
8872
		case 36:
8839
		case 36:
8873
			val |= PIPEMISC_DITHER_12_BPC;
8840
			val |= PIPEMISC_DITHER_12_BPC;
8874
			break;
8841
			break;
8875
		default:
8842
		default:
8876
			/* Case prevented by pipe_config_set_bpp. */
8843
			/* Case prevented by pipe_config_set_bpp. */
8877
			BUG();
8844
			BUG();
8878
		}
8845
		}
8879
 
8846
 
8880
		if (intel_crtc->config->dither)
8847
		if (intel_crtc->config->dither)
8881
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8848
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8882
 
8849
 
8883
		I915_WRITE(PIPEMISC(pipe), val);
8850
		I915_WRITE(PIPEMISC(pipe), val);
8884
	}
8851
	}
8885
}
8852
}
8886
 
8853
 
8887
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
8854
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
8888
				    struct intel_crtc_state *crtc_state,
8855
				    struct intel_crtc_state *crtc_state,
8889
				    intel_clock_t *clock,
8856
				    intel_clock_t *clock,
8890
				    bool *has_reduced_clock,
8857
				    bool *has_reduced_clock,
8891
				    intel_clock_t *reduced_clock)
8858
				    intel_clock_t *reduced_clock)
8892
{
8859
{
8893
	struct drm_device *dev = crtc->dev;
8860
	struct drm_device *dev = crtc->dev;
8894
	struct drm_i915_private *dev_priv = dev->dev_private;
8861
	struct drm_i915_private *dev_priv = dev->dev_private;
8895
	int refclk;
8862
	int refclk;
8896
	const intel_limit_t *limit;
8863
	const intel_limit_t *limit;
8897
	bool ret;
8864
	bool ret;
8898
 
8865
 
8899
	refclk = ironlake_get_refclk(crtc_state);
8866
	refclk = ironlake_get_refclk(crtc_state);
8900
 
8867
 
8901
	/*
8868
	/*
8902
	 * Returns a set of divisors for the desired target clock with the given
8869
	 * Returns a set of divisors for the desired target clock with the given
8903
	 * refclk, or FALSE.  The returned values represent the clock equation:
8870
	 * refclk, or FALSE.  The returned values represent the clock equation:
8904
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8871
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8905
	 */
8872
	 */
8906
	limit = intel_limit(crtc_state, refclk);
8873
	limit = intel_limit(crtc_state, refclk);
8907
	ret = dev_priv->display.find_dpll(limit, crtc_state,
8874
	ret = dev_priv->display.find_dpll(limit, crtc_state,
8908
					  crtc_state->port_clock,
8875
					  crtc_state->port_clock,
8909
					  refclk, NULL, clock);
8876
					  refclk, NULL, clock);
8910
	if (!ret)
8877
	if (!ret)
8911
		return false;
8878
		return false;
8912
 
8879
 
8913
	return true;
8880
	return true;
8914
}
8881
}
8915
 
8882
 
8916
/*
 * Number of link lanes required to carry target_clock (kHz) at the
 * given bpp over lanes of link_bw (kHz). The bandwidth budget is
 * inflated by 5% so spread spectrum clocking (max center spread is
 * 2.5%) can never oversubscribe the link.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/* Required bits per second, padded by 5% for SSC headroom. */
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t per_lane = link_bw * 8;

	/* Round up: a partially used lane is still a whole lane. */
	return (bps + per_lane - 1) / per_lane;
}
8926
 
8893
 
8927
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8894
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8928
{
8895
{
8929
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8896
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8930
}
8897
}
8931
 
8898
 
8932
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8899
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8933
				      struct intel_crtc_state *crtc_state,
8900
				      struct intel_crtc_state *crtc_state,
8934
				      u32 *fp,
8901
				      u32 *fp,
8935
				      intel_clock_t *reduced_clock, u32 *fp2)
8902
				      intel_clock_t *reduced_clock, u32 *fp2)
8936
{
8903
{
8937
	struct drm_crtc *crtc = &intel_crtc->base;
8904
	struct drm_crtc *crtc = &intel_crtc->base;
8938
	struct drm_device *dev = crtc->dev;
8905
	struct drm_device *dev = crtc->dev;
8939
	struct drm_i915_private *dev_priv = dev->dev_private;
8906
	struct drm_i915_private *dev_priv = dev->dev_private;
8940
	struct drm_atomic_state *state = crtc_state->base.state;
8907
	struct drm_atomic_state *state = crtc_state->base.state;
8941
	struct drm_connector *connector;
8908
	struct drm_connector *connector;
8942
	struct drm_connector_state *connector_state;
8909
	struct drm_connector_state *connector_state;
8943
	struct intel_encoder *encoder;
8910
	struct intel_encoder *encoder;
8944
	uint32_t dpll;
8911
	uint32_t dpll;
8945
	int factor, num_connectors = 0, i;
8912
	int factor, num_connectors = 0, i;
8946
	bool is_lvds = false, is_sdvo = false;
8913
	bool is_lvds = false, is_sdvo = false;
8947
 
8914
 
8948
	for_each_connector_in_state(state, connector, connector_state, i) {
8915
	for_each_connector_in_state(state, connector, connector_state, i) {
8949
		if (connector_state->crtc != crtc_state->base.crtc)
8916
		if (connector_state->crtc != crtc_state->base.crtc)
8950
			continue;
8917
			continue;
8951
 
8918
 
8952
		encoder = to_intel_encoder(connector_state->best_encoder);
8919
		encoder = to_intel_encoder(connector_state->best_encoder);
8953
 
8920
 
8954
		switch (encoder->type) {
8921
		switch (encoder->type) {
8955
		case INTEL_OUTPUT_LVDS:
8922
		case INTEL_OUTPUT_LVDS:
8956
			is_lvds = true;
8923
			is_lvds = true;
8957
			break;
8924
			break;
8958
		case INTEL_OUTPUT_SDVO:
8925
		case INTEL_OUTPUT_SDVO:
8959
		case INTEL_OUTPUT_HDMI:
8926
		case INTEL_OUTPUT_HDMI:
8960
			is_sdvo = true;
8927
			is_sdvo = true;
8961
			break;
8928
			break;
8962
		default:
8929
		default:
8963
			break;
8930
			break;
8964
		}
8931
		}
8965
 
8932
 
8966
		num_connectors++;
8933
		num_connectors++;
8967
	}
8934
	}
8968
 
8935
 
8969
	/* Enable autotuning of the PLL clock (if permissible) */
8936
	/* Enable autotuning of the PLL clock (if permissible) */
8970
	factor = 21;
8937
	factor = 21;
8971
	if (is_lvds) {
8938
	if (is_lvds) {
8972
		if ((intel_panel_use_ssc(dev_priv) &&
8939
		if ((intel_panel_use_ssc(dev_priv) &&
8973
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
8940
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
8974
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8941
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8975
			factor = 25;
8942
			factor = 25;
8976
	} else if (crtc_state->sdvo_tv_clock)
8943
	} else if (crtc_state->sdvo_tv_clock)
8977
		factor = 20;
8944
		factor = 20;
8978
 
8945
 
8979
	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8946
	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8980
		*fp |= FP_CB_TUNE;
8947
		*fp |= FP_CB_TUNE;
8981
 
8948
 
8982
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
8949
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
8983
		*fp2 |= FP_CB_TUNE;
8950
		*fp2 |= FP_CB_TUNE;
8984
 
8951
 
8985
	dpll = 0;
8952
	dpll = 0;
8986
 
8953
 
8987
	if (is_lvds)
8954
	if (is_lvds)
8988
		dpll |= DPLLB_MODE_LVDS;
8955
		dpll |= DPLLB_MODE_LVDS;
8989
	else
8956
	else
8990
		dpll |= DPLLB_MODE_DAC_SERIAL;
8957
		dpll |= DPLLB_MODE_DAC_SERIAL;
8991
 
8958
 
8992
	dpll |= (crtc_state->pixel_multiplier - 1)
8959
	dpll |= (crtc_state->pixel_multiplier - 1)
8993
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8960
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8994
 
8961
 
8995
	if (is_sdvo)
8962
	if (is_sdvo)
8996
		dpll |= DPLL_SDVO_HIGH_SPEED;
8963
		dpll |= DPLL_SDVO_HIGH_SPEED;
8997
	if (crtc_state->has_dp_encoder)
8964
	if (crtc_state->has_dp_encoder)
8998
		dpll |= DPLL_SDVO_HIGH_SPEED;
8965
		dpll |= DPLL_SDVO_HIGH_SPEED;
8999
 
8966
 
9000
	/* compute bitmask from p1 value */
8967
	/* compute bitmask from p1 value */
9001
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8968
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9002
	/* also FPA1 */
8969
	/* also FPA1 */
9003
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8970
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
9004
 
8971
 
9005
	switch (crtc_state->dpll.p2) {
8972
	switch (crtc_state->dpll.p2) {
9006
	case 5:
8973
	case 5:
9007
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8974
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9008
		break;
8975
		break;
9009
	case 7:
8976
	case 7:
9010
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8977
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9011
		break;
8978
		break;
9012
	case 10:
8979
	case 10:
9013
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8980
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9014
		break;
8981
		break;
9015
	case 14:
8982
	case 14:
9016
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8983
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9017
		break;
8984
		break;
9018
	}
8985
	}
9019
 
8986
 
9020
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
8987
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
9021
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8988
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9022
	else
8989
	else
9023
		dpll |= PLL_REF_INPUT_DREFCLK;
8990
		dpll |= PLL_REF_INPUT_DREFCLK;
9024
 
8991
 
9025
	return dpll | DPLL_VCO_ENABLE;
8992
	return dpll | DPLL_VCO_ENABLE;
9026
}
8993
}
9027
 
8994
 
9028
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8995
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9029
				       struct intel_crtc_state *crtc_state)
8996
				       struct intel_crtc_state *crtc_state)
9030
{
8997
{
9031
	struct drm_device *dev = crtc->base.dev;
8998
	struct drm_device *dev = crtc->base.dev;
9032
	intel_clock_t clock, reduced_clock;
8999
	intel_clock_t clock, reduced_clock;
9033
	u32 dpll = 0, fp = 0, fp2 = 0;
9000
	u32 dpll = 0, fp = 0, fp2 = 0;
9034
	bool ok, has_reduced_clock = false;
9001
	bool ok, has_reduced_clock = false;
9035
	bool is_lvds = false;
9002
	bool is_lvds = false;
9036
	struct intel_shared_dpll *pll;
9003
	struct intel_shared_dpll *pll;
9037
 
9004
 
9038
	memset(&crtc_state->dpll_hw_state, 0,
9005
	memset(&crtc_state->dpll_hw_state, 0,
9039
	       sizeof(crtc_state->dpll_hw_state));
9006
	       sizeof(crtc_state->dpll_hw_state));
9040
 
9007
 
9041
	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
9008
	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
9042
 
9009
 
9043
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
9010
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
9044
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
9011
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
9045
 
9012
 
9046
	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
9013
	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
9047
				     &has_reduced_clock, &reduced_clock);
9014
				     &has_reduced_clock, &reduced_clock);
9048
	if (!ok && !crtc_state->clock_set) {
9015
	if (!ok && !crtc_state->clock_set) {
9049
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
9016
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
9050
		return -EINVAL;
9017
		return -EINVAL;
9051
	}
9018
	}
9052
	/* Compat-code for transition, will disappear. */
9019
	/* Compat-code for transition, will disappear. */
9053
	if (!crtc_state->clock_set) {
9020
	if (!crtc_state->clock_set) {
9054
		crtc_state->dpll.n = clock.n;
9021
		crtc_state->dpll.n = clock.n;
9055
		crtc_state->dpll.m1 = clock.m1;
9022
		crtc_state->dpll.m1 = clock.m1;
9056
		crtc_state->dpll.m2 = clock.m2;
9023
		crtc_state->dpll.m2 = clock.m2;
9057
		crtc_state->dpll.p1 = clock.p1;
9024
		crtc_state->dpll.p1 = clock.p1;
9058
		crtc_state->dpll.p2 = clock.p2;
9025
		crtc_state->dpll.p2 = clock.p2;
9059
	}
9026
	}
9060
 
9027
 
9061
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9028
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9062
	if (crtc_state->has_pch_encoder) {
9029
	if (crtc_state->has_pch_encoder) {
9063
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9030
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9064
		if (has_reduced_clock)
9031
		if (has_reduced_clock)
9065
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
9032
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
9066
 
9033
 
9067
		dpll = ironlake_compute_dpll(crtc, crtc_state,
9034
		dpll = ironlake_compute_dpll(crtc, crtc_state,
9068
					     &fp, &reduced_clock,
9035
					     &fp, &reduced_clock,
9069
					     has_reduced_clock ? &fp2 : NULL);
9036
					     has_reduced_clock ? &fp2 : NULL);
9070
 
9037
 
9071
		crtc_state->dpll_hw_state.dpll = dpll;
9038
		crtc_state->dpll_hw_state.dpll = dpll;
9072
		crtc_state->dpll_hw_state.fp0 = fp;
9039
		crtc_state->dpll_hw_state.fp0 = fp;
9073
		if (has_reduced_clock)
9040
		if (has_reduced_clock)
9074
			crtc_state->dpll_hw_state.fp1 = fp2;
9041
			crtc_state->dpll_hw_state.fp1 = fp2;
9075
		else
9042
		else
9076
			crtc_state->dpll_hw_state.fp1 = fp;
9043
			crtc_state->dpll_hw_state.fp1 = fp;
9077
 
9044
 
9078
		pll = intel_get_shared_dpll(crtc, crtc_state);
9045
		pll = intel_get_shared_dpll(crtc, crtc_state);
9079
		if (pll == NULL) {
9046
		if (pll == NULL) {
9080
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
9047
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
9081
					 pipe_name(crtc->pipe));
9048
					 pipe_name(crtc->pipe));
9082
			return -EINVAL;
9049
			return -EINVAL;
9083
		}
9050
		}
9084
	}
9051
	}
9085
 
9052
 
9086
	if (is_lvds && has_reduced_clock)
9053
	if (is_lvds && has_reduced_clock)
9087
		crtc->lowfreq_avail = true;
9054
		crtc->lowfreq_avail = true;
9088
	else
9055
	else
9089
		crtc->lowfreq_avail = false;
9056
		crtc->lowfreq_avail = false;
9090
 
9057
 
9091
	return 0;
9058
	return 0;
9092
}
9059
}
9093
 
9060
 
9094
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9061
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9095
					 struct intel_link_m_n *m_n)
9062
					 struct intel_link_m_n *m_n)
9096
{
9063
{
9097
	struct drm_device *dev = crtc->base.dev;
9064
	struct drm_device *dev = crtc->base.dev;
9098
	struct drm_i915_private *dev_priv = dev->dev_private;
9065
	struct drm_i915_private *dev_priv = dev->dev_private;
9099
	enum pipe pipe = crtc->pipe;
9066
	enum pipe pipe = crtc->pipe;
9100
 
9067
 
9101
	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9068
	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9102
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9069
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9103
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9070
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9104
		& ~TU_SIZE_MASK;
9071
		& ~TU_SIZE_MASK;
9105
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9072
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9106
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9073
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9107
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9074
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9108
}
9075
}
9109
 
9076
 
9110
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9077
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9111
					 enum transcoder transcoder,
9078
					 enum transcoder transcoder,
9112
					 struct intel_link_m_n *m_n,
9079
					 struct intel_link_m_n *m_n,
9113
					 struct intel_link_m_n *m2_n2)
9080
					 struct intel_link_m_n *m2_n2)
9114
{
9081
{
9115
	struct drm_device *dev = crtc->base.dev;
9082
	struct drm_device *dev = crtc->base.dev;
9116
	struct drm_i915_private *dev_priv = dev->dev_private;
9083
	struct drm_i915_private *dev_priv = dev->dev_private;
9117
	enum pipe pipe = crtc->pipe;
9084
	enum pipe pipe = crtc->pipe;
9118
 
9085
 
9119
	if (INTEL_INFO(dev)->gen >= 5) {
9086
	if (INTEL_INFO(dev)->gen >= 5) {
9120
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9087
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9121
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9088
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9122
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9089
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9123
			& ~TU_SIZE_MASK;
9090
			& ~TU_SIZE_MASK;
9124
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9091
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9125
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9092
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9126
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9093
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9127
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
9094
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
9128
		 * gen < 8) and if DRRS is supported (to make sure the
9095
		 * gen < 8) and if DRRS is supported (to make sure the
9129
		 * registers are not unnecessarily read).
9096
		 * registers are not unnecessarily read).
9130
		 */
9097
		 */
9131
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
9098
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
9132
			crtc->config->has_drrs) {
9099
			crtc->config->has_drrs) {
9133
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9100
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9134
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
9101
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
9135
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
9102
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
9136
					& ~TU_SIZE_MASK;
9103
					& ~TU_SIZE_MASK;
9137
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
9104
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
9138
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9105
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9139
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9106
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9140
		}
9107
		}
9141
	} else {
9108
	} else {
9142
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9109
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9143
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9110
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9144
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9111
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9145
			& ~TU_SIZE_MASK;
9112
			& ~TU_SIZE_MASK;
9146
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9113
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9147
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9114
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9148
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9115
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9149
	}
9116
	}
9150
}
9117
}
9151
 
9118
 
9152
void intel_dp_get_m_n(struct intel_crtc *crtc,
9119
void intel_dp_get_m_n(struct intel_crtc *crtc,
9153
		      struct intel_crtc_state *pipe_config)
9120
		      struct intel_crtc_state *pipe_config)
9154
{
9121
{
9155
	if (pipe_config->has_pch_encoder)
9122
	if (pipe_config->has_pch_encoder)
9156
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9123
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9157
	else
9124
	else
9158
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9125
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9159
					     &pipe_config->dp_m_n,
9126
					     &pipe_config->dp_m_n,
9160
					     &pipe_config->dp_m2_n2);
9127
					     &pipe_config->dp_m2_n2);
9161
}
9128
}
9162
 
9129
 
9163
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9130
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9164
					struct intel_crtc_state *pipe_config)
9131
					struct intel_crtc_state *pipe_config)
9165
{
9132
{
9166
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9133
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9167
				     &pipe_config->fdi_m_n, NULL);
9134
				     &pipe_config->fdi_m_n, NULL);
9168
}
9135
}
9169
 
9136
 
9170
static void skylake_get_pfit_config(struct intel_crtc *crtc,
9137
static void skylake_get_pfit_config(struct intel_crtc *crtc,
9171
				    struct intel_crtc_state *pipe_config)
9138
				    struct intel_crtc_state *pipe_config)
9172
{
9139
{
9173
	struct drm_device *dev = crtc->base.dev;
9140
	struct drm_device *dev = crtc->base.dev;
9174
	struct drm_i915_private *dev_priv = dev->dev_private;
9141
	struct drm_i915_private *dev_priv = dev->dev_private;
9175
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9142
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9176
	uint32_t ps_ctrl = 0;
9143
	uint32_t ps_ctrl = 0;
9177
	int id = -1;
9144
	int id = -1;
9178
	int i;
9145
	int i;
9179
 
9146
 
9180
	/* find scaler attached to this pipe */
9147
	/* find scaler attached to this pipe */
9181
	for (i = 0; i < crtc->num_scalers; i++) {
9148
	for (i = 0; i < crtc->num_scalers; i++) {
9182
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9149
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9183
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9150
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9184
			id = i;
9151
			id = i;
9185
			pipe_config->pch_pfit.enabled = true;
9152
			pipe_config->pch_pfit.enabled = true;
9186
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9153
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9187
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9154
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9188
			break;
9155
			break;
9189
		}
9156
		}
9190
	}
9157
	}
9191
 
9158
 
9192
	scaler_state->scaler_id = id;
9159
	scaler_state->scaler_id = id;
9193
	if (id >= 0) {
9160
	if (id >= 0) {
9194
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9161
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9195
	} else {
9162
	} else {
9196
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9163
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9197
	}
9164
	}
9198
}
9165
}
9199
 
9166
 
9200
static void
9167
static void
9201
skylake_get_initial_plane_config(struct intel_crtc *crtc,
9168
skylake_get_initial_plane_config(struct intel_crtc *crtc,
9202
				 struct intel_initial_plane_config *plane_config)
9169
				 struct intel_initial_plane_config *plane_config)
9203
{
9170
{
9204
	struct drm_device *dev = crtc->base.dev;
9171
	struct drm_device *dev = crtc->base.dev;
9205
	struct drm_i915_private *dev_priv = dev->dev_private;
9172
	struct drm_i915_private *dev_priv = dev->dev_private;
9206
	u32 val, base, offset, stride_mult, tiling;
9173
	u32 val, base, offset, stride_mult, tiling;
9207
	int pipe = crtc->pipe;
9174
	int pipe = crtc->pipe;
9208
	int fourcc, pixel_format;
9175
	int fourcc, pixel_format;
9209
	unsigned int aligned_height;
9176
	unsigned int aligned_height;
9210
	struct drm_framebuffer *fb;
9177
	struct drm_framebuffer *fb;
9211
	struct intel_framebuffer *intel_fb;
9178
	struct intel_framebuffer *intel_fb;
9212
 
9179
 
9213
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9180
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9214
	if (!intel_fb) {
9181
	if (!intel_fb) {
9215
		DRM_DEBUG_KMS("failed to alloc fb\n");
9182
		DRM_DEBUG_KMS("failed to alloc fb\n");
9216
		return;
9183
		return;
9217
	}
9184
	}
9218
 
9185
 
9219
	fb = &intel_fb->base;
9186
	fb = &intel_fb->base;
9220
 
9187
 
9221
	val = I915_READ(PLANE_CTL(pipe, 0));
9188
	val = I915_READ(PLANE_CTL(pipe, 0));
9222
	if (!(val & PLANE_CTL_ENABLE))
9189
	if (!(val & PLANE_CTL_ENABLE))
9223
		goto error;
9190
		goto error;
9224
 
9191
 
9225
	pixel_format = val & PLANE_CTL_FORMAT_MASK;
9192
	pixel_format = val & PLANE_CTL_FORMAT_MASK;
9226
	fourcc = skl_format_to_fourcc(pixel_format,
9193
	fourcc = skl_format_to_fourcc(pixel_format,
9227
				      val & PLANE_CTL_ORDER_RGBX,
9194
				      val & PLANE_CTL_ORDER_RGBX,
9228
				      val & PLANE_CTL_ALPHA_MASK);
9195
				      val & PLANE_CTL_ALPHA_MASK);
9229
	fb->pixel_format = fourcc;
9196
	fb->pixel_format = fourcc;
9230
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9197
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9231
 
9198
 
9232
	tiling = val & PLANE_CTL_TILED_MASK;
9199
	tiling = val & PLANE_CTL_TILED_MASK;
9233
	switch (tiling) {
9200
	switch (tiling) {
9234
	case PLANE_CTL_TILED_LINEAR:
9201
	case PLANE_CTL_TILED_LINEAR:
9235
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9202
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9236
		break;
9203
		break;
9237
	case PLANE_CTL_TILED_X:
9204
	case PLANE_CTL_TILED_X:
9238
		plane_config->tiling = I915_TILING_X;
9205
		plane_config->tiling = I915_TILING_X;
9239
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9206
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9240
		break;
9207
		break;
9241
	case PLANE_CTL_TILED_Y:
9208
	case PLANE_CTL_TILED_Y:
9242
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9209
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9243
		break;
9210
		break;
9244
	case PLANE_CTL_TILED_YF:
9211
	case PLANE_CTL_TILED_YF:
9245
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9212
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9246
		break;
9213
		break;
9247
	default:
9214
	default:
9248
		MISSING_CASE(tiling);
9215
		MISSING_CASE(tiling);
9249
		goto error;
9216
		goto error;
9250
	}
9217
	}
9251
 
9218
 
9252
	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9219
	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9253
	plane_config->base = base;
9220
	plane_config->base = base;
9254
 
9221
 
9255
	offset = I915_READ(PLANE_OFFSET(pipe, 0));
9222
	offset = I915_READ(PLANE_OFFSET(pipe, 0));
9256
 
9223
 
9257
	val = I915_READ(PLANE_SIZE(pipe, 0));
9224
	val = I915_READ(PLANE_SIZE(pipe, 0));
9258
	fb->height = ((val >> 16) & 0xfff) + 1;
9225
	fb->height = ((val >> 16) & 0xfff) + 1;
9259
	fb->width = ((val >> 0) & 0x1fff) + 1;
9226
	fb->width = ((val >> 0) & 0x1fff) + 1;
9260
 
9227
 
9261
	val = I915_READ(PLANE_STRIDE(pipe, 0));
9228
	val = I915_READ(PLANE_STRIDE(pipe, 0));
9262
	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
9229
	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
9263
						fb->pixel_format);
9230
						fb->pixel_format);
9264
	fb->pitches[0] = (val & 0x3ff) * stride_mult;
9231
	fb->pitches[0] = (val & 0x3ff) * stride_mult;
9265
 
9232
 
9266
	aligned_height = intel_fb_align_height(dev, fb->height,
9233
	aligned_height = intel_fb_align_height(dev, fb->height,
9267
					       fb->pixel_format,
9234
					       fb->pixel_format,
9268
					       fb->modifier[0]);
9235
					       fb->modifier[0]);
9269
 
9236
 
9270
	plane_config->size = fb->pitches[0] * aligned_height;
9237
	plane_config->size = fb->pitches[0] * aligned_height;
9271
 
9238
 
9272
	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9239
	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9273
		      pipe_name(pipe), fb->width, fb->height,
9240
		      pipe_name(pipe), fb->width, fb->height,
9274
		      fb->bits_per_pixel, base, fb->pitches[0],
9241
		      fb->bits_per_pixel, base, fb->pitches[0],
9275
		      plane_config->size);
9242
		      plane_config->size);
9276
 
9243
 
9277
	plane_config->fb = intel_fb;
9244
	plane_config->fb = intel_fb;
9278
	return;
9245
	return;
9279
 
9246
 
9280
error:
9247
error:
9281
	kfree(fb);
9248
	kfree(fb);
9282
}
9249
}
9283
 
9250
 
9284
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9251
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9285
				     struct intel_crtc_state *pipe_config)
9252
				     struct intel_crtc_state *pipe_config)
9286
{
9253
{
9287
	struct drm_device *dev = crtc->base.dev;
9254
	struct drm_device *dev = crtc->base.dev;
9288
	struct drm_i915_private *dev_priv = dev->dev_private;
9255
	struct drm_i915_private *dev_priv = dev->dev_private;
9289
	uint32_t tmp;
9256
	uint32_t tmp;
9290
 
9257
 
9291
	tmp = I915_READ(PF_CTL(crtc->pipe));
9258
	tmp = I915_READ(PF_CTL(crtc->pipe));
9292
 
9259
 
9293
	if (tmp & PF_ENABLE) {
9260
	if (tmp & PF_ENABLE) {
9294
		pipe_config->pch_pfit.enabled = true;
9261
		pipe_config->pch_pfit.enabled = true;
9295
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9262
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9296
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9263
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9297
 
9264
 
9298
		/* We currently do not free assignements of panel fitters on
9265
		/* We currently do not free assignements of panel fitters on
9299
		 * ivb/hsw (since we don't use the higher upscaling modes which
9266
		 * ivb/hsw (since we don't use the higher upscaling modes which
9300
		 * differentiates them) so just WARN about this case for now. */
9267
		 * differentiates them) so just WARN about this case for now. */
9301
		if (IS_GEN7(dev)) {
9268
		if (IS_GEN7(dev)) {
9302
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9269
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9303
				PF_PIPE_SEL_IVB(crtc->pipe));
9270
				PF_PIPE_SEL_IVB(crtc->pipe));
9304
		}
9271
		}
9305
	}
9272
	}
9306
}
9273
}
9307
 
9274
 
9308
static void
9275
static void
9309
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9276
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9310
				  struct intel_initial_plane_config *plane_config)
9277
				  struct intel_initial_plane_config *plane_config)
9311
{
9278
{
9312
	struct drm_device *dev = crtc->base.dev;
9279
	struct drm_device *dev = crtc->base.dev;
9313
	struct drm_i915_private *dev_priv = dev->dev_private;
9280
	struct drm_i915_private *dev_priv = dev->dev_private;
9314
	u32 val, base, offset;
9281
	u32 val, base, offset;
9315
	int pipe = crtc->pipe;
9282
	int pipe = crtc->pipe;
9316
	int fourcc, pixel_format;
9283
	int fourcc, pixel_format;
9317
	unsigned int aligned_height;
9284
	unsigned int aligned_height;
9318
	struct drm_framebuffer *fb;
9285
	struct drm_framebuffer *fb;
9319
	struct intel_framebuffer *intel_fb;
9286
	struct intel_framebuffer *intel_fb;
9320
 
9287
 
9321
	val = I915_READ(DSPCNTR(pipe));
9288
	val = I915_READ(DSPCNTR(pipe));
9322
	if (!(val & DISPLAY_PLANE_ENABLE))
9289
	if (!(val & DISPLAY_PLANE_ENABLE))
9323
		return;
9290
		return;
9324
 
9291
 
9325
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9292
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9326
	if (!intel_fb) {
9293
	if (!intel_fb) {
9327
		DRM_DEBUG_KMS("failed to alloc fb\n");
9294
		DRM_DEBUG_KMS("failed to alloc fb\n");
9328
		return;
9295
		return;
9329
	}
9296
	}
9330
 
9297
 
9331
	fb = &intel_fb->base;
9298
	fb = &intel_fb->base;
9332
 
9299
 
9333
	if (INTEL_INFO(dev)->gen >= 4) {
9300
	if (INTEL_INFO(dev)->gen >= 4) {
9334
		if (val & DISPPLANE_TILED) {
9301
		if (val & DISPPLANE_TILED) {
9335
			plane_config->tiling = I915_TILING_X;
9302
			plane_config->tiling = I915_TILING_X;
9336
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9303
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9337
		}
9304
		}
9338
	}
9305
	}
9339
 
9306
 
9340
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9307
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9341
	fourcc = i9xx_format_to_fourcc(pixel_format);
9308
	fourcc = i9xx_format_to_fourcc(pixel_format);
9342
	fb->pixel_format = fourcc;
9309
	fb->pixel_format = fourcc;
9343
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9310
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9344
 
9311
 
9345
	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
9312
	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
9346
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
9313
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
9347
		offset = I915_READ(DSPOFFSET(pipe));
9314
		offset = I915_READ(DSPOFFSET(pipe));
9348
	} else {
9315
	} else {
9349
		if (plane_config->tiling)
9316
		if (plane_config->tiling)
9350
			offset = I915_READ(DSPTILEOFF(pipe));
9317
			offset = I915_READ(DSPTILEOFF(pipe));
9351
		else
9318
		else
9352
			offset = I915_READ(DSPLINOFF(pipe));
9319
			offset = I915_READ(DSPLINOFF(pipe));
9353
	}
9320
	}
9354
	plane_config->base = base;
9321
	plane_config->base = base;
9355
 
9322
 
9356
	val = I915_READ(PIPESRC(pipe));
9323
	val = I915_READ(PIPESRC(pipe));
9357
	fb->width = ((val >> 16) & 0xfff) + 1;
9324
	fb->width = ((val >> 16) & 0xfff) + 1;
9358
	fb->height = ((val >> 0) & 0xfff) + 1;
9325
	fb->height = ((val >> 0) & 0xfff) + 1;
9359
 
9326
 
9360
	val = I915_READ(DSPSTRIDE(pipe));
9327
	val = I915_READ(DSPSTRIDE(pipe));
9361
	fb->pitches[0] = val & 0xffffffc0;
9328
	fb->pitches[0] = val & 0xffffffc0;
9362
 
9329
 
9363
	aligned_height = intel_fb_align_height(dev, fb->height,
9330
	aligned_height = intel_fb_align_height(dev, fb->height,
9364
					       fb->pixel_format,
9331
					       fb->pixel_format,
9365
					       fb->modifier[0]);
9332
					       fb->modifier[0]);
9366
 
9333
 
9367
	plane_config->size = fb->pitches[0] * aligned_height;
9334
	plane_config->size = fb->pitches[0] * aligned_height;
9368
 
9335
 
9369
	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9336
	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9370
		      pipe_name(pipe), fb->width, fb->height,
9337
		      pipe_name(pipe), fb->width, fb->height,
9371
		      fb->bits_per_pixel, base, fb->pitches[0],
9338
		      fb->bits_per_pixel, base, fb->pitches[0],
9372
		      plane_config->size);
9339
		      plane_config->size);
9373
 
9340
 
9374
	plane_config->fb = intel_fb;
9341
	plane_config->fb = intel_fb;
9375
}
9342
}
9376
 
9343
 
9377
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9344
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9378
				     struct intel_crtc_state *pipe_config)
9345
				     struct intel_crtc_state *pipe_config)
9379
{
9346
{
9380
	struct drm_device *dev = crtc->base.dev;
9347
	struct drm_device *dev = crtc->base.dev;
9381
	struct drm_i915_private *dev_priv = dev->dev_private;
9348
	struct drm_i915_private *dev_priv = dev->dev_private;
9382
	enum intel_display_power_domain power_domain;
9349
	enum intel_display_power_domain power_domain;
9383
	uint32_t tmp;
9350
	uint32_t tmp;
9384
	bool ret;
9351
	bool ret;
9385
 
9352
 
9386
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9353
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9387
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9354
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9388
		return false;
9355
		return false;
9389
 
9356
 
9390
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9357
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9391
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9358
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9392
 
9359
 
9393
	ret = false;
9360
	ret = false;
9394
	tmp = I915_READ(PIPECONF(crtc->pipe));
9361
	tmp = I915_READ(PIPECONF(crtc->pipe));
9395
	if (!(tmp & PIPECONF_ENABLE))
9362
	if (!(tmp & PIPECONF_ENABLE))
9396
		goto out;
9363
		goto out;
9397
 
9364
 
9398
	switch (tmp & PIPECONF_BPC_MASK) {
9365
	switch (tmp & PIPECONF_BPC_MASK) {
9399
	case PIPECONF_6BPC:
9366
	case PIPECONF_6BPC:
9400
		pipe_config->pipe_bpp = 18;
9367
		pipe_config->pipe_bpp = 18;
9401
		break;
9368
		break;
9402
	case PIPECONF_8BPC:
9369
	case PIPECONF_8BPC:
9403
		pipe_config->pipe_bpp = 24;
9370
		pipe_config->pipe_bpp = 24;
9404
		break;
9371
		break;
9405
	case PIPECONF_10BPC:
9372
	case PIPECONF_10BPC:
9406
		pipe_config->pipe_bpp = 30;
9373
		pipe_config->pipe_bpp = 30;
9407
		break;
9374
		break;
9408
	case PIPECONF_12BPC:
9375
	case PIPECONF_12BPC:
9409
		pipe_config->pipe_bpp = 36;
9376
		pipe_config->pipe_bpp = 36;
9410
		break;
9377
		break;
9411
	default:
9378
	default:
9412
		break;
9379
		break;
9413
	}
9380
	}
9414
 
9381
 
9415
	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9382
	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9416
		pipe_config->limited_color_range = true;
9383
		pipe_config->limited_color_range = true;
9417
 
9384
 
9418
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9385
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9419
		struct intel_shared_dpll *pll;
9386
		struct intel_shared_dpll *pll;
9420
 
9387
 
9421
		pipe_config->has_pch_encoder = true;
9388
		pipe_config->has_pch_encoder = true;
9422
 
9389
 
9423
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9390
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9424
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9391
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9425
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9392
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9426
 
9393
 
9427
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9394
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9428
 
9395
 
9429
		if (HAS_PCH_IBX(dev_priv->dev)) {
9396
		if (HAS_PCH_IBX(dev_priv->dev)) {
9430
			pipe_config->shared_dpll =
9397
			pipe_config->shared_dpll =
9431
				(enum intel_dpll_id) crtc->pipe;
9398
				(enum intel_dpll_id) crtc->pipe;
9432
		} else {
9399
		} else {
9433
			tmp = I915_READ(PCH_DPLL_SEL);
9400
			tmp = I915_READ(PCH_DPLL_SEL);
9434
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9401
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9435
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
9402
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
9436
			else
9403
			else
9437
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
9404
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
9438
		}
9405
		}
9439
 
9406
 
9440
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9407
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9441
 
9408
 
9442
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
9409
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
9443
					   &pipe_config->dpll_hw_state));
9410
					   &pipe_config->dpll_hw_state));
9444
 
9411
 
9445
		tmp = pipe_config->dpll_hw_state.dpll;
9412
		tmp = pipe_config->dpll_hw_state.dpll;
9446
		pipe_config->pixel_multiplier =
9413
		pipe_config->pixel_multiplier =
9447
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9414
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9448
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9415
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9449
 
9416
 
9450
		ironlake_pch_clock_get(crtc, pipe_config);
9417
		ironlake_pch_clock_get(crtc, pipe_config);
9451
	} else {
9418
	} else {
9452
		pipe_config->pixel_multiplier = 1;
9419
		pipe_config->pixel_multiplier = 1;
9453
	}
9420
	}
9454
 
9421
 
9455
	intel_get_pipe_timings(crtc, pipe_config);
9422
	intel_get_pipe_timings(crtc, pipe_config);
9456
 
9423
 
9457
	ironlake_get_pfit_config(crtc, pipe_config);
9424
	ironlake_get_pfit_config(crtc, pipe_config);
9458
 
9425
 
9459
	ret = true;
9426
	ret = true;
9460
 
9427
 
9461
out:
9428
out:
9462
	intel_display_power_put(dev_priv, power_domain);
9429
	intel_display_power_put(dev_priv, power_domain);
9463
 
9430
 
9464
	return ret;
9431
	return ret;
9465
}
9432
}
9466
 
9433
 
9467
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9434
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9468
{
9435
{
9469
	struct drm_device *dev = dev_priv->dev;
9436
	struct drm_device *dev = dev_priv->dev;
9470
	struct intel_crtc *crtc;
9437
	struct intel_crtc *crtc;
9471
 
9438
 
9472
	for_each_intel_crtc(dev, crtc)
9439
	for_each_intel_crtc(dev, crtc)
9473
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9440
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9474
		     pipe_name(crtc->pipe));
9441
		     pipe_name(crtc->pipe));
9475
 
9442
 
9476
	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9443
	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9477
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9444
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9478
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9445
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9479
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9446
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9480
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9447
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9481
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9448
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9482
	     "CPU PWM1 enabled\n");
9449
	     "CPU PWM1 enabled\n");
9483
	if (IS_HASWELL(dev))
9450
	if (IS_HASWELL(dev))
9484
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9451
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9485
		     "CPU PWM2 enabled\n");
9452
		     "CPU PWM2 enabled\n");
9486
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9453
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9487
	     "PCH PWM1 enabled\n");
9454
	     "PCH PWM1 enabled\n");
9488
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9455
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9489
	     "Utility pin enabled\n");
9456
	     "Utility pin enabled\n");
9490
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9457
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9491
 
9458
 
9492
	/*
9459
	/*
9493
	 * In theory we can still leave IRQs enabled, as long as only the HPD
9460
	 * In theory we can still leave IRQs enabled, as long as only the HPD
9494
	 * interrupts remain enabled. We used to check for that, but since it's
9461
	 * interrupts remain enabled. We used to check for that, but since it's
9495
	 * gen-specific and since we only disable LCPLL after we fully disable
9462
	 * gen-specific and since we only disable LCPLL after we fully disable
9496
	 * the interrupts, the check below should be enough.
9463
	 * the interrupts, the check below should be enough.
9497
	 */
9464
	 */
9498
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9465
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9499
}
9466
}
9500
 
9467
 
9501
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9468
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9502
{
9469
{
9503
	struct drm_device *dev = dev_priv->dev;
9470
	struct drm_device *dev = dev_priv->dev;
9504
 
9471
 
9505
	if (IS_HASWELL(dev))
9472
	if (IS_HASWELL(dev))
9506
		return I915_READ(D_COMP_HSW);
9473
		return I915_READ(D_COMP_HSW);
9507
	else
9474
	else
9508
		return I915_READ(D_COMP_BDW);
9475
		return I915_READ(D_COMP_BDW);
9509
}
9476
}
9510
 
9477
 
9511
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9478
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9512
{
9479
{
9513
	struct drm_device *dev = dev_priv->dev;
9480
	struct drm_device *dev = dev_priv->dev;
9514
 
9481
 
9515
	if (IS_HASWELL(dev)) {
9482
	if (IS_HASWELL(dev)) {
9516
		mutex_lock(&dev_priv->rps.hw_lock);
9483
		mutex_lock(&dev_priv->rps.hw_lock);
9517
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9484
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9518
					    val))
9485
					    val))
9519
			DRM_ERROR("Failed to write to D_COMP\n");
9486
			DRM_ERROR("Failed to write to D_COMP\n");
9520
		mutex_unlock(&dev_priv->rps.hw_lock);
9487
		mutex_unlock(&dev_priv->rps.hw_lock);
9521
	} else {
9488
	} else {
9522
		I915_WRITE(D_COMP_BDW, val);
9489
		I915_WRITE(D_COMP_BDW, val);
9523
		POSTING_READ(D_COMP_BDW);
9490
		POSTING_READ(D_COMP_BDW);
9524
	}
9491
	}
9525
}
9492
}
9526
 
9493
 
9527
/*
9494
/*
9528
 * This function implements pieces of two sequences from BSpec:
9495
 * This function implements pieces of two sequences from BSpec:
9529
 * - Sequence for display software to disable LCPLL
9496
 * - Sequence for display software to disable LCPLL
9530
 * - Sequence for display software to allow package C8+
9497
 * - Sequence for display software to allow package C8+
9531
 * The steps implemented here are just the steps that actually touch the LCPLL
9498
 * The steps implemented here are just the steps that actually touch the LCPLL
9532
 * register. Callers should take care of disabling all the display engine
9499
 * register. Callers should take care of disabling all the display engine
9533
 * functions, doing the mode unset, fixing interrupts, etc.
9500
 * functions, doing the mode unset, fixing interrupts, etc.
9534
 */
9501
 */
9535
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9502
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9536
			      bool switch_to_fclk, bool allow_power_down)
9503
			      bool switch_to_fclk, bool allow_power_down)
9537
{
9504
{
9538
	uint32_t val;
9505
	uint32_t val;
9539
 
9506
 
9540
	assert_can_disable_lcpll(dev_priv);
9507
	assert_can_disable_lcpll(dev_priv);
9541
 
9508
 
9542
	val = I915_READ(LCPLL_CTL);
9509
	val = I915_READ(LCPLL_CTL);
9543
 
9510
 
9544
	if (switch_to_fclk) {
9511
	if (switch_to_fclk) {
9545
		val |= LCPLL_CD_SOURCE_FCLK;
9512
		val |= LCPLL_CD_SOURCE_FCLK;
9546
		I915_WRITE(LCPLL_CTL, val);
9513
		I915_WRITE(LCPLL_CTL, val);
9547
 
9514
 
9548
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9515
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9549
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
9516
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
9550
			DRM_ERROR("Switching to FCLK failed\n");
9517
			DRM_ERROR("Switching to FCLK failed\n");
9551
 
9518
 
9552
		val = I915_READ(LCPLL_CTL);
9519
		val = I915_READ(LCPLL_CTL);
9553
	}
9520
	}
9554
 
9521
 
9555
	val |= LCPLL_PLL_DISABLE;
9522
	val |= LCPLL_PLL_DISABLE;
9556
	I915_WRITE(LCPLL_CTL, val);
9523
	I915_WRITE(LCPLL_CTL, val);
9557
	POSTING_READ(LCPLL_CTL);
9524
	POSTING_READ(LCPLL_CTL);
9558
 
9525
 
9559
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9526
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9560
		DRM_ERROR("LCPLL still locked\n");
9527
		DRM_ERROR("LCPLL still locked\n");
9561
 
9528
 
9562
	val = hsw_read_dcomp(dev_priv);
9529
	val = hsw_read_dcomp(dev_priv);
9563
	val |= D_COMP_COMP_DISABLE;
9530
	val |= D_COMP_COMP_DISABLE;
9564
	hsw_write_dcomp(dev_priv, val);
9531
	hsw_write_dcomp(dev_priv, val);
9565
	ndelay(100);
9532
	ndelay(100);
9566
 
9533
 
9567
	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9534
	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9568
		     1))
9535
		     1))
9569
		DRM_ERROR("D_COMP RCOMP still in progress\n");
9536
		DRM_ERROR("D_COMP RCOMP still in progress\n");
9570
 
9537
 
9571
	if (allow_power_down) {
9538
	if (allow_power_down) {
9572
		val = I915_READ(LCPLL_CTL);
9539
		val = I915_READ(LCPLL_CTL);
9573
		val |= LCPLL_POWER_DOWN_ALLOW;
9540
		val |= LCPLL_POWER_DOWN_ALLOW;
9574
		I915_WRITE(LCPLL_CTL, val);
9541
		I915_WRITE(LCPLL_CTL, val);
9575
		POSTING_READ(LCPLL_CTL);
9542
		POSTING_READ(LCPLL_CTL);
9576
	}
9543
	}
9577
}
9544
}
9578
 
9545
 
9579
/*
9546
/*
9580
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9547
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9581
 * source.
9548
 * source.
9582
 */
9549
 */
9583
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9550
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9584
{
9551
{
9585
	uint32_t val;
9552
	uint32_t val;
9586
 
9553
 
9587
	val = I915_READ(LCPLL_CTL);
9554
	val = I915_READ(LCPLL_CTL);
9588
 
9555
 
9589
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9556
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9590
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9557
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9591
		return;
9558
		return;
9592
 
9559
 
9593
	/*
9560
	/*
9594
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
9561
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
9595
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9562
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9596
	 */
9563
	 */
9597
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9564
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9598
 
9565
 
9599
	if (val & LCPLL_POWER_DOWN_ALLOW) {
9566
	if (val & LCPLL_POWER_DOWN_ALLOW) {
9600
		val &= ~LCPLL_POWER_DOWN_ALLOW;
9567
		val &= ~LCPLL_POWER_DOWN_ALLOW;
9601
		I915_WRITE(LCPLL_CTL, val);
9568
		I915_WRITE(LCPLL_CTL, val);
9602
		POSTING_READ(LCPLL_CTL);
9569
		POSTING_READ(LCPLL_CTL);
9603
	}
9570
	}
9604
 
9571
 
9605
	val = hsw_read_dcomp(dev_priv);
9572
	val = hsw_read_dcomp(dev_priv);
9606
	val |= D_COMP_COMP_FORCE;
9573
	val |= D_COMP_COMP_FORCE;
9607
	val &= ~D_COMP_COMP_DISABLE;
9574
	val &= ~D_COMP_COMP_DISABLE;
9608
	hsw_write_dcomp(dev_priv, val);
9575
	hsw_write_dcomp(dev_priv, val);
9609
 
9576
 
9610
	val = I915_READ(LCPLL_CTL);
9577
	val = I915_READ(LCPLL_CTL);
9611
	val &= ~LCPLL_PLL_DISABLE;
9578
	val &= ~LCPLL_PLL_DISABLE;
9612
	I915_WRITE(LCPLL_CTL, val);
9579
	I915_WRITE(LCPLL_CTL, val);
9613
 
9580
 
9614
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9581
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9615
		DRM_ERROR("LCPLL not locked yet\n");
9582
		DRM_ERROR("LCPLL not locked yet\n");
9616
 
9583
 
9617
	if (val & LCPLL_CD_SOURCE_FCLK) {
9584
	if (val & LCPLL_CD_SOURCE_FCLK) {
9618
		val = I915_READ(LCPLL_CTL);
9585
		val = I915_READ(LCPLL_CTL);
9619
		val &= ~LCPLL_CD_SOURCE_FCLK;
9586
		val &= ~LCPLL_CD_SOURCE_FCLK;
9620
		I915_WRITE(LCPLL_CTL, val);
9587
		I915_WRITE(LCPLL_CTL, val);
9621
 
9588
 
9622
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9589
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9623
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9590
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9624
			DRM_ERROR("Switching back to LCPLL failed\n");
9591
			DRM_ERROR("Switching back to LCPLL failed\n");
9625
	}
9592
	}
9626
 
9593
 
9627
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9594
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9628
	intel_update_cdclk(dev_priv->dev);
9595
	intel_update_cdclk(dev_priv->dev);
9629
}
9596
}
9630
 
9597
 
9631
/*
9598
/*
9632
 * Package states C8 and deeper are really deep PC states that can only be
9599
 * Package states C8 and deeper are really deep PC states that can only be
9633
 * reached when all the devices on the system allow it, so even if the graphics
9600
 * reached when all the devices on the system allow it, so even if the graphics
9634
 * device allows PC8+, it doesn't mean the system will actually get to these
9601
 * device allows PC8+, it doesn't mean the system will actually get to these
9635
 * states. Our driver only allows PC8+ when going into runtime PM.
9602
 * states. Our driver only allows PC8+ when going into runtime PM.
9636
 *
9603
 *
9637
 * The requirements for PC8+ are that all the outputs are disabled, the power
9604
 * The requirements for PC8+ are that all the outputs are disabled, the power
9638
 * well is disabled and most interrupts are disabled, and these are also
9605
 * well is disabled and most interrupts are disabled, and these are also
9639
 * requirements for runtime PM. When these conditions are met, we manually do
9606
 * requirements for runtime PM. When these conditions are met, we manually do
9640
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9607
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9641
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9608
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9642
 * hang the machine.
9609
 * hang the machine.
9643
 *
9610
 *
9644
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9611
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9645
 * the state of some registers, so when we come back from PC8+ we need to
9612
 * the state of some registers, so when we come back from PC8+ we need to
9646
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9613
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9647
 * need to take care of the registers kept by RC6. Notice that this happens even
9614
 * need to take care of the registers kept by RC6. Notice that this happens even
9648
 * if we don't put the device in PCI D3 state (which is what currently happens
9615
 * if we don't put the device in PCI D3 state (which is what currently happens
9649
 * because of the runtime PM support).
9616
 * because of the runtime PM support).
9650
 *
9617
 *
9651
 * For more, read "Display Sequences for Package C8" on the hardware
9618
 * For more, read "Display Sequences for Package C8" on the hardware
9652
 * documentation.
9619
 * documentation.
9653
 */
9620
 */
9654
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9621
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9655
{
9622
{
9656
	struct drm_device *dev = dev_priv->dev;
9623
	struct drm_device *dev = dev_priv->dev;
9657
	uint32_t val;
9624
	uint32_t val;
9658
 
9625
 
9659
	DRM_DEBUG_KMS("Enabling package C8+\n");
9626
	DRM_DEBUG_KMS("Enabling package C8+\n");
9660
 
9627
 
9661
	if (HAS_PCH_LPT_LP(dev)) {
9628
	if (HAS_PCH_LPT_LP(dev)) {
9662
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9629
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9663
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9630
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9664
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9631
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9665
	}
9632
	}
9666
 
9633
 
9667
	lpt_disable_clkout_dp(dev);
9634
	lpt_disable_clkout_dp(dev);
9668
	hsw_disable_lcpll(dev_priv, true, true);
9635
	hsw_disable_lcpll(dev_priv, true, true);
9669
}
9636
}
9670
 
9637
 
9671
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9638
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9672
{
9639
{
9673
	struct drm_device *dev = dev_priv->dev;
9640
	struct drm_device *dev = dev_priv->dev;
9674
	uint32_t val;
9641
	uint32_t val;
9675
 
9642
 
9676
	DRM_DEBUG_KMS("Disabling package C8+\n");
9643
	DRM_DEBUG_KMS("Disabling package C8+\n");
9677
 
9644
 
9678
	hsw_restore_lcpll(dev_priv);
9645
	hsw_restore_lcpll(dev_priv);
9679
	lpt_init_pch_refclk(dev);
9646
	lpt_init_pch_refclk(dev);
9680
 
9647
 
9681
	if (HAS_PCH_LPT_LP(dev)) {
9648
	if (HAS_PCH_LPT_LP(dev)) {
9682
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9649
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9683
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9650
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9684
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9651
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9685
	}
9652
	}
9686
 
-
 
9687
	intel_prepare_ddi(dev);
-
 
9688
}
9653
}
9689
 
9654
 
9690
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9655
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9691
{
9656
{
9692
	struct drm_device *dev = old_state->dev;
9657
	struct drm_device *dev = old_state->dev;
-
 
9658
	struct intel_atomic_state *old_intel_state =
-
 
9659
		to_intel_atomic_state(old_state);
9693
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9660
	unsigned int req_cdclk = old_intel_state->dev_cdclk;
9694
 
9661
 
9695
	broxton_set_cdclk(dev, req_cdclk);
9662
	broxton_set_cdclk(dev, req_cdclk);
9696
}
9663
}
9697
 
9664
 
9698
/* compute the max rate for new configuration */
9665
/* compute the max rate for new configuration */
9699
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9666
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9700
{
9667
{
-
 
9668
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-
 
9669
	struct drm_i915_private *dev_priv = state->dev->dev_private;
9701
	struct intel_crtc *intel_crtc;
9670
	struct drm_crtc *crtc;
-
 
9671
	struct drm_crtc_state *cstate;
9702
	struct intel_crtc_state *crtc_state;
9672
	struct intel_crtc_state *crtc_state;
9703
	int max_pixel_rate = 0;
9673
	unsigned max_pixel_rate = 0, i;
-
 
9674
	enum pipe pipe;
9704
 
9675
 
9705
	for_each_intel_crtc(state->dev, intel_crtc) {
9676
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
9706
		int pixel_rate;
9677
	       sizeof(intel_state->min_pixclk));
9707
 
9678
 
9708
		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-
 
-
 
9679
	for_each_crtc_in_state(state, crtc, cstate, i) {
9709
		if (IS_ERR(crtc_state))
9680
		int pixel_rate;
-
 
9681
 
9710
			return PTR_ERR(crtc_state);
9682
		crtc_state = to_intel_crtc_state(cstate);
-
 
9683
		if (!crtc_state->base.enable) {
9711
 
9684
			intel_state->min_pixclk[i] = 0;
9712
		if (!crtc_state->base.enable)
9685
			continue;
9713
			continue;
9686
		}
9714
 
9687
 
9715
		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9688
		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9716
 
9689
 
9717
		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
9690
		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
9718
		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
9691
		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
9719
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9692
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9720
 
9693
 
9721
		max_pixel_rate = max(max_pixel_rate, pixel_rate);
9694
		intel_state->min_pixclk[i] = pixel_rate;
-
 
9695
	}
-
 
9696
 
-
 
9697
	for_each_pipe(dev_priv, pipe)
9722
	}
9698
		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
9723
 
9699
 
9724
	return max_pixel_rate;
9700
	return max_pixel_rate;
9725
}
9701
}
9726
 
9702
 
9727
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9703
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9728
{
9704
{
9729
	struct drm_i915_private *dev_priv = dev->dev_private;
9705
	struct drm_i915_private *dev_priv = dev->dev_private;
9730
	uint32_t val, data;
9706
	uint32_t val, data;
9731
	int ret;
9707
	int ret;
9732
 
9708
 
9733
	if (WARN((I915_READ(LCPLL_CTL) &
9709
	if (WARN((I915_READ(LCPLL_CTL) &
9734
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9710
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9735
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9711
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9736
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9712
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9737
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9713
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9738
		 "trying to change cdclk frequency with cdclk not enabled\n"))
9714
		 "trying to change cdclk frequency with cdclk not enabled\n"))
9739
		return;
9715
		return;
9740
 
9716
 
9741
	mutex_lock(&dev_priv->rps.hw_lock);
9717
	mutex_lock(&dev_priv->rps.hw_lock);
9742
	ret = sandybridge_pcode_write(dev_priv,
9718
	ret = sandybridge_pcode_write(dev_priv,
9743
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9719
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9744
	mutex_unlock(&dev_priv->rps.hw_lock);
9720
	mutex_unlock(&dev_priv->rps.hw_lock);
9745
	if (ret) {
9721
	if (ret) {
9746
		DRM_ERROR("failed to inform pcode about cdclk change\n");
9722
		DRM_ERROR("failed to inform pcode about cdclk change\n");
9747
		return;
9723
		return;
9748
	}
9724
	}
9749
 
9725
 
9750
	val = I915_READ(LCPLL_CTL);
9726
	val = I915_READ(LCPLL_CTL);
9751
	val |= LCPLL_CD_SOURCE_FCLK;
9727
	val |= LCPLL_CD_SOURCE_FCLK;
9752
	I915_WRITE(LCPLL_CTL, val);
9728
	I915_WRITE(LCPLL_CTL, val);
9753
 
9729
 
9754
	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9730
	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9755
			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
9731
			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
9756
		DRM_ERROR("Switching to FCLK failed\n");
9732
		DRM_ERROR("Switching to FCLK failed\n");
9757
 
9733
 
9758
	val = I915_READ(LCPLL_CTL);
9734
	val = I915_READ(LCPLL_CTL);
9759
	val &= ~LCPLL_CLK_FREQ_MASK;
9735
	val &= ~LCPLL_CLK_FREQ_MASK;
9760
 
9736
 
9761
	switch (cdclk) {
9737
	switch (cdclk) {
9762
	case 450000:
9738
	case 450000:
9763
		val |= LCPLL_CLK_FREQ_450;
9739
		val |= LCPLL_CLK_FREQ_450;
9764
		data = 0;
9740
		data = 0;
9765
		break;
9741
		break;
9766
	case 540000:
9742
	case 540000:
9767
		val |= LCPLL_CLK_FREQ_54O_BDW;
9743
		val |= LCPLL_CLK_FREQ_54O_BDW;
9768
		data = 1;
9744
		data = 1;
9769
		break;
9745
		break;
9770
	case 337500:
9746
	case 337500:
9771
		val |= LCPLL_CLK_FREQ_337_5_BDW;
9747
		val |= LCPLL_CLK_FREQ_337_5_BDW;
9772
		data = 2;
9748
		data = 2;
9773
		break;
9749
		break;
9774
	case 675000:
9750
	case 675000:
9775
		val |= LCPLL_CLK_FREQ_675_BDW;
9751
		val |= LCPLL_CLK_FREQ_675_BDW;
9776
		data = 3;
9752
		data = 3;
9777
		break;
9753
		break;
9778
	default:
9754
	default:
9779
		WARN(1, "invalid cdclk frequency\n");
9755
		WARN(1, "invalid cdclk frequency\n");
9780
		return;
9756
		return;
9781
	}
9757
	}
9782
 
9758
 
9783
	I915_WRITE(LCPLL_CTL, val);
9759
	I915_WRITE(LCPLL_CTL, val);
9784
 
9760
 
9785
	val = I915_READ(LCPLL_CTL);
9761
	val = I915_READ(LCPLL_CTL);
9786
	val &= ~LCPLL_CD_SOURCE_FCLK;
9762
	val &= ~LCPLL_CD_SOURCE_FCLK;
9787
	I915_WRITE(LCPLL_CTL, val);
9763
	I915_WRITE(LCPLL_CTL, val);
9788
 
9764
 
9789
	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9765
	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9790
				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9766
				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9791
		DRM_ERROR("Switching back to LCPLL failed\n");
9767
		DRM_ERROR("Switching back to LCPLL failed\n");
9792
 
9768
 
9793
	mutex_lock(&dev_priv->rps.hw_lock);
9769
	mutex_lock(&dev_priv->rps.hw_lock);
9794
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9770
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9795
	mutex_unlock(&dev_priv->rps.hw_lock);
9771
	mutex_unlock(&dev_priv->rps.hw_lock);
9796
 
9772
 
9797
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
9773
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
9798
 
9774
 
9799
	intel_update_cdclk(dev);
9775
	intel_update_cdclk(dev);
9800
 
9776
 
9801
	WARN(cdclk != dev_priv->cdclk_freq,
9777
	WARN(cdclk != dev_priv->cdclk_freq,
9802
	     "cdclk requested %d kHz but got %d kHz\n",
9778
	     "cdclk requested %d kHz but got %d kHz\n",
9803
	     cdclk, dev_priv->cdclk_freq);
9779
	     cdclk, dev_priv->cdclk_freq);
9804
}
9780
}
9805
 
9781
 
9806
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9782
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9807
{
9783
{
9808
	struct drm_i915_private *dev_priv = to_i915(state->dev);
9784
	struct drm_i915_private *dev_priv = to_i915(state->dev);
-
 
9785
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9809
	int max_pixclk = ilk_max_pixel_rate(state);
9786
	int max_pixclk = ilk_max_pixel_rate(state);
9810
	int cdclk;
9787
	int cdclk;
9811
 
9788
 
9812
	/*
9789
	/*
9813
	 * FIXME should also account for plane ratio
9790
	 * FIXME should also account for plane ratio
9814
	 * once 64bpp pixel formats are supported.
9791
	 * once 64bpp pixel formats are supported.
9815
	 */
9792
	 */
9816
	if (max_pixclk > 540000)
9793
	if (max_pixclk > 540000)
9817
		cdclk = 675000;
9794
		cdclk = 675000;
9818
	else if (max_pixclk > 450000)
9795
	else if (max_pixclk > 450000)
9819
		cdclk = 540000;
9796
		cdclk = 540000;
9820
	else if (max_pixclk > 337500)
9797
	else if (max_pixclk > 337500)
9821
		cdclk = 450000;
9798
		cdclk = 450000;
9822
	else
9799
	else
9823
		cdclk = 337500;
9800
		cdclk = 337500;
9824
 
9801
 
9825
	if (cdclk > dev_priv->max_cdclk_freq) {
9802
	if (cdclk > dev_priv->max_cdclk_freq) {
9826
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9803
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9827
			  cdclk, dev_priv->max_cdclk_freq);
9804
			      cdclk, dev_priv->max_cdclk_freq);
9828
		return -EINVAL;
9805
		return -EINVAL;
9829
	}
9806
	}
9830
 
9807
 
-
 
9808
	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
-
 
9809
	if (!intel_state->active_crtcs)
9831
	to_intel_atomic_state(state)->cdclk = cdclk;
9810
		intel_state->dev_cdclk = 337500;
9832
 
9811
 
9833
	return 0;
9812
	return 0;
9834
}
9813
}
9835
 
9814
 
9836
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9815
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9837
{
9816
{
9838
	struct drm_device *dev = old_state->dev;
9817
	struct drm_device *dev = old_state->dev;
-
 
9818
	struct intel_atomic_state *old_intel_state =
-
 
9819
		to_intel_atomic_state(old_state);
9839
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9820
	unsigned req_cdclk = old_intel_state->dev_cdclk;
9840
 
9821
 
9841
	broadwell_set_cdclk(dev, req_cdclk);
9822
	broadwell_set_cdclk(dev, req_cdclk);
9842
}
9823
}
9843
 
9824
 
9844
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9825
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9845
				      struct intel_crtc_state *crtc_state)
9826
				      struct intel_crtc_state *crtc_state)
9846
{
9827
{
-
 
9828
	struct intel_encoder *intel_encoder =
-
 
9829
		intel_ddi_get_crtc_new_encoder(crtc_state);
-
 
9830
 
-
 
9831
	if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9847
	if (!intel_ddi_pll_select(crtc, crtc_state))
9832
		if (!intel_ddi_pll_select(crtc, crtc_state))
9848
		return -EINVAL;
9833
			return -EINVAL;
-
 
9834
	}
9849
 
9835
 
9850
	crtc->lowfreq_avail = false;
9836
	crtc->lowfreq_avail = false;
9851
 
9837
 
9852
	return 0;
9838
	return 0;
9853
}
9839
}
9854
 
9840
 
9855
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9841
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9856
				enum port port,
9842
				enum port port,
9857
				struct intel_crtc_state *pipe_config)
9843
				struct intel_crtc_state *pipe_config)
9858
{
9844
{
9859
	switch (port) {
9845
	switch (port) {
9860
	case PORT_A:
9846
	case PORT_A:
9861
		pipe_config->ddi_pll_sel = SKL_DPLL0;
9847
		pipe_config->ddi_pll_sel = SKL_DPLL0;
9862
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9848
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9863
		break;
9849
		break;
9864
	case PORT_B:
9850
	case PORT_B:
9865
		pipe_config->ddi_pll_sel = SKL_DPLL1;
9851
		pipe_config->ddi_pll_sel = SKL_DPLL1;
9866
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9852
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9867
		break;
9853
		break;
9868
	case PORT_C:
9854
	case PORT_C:
9869
		pipe_config->ddi_pll_sel = SKL_DPLL2;
9855
		pipe_config->ddi_pll_sel = SKL_DPLL2;
9870
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9856
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9871
		break;
9857
		break;
9872
	default:
9858
	default:
9873
		DRM_ERROR("Incorrect port type\n");
9859
		DRM_ERROR("Incorrect port type\n");
9874
	}
9860
	}
9875
}
9861
}
9876
 
9862
 
9877
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9863
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9878
				enum port port,
9864
				enum port port,
9879
				struct intel_crtc_state *pipe_config)
9865
				struct intel_crtc_state *pipe_config)
9880
{
9866
{
9881
	u32 temp, dpll_ctl1;
9867
	u32 temp, dpll_ctl1;
9882
 
9868
 
9883
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9869
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9884
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9870
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9885
 
9871
 
9886
	switch (pipe_config->ddi_pll_sel) {
9872
	switch (pipe_config->ddi_pll_sel) {
9887
	case SKL_DPLL0:
9873
	case SKL_DPLL0:
9888
		/*
9874
		/*
9889
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
9875
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
9890
		 * of the shared DPLL framework and thus needs to be read out
9876
		 * of the shared DPLL framework and thus needs to be read out
9891
		 * separately
9877
		 * separately
9892
		 */
9878
		 */
9893
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
9879
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
9894
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
9880
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
9895
		break;
9881
		break;
9896
	case SKL_DPLL1:
9882
	case SKL_DPLL1:
9897
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9883
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9898
		break;
9884
		break;
9899
	case SKL_DPLL2:
9885
	case SKL_DPLL2:
9900
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9886
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9901
		break;
9887
		break;
9902
	case SKL_DPLL3:
9888
	case SKL_DPLL3:
9903
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9889
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9904
		break;
9890
		break;
9905
	}
9891
	}
9906
}
9892
}
9907
 
9893
 
9908
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9894
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9909
				enum port port,
9895
				enum port port,
9910
				struct intel_crtc_state *pipe_config)
9896
				struct intel_crtc_state *pipe_config)
9911
{
9897
{
9912
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9898
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9913
 
9899
 
9914
	switch (pipe_config->ddi_pll_sel) {
9900
	switch (pipe_config->ddi_pll_sel) {
9915
	case PORT_CLK_SEL_WRPLL1:
9901
	case PORT_CLK_SEL_WRPLL1:
9916
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
9902
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
9917
		break;
9903
		break;
9918
	case PORT_CLK_SEL_WRPLL2:
9904
	case PORT_CLK_SEL_WRPLL2:
9919
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
9905
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
9920
		break;
9906
		break;
9921
	case PORT_CLK_SEL_SPLL:
9907
	case PORT_CLK_SEL_SPLL:
9922
		pipe_config->shared_dpll = DPLL_ID_SPLL;
9908
		pipe_config->shared_dpll = DPLL_ID_SPLL;
9923
		break;
9909
		break;
9924
	}
9910
	}
9925
}
9911
}
9926
 
9912
 
9927
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9913
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9928
				       struct intel_crtc_state *pipe_config)
9914
				       struct intel_crtc_state *pipe_config)
9929
{
9915
{
9930
	struct drm_device *dev = crtc->base.dev;
9916
	struct drm_device *dev = crtc->base.dev;
9931
	struct drm_i915_private *dev_priv = dev->dev_private;
9917
	struct drm_i915_private *dev_priv = dev->dev_private;
9932
	struct intel_shared_dpll *pll;
9918
	struct intel_shared_dpll *pll;
9933
	enum port port;
9919
	enum port port;
9934
	uint32_t tmp;
9920
	uint32_t tmp;
9935
 
9921
 
9936
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9922
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9937
 
9923
 
9938
	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9924
	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9939
 
9925
 
9940
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
9926
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
9941
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
9927
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
9942
	else if (IS_BROXTON(dev))
9928
	else if (IS_BROXTON(dev))
9943
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
9929
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
9944
	else
9930
	else
9945
		haswell_get_ddi_pll(dev_priv, port, pipe_config);
9931
		haswell_get_ddi_pll(dev_priv, port, pipe_config);
9946
 
9932
 
9947
	if (pipe_config->shared_dpll >= 0) {
9933
	if (pipe_config->shared_dpll >= 0) {
9948
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9934
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9949
 
9935
 
9950
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
9936
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
9951
					   &pipe_config->dpll_hw_state));
9937
					   &pipe_config->dpll_hw_state));
9952
	}
9938
	}
9953
 
9939
 
9954
	/*
9940
	/*
9955
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
9941
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
9956
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
9942
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
9957
	 * the PCH transcoder is on.
9943
	 * the PCH transcoder is on.
9958
	 */
9944
	 */
9959
	if (INTEL_INFO(dev)->gen < 9 &&
9945
	if (INTEL_INFO(dev)->gen < 9 &&
9960
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9946
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9961
		pipe_config->has_pch_encoder = true;
9947
		pipe_config->has_pch_encoder = true;
9962
 
9948
 
9963
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9949
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9964
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9950
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9965
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9951
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9966
 
9952
 
9967
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9953
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9968
	}
9954
	}
9969
}
9955
}
9970
 
9956
 
9971
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9957
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9972
				    struct intel_crtc_state *pipe_config)
9958
				    struct intel_crtc_state *pipe_config)
9973
{
9959
{
9974
	struct drm_device *dev = crtc->base.dev;
9960
	struct drm_device *dev = crtc->base.dev;
9975
	struct drm_i915_private *dev_priv = dev->dev_private;
9961
	struct drm_i915_private *dev_priv = dev->dev_private;
9976
	enum intel_display_power_domain power_domain;
9962
	enum intel_display_power_domain power_domain;
9977
	unsigned long power_domain_mask;
9963
	unsigned long power_domain_mask;
9978
	uint32_t tmp;
9964
	uint32_t tmp;
9979
	bool ret;
9965
	bool ret;
9980
 
9966
 
9981
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9967
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9982
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9968
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9983
		return false;
9969
		return false;
9984
	power_domain_mask = BIT(power_domain);
9970
	power_domain_mask = BIT(power_domain);
9985
 
9971
 
9986
	ret = false;
9972
	ret = false;
9987
 
9973
 
9988
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9974
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9989
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9975
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9990
 
9976
 
9991
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9977
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9992
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
9978
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
9993
		enum pipe trans_edp_pipe;
9979
		enum pipe trans_edp_pipe;
9994
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9980
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9995
		default:
9981
		default:
9996
			WARN(1, "unknown pipe linked to edp transcoder\n");
9982
			WARN(1, "unknown pipe linked to edp transcoder\n");
9997
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
9983
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
9998
		case TRANS_DDI_EDP_INPUT_A_ON:
9984
		case TRANS_DDI_EDP_INPUT_A_ON:
9999
			trans_edp_pipe = PIPE_A;
9985
			trans_edp_pipe = PIPE_A;
10000
			break;
9986
			break;
10001
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
9987
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
10002
			trans_edp_pipe = PIPE_B;
9988
			trans_edp_pipe = PIPE_B;
10003
			break;
9989
			break;
10004
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
9990
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
10005
			trans_edp_pipe = PIPE_C;
9991
			trans_edp_pipe = PIPE_C;
10006
			break;
9992
			break;
10007
		}
9993
		}
10008
 
9994
 
10009
		if (trans_edp_pipe == crtc->pipe)
9995
		if (trans_edp_pipe == crtc->pipe)
10010
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
9996
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
10011
	}
9997
	}
10012
 
9998
 
10013
	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9999
	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10014
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10000
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10015
		goto out;
10001
		goto out;
10016
	power_domain_mask |= BIT(power_domain);
10002
	power_domain_mask |= BIT(power_domain);
10017
 
10003
 
10018
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10004
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10019
	if (!(tmp & PIPECONF_ENABLE))
10005
	if (!(tmp & PIPECONF_ENABLE))
10020
		goto out;
10006
		goto out;
10021
 
10007
 
10022
	haswell_get_ddi_port_state(crtc, pipe_config);
10008
	haswell_get_ddi_port_state(crtc, pipe_config);
10023
 
10009
 
10024
	intel_get_pipe_timings(crtc, pipe_config);
10010
	intel_get_pipe_timings(crtc, pipe_config);
10025
 
10011
 
10026
	if (INTEL_INFO(dev)->gen >= 9) {
10012
	if (INTEL_INFO(dev)->gen >= 9) {
10027
		skl_init_scalers(dev, crtc, pipe_config);
10013
		skl_init_scalers(dev, crtc, pipe_config);
10028
	}
10014
	}
10029
 
10015
 
10030
	if (INTEL_INFO(dev)->gen >= 9) {
10016
	if (INTEL_INFO(dev)->gen >= 9) {
10031
		pipe_config->scaler_state.scaler_id = -1;
10017
		pipe_config->scaler_state.scaler_id = -1;
10032
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10018
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10033
	}
10019
	}
10034
 
10020
 
10035
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10021
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10036
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10022
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10037
		power_domain_mask |= BIT(power_domain);
10023
		power_domain_mask |= BIT(power_domain);
10038
		if (INTEL_INFO(dev)->gen >= 9)
10024
		if (INTEL_INFO(dev)->gen >= 9)
10039
			skylake_get_pfit_config(crtc, pipe_config);
10025
			skylake_get_pfit_config(crtc, pipe_config);
10040
		else
10026
		else
10041
			ironlake_get_pfit_config(crtc, pipe_config);
10027
			ironlake_get_pfit_config(crtc, pipe_config);
10042
	}
10028
	}
10043
 
10029
 
10044
	if (IS_HASWELL(dev))
10030
	if (IS_HASWELL(dev))
10045
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10031
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10046
			(I915_READ(IPS_CTL) & IPS_ENABLE);
10032
			(I915_READ(IPS_CTL) & IPS_ENABLE);
10047
 
10033
 
10048
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
10034
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
10049
		pipe_config->pixel_multiplier =
10035
		pipe_config->pixel_multiplier =
10050
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10036
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10051
	} else {
10037
	} else {
10052
		pipe_config->pixel_multiplier = 1;
10038
		pipe_config->pixel_multiplier = 1;
10053
	}
10039
	}
10054
 
10040
 
10055
	ret = true;
10041
	ret = true;
10056
 
10042
 
10057
out:
10043
out:
10058
	for_each_power_domain(power_domain, power_domain_mask)
10044
	for_each_power_domain(power_domain, power_domain_mask)
10059
		intel_display_power_put(dev_priv, power_domain);
10045
		intel_display_power_put(dev_priv, power_domain);
10060
 
10046
 
10061
	return ret;
10047
	return ret;
10062
}
10048
}
10063
 
10049
 
-
 
10050
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
10064
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
10051
			       const struct intel_plane_state *plane_state)
10065
{
10052
{
10066
	struct drm_device *dev = crtc->dev;
10053
	struct drm_device *dev = crtc->dev;
10067
	struct drm_i915_private *dev_priv = dev->dev_private;
10054
	struct drm_i915_private *dev_priv = dev->dev_private;
10068
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10055
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10069
	uint32_t cntl = 0, size = 0;
10056
	uint32_t cntl = 0, size = 0;
10070
 
10057
 
10071
	if (on) {
10058
	if (plane_state && plane_state->visible) {
10072
		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
10059
		unsigned int width = plane_state->base.crtc_w;
10073
		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
10060
		unsigned int height = plane_state->base.crtc_h;
10074
		unsigned int stride = roundup_pow_of_two(width) * 4;
10061
		unsigned int stride = roundup_pow_of_two(width) * 4;
10075
 
10062
 
10076
		switch (stride) {
10063
		switch (stride) {
10077
		default:
10064
		default:
10078
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
10065
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
10079
				  width, stride);
10066
				  width, stride);
10080
			stride = 256;
10067
			stride = 256;
10081
			/* fallthrough */
10068
			/* fallthrough */
10082
		case 256:
10069
		case 256:
10083
		case 512:
10070
		case 512:
10084
		case 1024:
10071
		case 1024:
10085
		case 2048:
10072
		case 2048:
10086
			break;
10073
			break;
10087
		}
10074
		}
10088
 
10075
 
10089
		cntl |= CURSOR_ENABLE |
10076
		cntl |= CURSOR_ENABLE |
10090
			CURSOR_GAMMA_ENABLE |
10077
			CURSOR_GAMMA_ENABLE |
10091
			CURSOR_FORMAT_ARGB |
10078
			CURSOR_FORMAT_ARGB |
10092
			CURSOR_STRIDE(stride);
10079
			CURSOR_STRIDE(stride);
10093
 
10080
 
10094
		size = (height << 12) | width;
10081
		size = (height << 12) | width;
10095
	}
10082
	}
10096
 
10083
 
10097
	if (intel_crtc->cursor_cntl != 0 &&
10084
	if (intel_crtc->cursor_cntl != 0 &&
10098
	    (intel_crtc->cursor_base != base ||
10085
	    (intel_crtc->cursor_base != base ||
10099
	     intel_crtc->cursor_size != size ||
10086
	     intel_crtc->cursor_size != size ||
10100
	     intel_crtc->cursor_cntl != cntl)) {
10087
	     intel_crtc->cursor_cntl != cntl)) {
10101
		/* On these chipsets we can only modify the base/size/stride
10088
		/* On these chipsets we can only modify the base/size/stride
10102
		 * whilst the cursor is disabled.
10089
		 * whilst the cursor is disabled.
10103
		 */
10090
		 */
10104
		I915_WRITE(CURCNTR(PIPE_A), 0);
10091
		I915_WRITE(CURCNTR(PIPE_A), 0);
10105
		POSTING_READ(CURCNTR(PIPE_A));
10092
		POSTING_READ(CURCNTR(PIPE_A));
10106
		intel_crtc->cursor_cntl = 0;
10093
		intel_crtc->cursor_cntl = 0;
10107
	}
10094
	}
10108
 
10095
 
10109
	if (intel_crtc->cursor_base != base) {
10096
	if (intel_crtc->cursor_base != base) {
10110
		I915_WRITE(CURBASE(PIPE_A), base);
10097
		I915_WRITE(CURBASE(PIPE_A), base);
10111
		intel_crtc->cursor_base = base;
10098
		intel_crtc->cursor_base = base;
10112
	}
10099
	}
10113
 
10100
 
10114
	if (intel_crtc->cursor_size != size) {
10101
	if (intel_crtc->cursor_size != size) {
10115
		I915_WRITE(CURSIZE, size);
10102
		I915_WRITE(CURSIZE, size);
10116
		intel_crtc->cursor_size = size;
10103
		intel_crtc->cursor_size = size;
10117
	}
10104
	}
10118
 
10105
 
10119
	if (intel_crtc->cursor_cntl != cntl) {
10106
	if (intel_crtc->cursor_cntl != cntl) {
10120
		I915_WRITE(CURCNTR(PIPE_A), cntl);
10107
		I915_WRITE(CURCNTR(PIPE_A), cntl);
10121
		POSTING_READ(CURCNTR(PIPE_A));
10108
		POSTING_READ(CURCNTR(PIPE_A));
10122
		intel_crtc->cursor_cntl = cntl;
10109
		intel_crtc->cursor_cntl = cntl;
10123
	}
10110
	}
10124
}
10111
}
10125
 
10112
 
-
 
10113
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
10126
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
10114
			       const struct intel_plane_state *plane_state)
10127
{
10115
{
10128
	struct drm_device *dev = crtc->dev;
10116
	struct drm_device *dev = crtc->dev;
10129
	struct drm_i915_private *dev_priv = dev->dev_private;
10117
	struct drm_i915_private *dev_priv = dev->dev_private;
10130
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10118
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10131
	int pipe = intel_crtc->pipe;
10119
	int pipe = intel_crtc->pipe;
10132
	uint32_t cntl = 0;
10120
	uint32_t cntl = 0;
10133
 
10121
 
10134
	if (on) {
10122
	if (plane_state && plane_state->visible) {
10135
		cntl = MCURSOR_GAMMA_ENABLE;
10123
		cntl = MCURSOR_GAMMA_ENABLE;
10136
		switch (intel_crtc->base.cursor->state->crtc_w) {
10124
		switch (plane_state->base.crtc_w) {
10137
			case 64:
10125
			case 64:
10138
				cntl |= CURSOR_MODE_64_ARGB_AX;
10126
				cntl |= CURSOR_MODE_64_ARGB_AX;
10139
				break;
10127
				break;
10140
			case 128:
10128
			case 128:
10141
				cntl |= CURSOR_MODE_128_ARGB_AX;
10129
				cntl |= CURSOR_MODE_128_ARGB_AX;
10142
				break;
10130
				break;
10143
			case 256:
10131
			case 256:
10144
				cntl |= CURSOR_MODE_256_ARGB_AX;
10132
				cntl |= CURSOR_MODE_256_ARGB_AX;
10145
				break;
10133
				break;
10146
			default:
10134
			default:
10147
				MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
10135
				MISSING_CASE(plane_state->base.crtc_w);
10148
				return;
10136
				return;
10149
		}
10137
		}
10150
		cntl |= pipe << 28; /* Connect to correct pipe */
10138
		cntl |= pipe << 28; /* Connect to correct pipe */
10151
 
10139
 
10152
		if (HAS_DDI(dev))
10140
		if (HAS_DDI(dev))
10153
			cntl |= CURSOR_PIPE_CSC_ENABLE;
10141
			cntl |= CURSOR_PIPE_CSC_ENABLE;
10154
	}
-
 
10155
 
10142
 
10156
	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
10143
		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
-
 
10144
			cntl |= CURSOR_ROTATE_180;
10157
		cntl |= CURSOR_ROTATE_180;
10145
	}
10158
 
10146
 
10159
	if (intel_crtc->cursor_cntl != cntl) {
10147
	if (intel_crtc->cursor_cntl != cntl) {
10160
		I915_WRITE(CURCNTR(pipe), cntl);
10148
		I915_WRITE(CURCNTR(pipe), cntl);
10161
		POSTING_READ(CURCNTR(pipe));
10149
		POSTING_READ(CURCNTR(pipe));
10162
		intel_crtc->cursor_cntl = cntl;
10150
		intel_crtc->cursor_cntl = cntl;
10163
	}
10151
	}
10164
 
10152
 
10165
	/* and commit changes on next vblank */
10153
	/* and commit changes on next vblank */
10166
	I915_WRITE(CURBASE(pipe), base);
10154
	I915_WRITE(CURBASE(pipe), base);
10167
	POSTING_READ(CURBASE(pipe));
10155
	POSTING_READ(CURBASE(pipe));
10168
 
10156
 
10169
	intel_crtc->cursor_base = base;
10157
	intel_crtc->cursor_base = base;
10170
}
10158
}
10171
 
10159
 
10172
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
10160
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
10173
void intel_crtc_update_cursor(struct drm_crtc *crtc,
10161
void intel_crtc_update_cursor(struct drm_crtc *crtc,
10174
				     bool on)
10162
				     const struct intel_plane_state *plane_state)
10175
{
10163
{
10176
	struct drm_device *dev = crtc->dev;
10164
	struct drm_device *dev = crtc->dev;
10177
	struct drm_i915_private *dev_priv = dev->dev_private;
10165
	struct drm_i915_private *dev_priv = dev->dev_private;
10178
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10166
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10179
	int pipe = intel_crtc->pipe;
10167
	int pipe = intel_crtc->pipe;
10180
	struct drm_plane_state *cursor_state = crtc->cursor->state;
-
 
10181
	int x = cursor_state->crtc_x;
-
 
10182
	int y = cursor_state->crtc_y;
-
 
10183
	u32 base = 0, pos = 0;
-
 
10184
 
-
 
10185
	base = intel_crtc->cursor_addr;
10168
	u32 base = intel_crtc->cursor_addr;
10186
 
-
 
10187
	if (x >= intel_crtc->config->pipe_src_w)
-
 
10188
		on = false;
10169
	u32 pos = 0;
-
 
10170
 
10189
 
10171
	if (plane_state) {
10190
	if (y >= intel_crtc->config->pipe_src_h)
10172
		int x = plane_state->base.crtc_x;
10191
		on = false;
10173
		int y = plane_state->base.crtc_y;
10192
 
-
 
10193
	if (x < 0) {
-
 
10194
		if (x + cursor_state->crtc_w <= 0)
-
 
10195
			on = false;
10174
 
10196
 
10175
		if (x < 0) {
10197
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10176
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10198
		x = -x;
10177
			x = -x;
10199
	}
10178
		}
10200
	pos |= x << CURSOR_X_SHIFT;
10179
		pos |= x << CURSOR_X_SHIFT;
10201
 
10180
 
10202
	if (y < 0) {
10181
		if (y < 0) {
10203
		if (y + cursor_state->crtc_h <= 0)
-
 
10204
			on = false;
-
 
10205
 
-
 
10206
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10182
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10207
		y = -y;
10183
			y = -y;
10208
	}
10184
		}
10209
	pos |= y << CURSOR_Y_SHIFT;
10185
		pos |= y << CURSOR_Y_SHIFT;
10210
 
-
 
10211
	I915_WRITE(CURPOS(pipe), pos);
-
 
10212
 
10186
 
10213
	/* ILK+ do this automagically */
10187
		/* ILK+ do this automagically */
10214
	if (HAS_GMCH_DISPLAY(dev) &&
10188
		if (HAS_GMCH_DISPLAY(dev) &&
10215
	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
10189
		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
10216
		base += (cursor_state->crtc_h *
10190
			base += (plane_state->base.crtc_h *
-
 
10191
				 plane_state->base.crtc_w - 1) * 4;
10217
			 cursor_state->crtc_w - 1) * 4;
10192
		}
-
 
10193
	}
-
 
10194
 
10218
	}
10195
	I915_WRITE(CURPOS(pipe), pos);
10219
 
10196
 
10220
	if (IS_845G(dev) || IS_I865G(dev))
10197
	if (IS_845G(dev) || IS_I865G(dev))
10221
		i845_update_cursor(crtc, base, on);
10198
		i845_update_cursor(crtc, base, plane_state);
10222
	else
10199
	else
10223
		i9xx_update_cursor(crtc, base, on);
10200
		i9xx_update_cursor(crtc, base, plane_state);
10224
}
10201
}
10225
 
10202
 
10226
static bool cursor_size_ok(struct drm_device *dev,
10203
static bool cursor_size_ok(struct drm_device *dev,
10227
			   uint32_t width, uint32_t height)
10204
			   uint32_t width, uint32_t height)
10228
{
10205
{
10229
	if (width == 0 || height == 0)
10206
	if (width == 0 || height == 0)
10230
		return false;
10207
		return false;
10231
 
10208
 
10232
	/*
10209
	/*
10233
	 * 845g/865g are special in that they are only limited by
10210
	 * 845g/865g are special in that they are only limited by
10234
	 * the width of their cursors, the height is arbitrary up to
10211
	 * the width of their cursors, the height is arbitrary up to
10235
	 * the precision of the register. Everything else requires
10212
	 * the precision of the register. Everything else requires
10236
	 * square cursors, limited to a few power-of-two sizes.
10213
	 * square cursors, limited to a few power-of-two sizes.
10237
	 */
10214
	 */
10238
	if (IS_845G(dev) || IS_I865G(dev)) {
10215
	if (IS_845G(dev) || IS_I865G(dev)) {
10239
		if ((width & 63) != 0)
10216
		if ((width & 63) != 0)
10240
			return false;
10217
			return false;
10241
 
10218
 
10242
		if (width > (IS_845G(dev) ? 64 : 512))
10219
		if (width > (IS_845G(dev) ? 64 : 512))
10243
			return false;
10220
			return false;
10244
 
10221
 
10245
		if (height > 1023)
10222
		if (height > 1023)
10246
			return false;
10223
			return false;
10247
	} else {
10224
	} else {
10248
		switch (width | height) {
10225
		switch (width | height) {
10249
		case 256:
10226
		case 256:
10250
		case 128:
10227
		case 128:
10251
			if (IS_GEN2(dev))
10228
			if (IS_GEN2(dev))
10252
				return false;
10229
				return false;
10253
		case 64:
10230
		case 64:
10254
			break;
10231
			break;
10255
		default:
10232
		default:
10256
			return false;
10233
			return false;
10257
		}
10234
		}
10258
	}
10235
	}
10259
 
10236
 
10260
	return true;
10237
	return true;
10261
}
10238
}
10262
 
10239
 
10263
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
10240
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
10264
				 u16 *blue, uint32_t start, uint32_t size)
10241
				 u16 *blue, uint32_t start, uint32_t size)
10265
{
10242
{
10266
	int end = (start + size > 256) ? 256 : start + size, i;
10243
	int end = (start + size > 256) ? 256 : start + size, i;
10267
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10244
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10268
 
10245
 
10269
	for (i = start; i < end; i++) {
10246
	for (i = start; i < end; i++) {
10270
		intel_crtc->lut_r[i] = red[i] >> 8;
10247
		intel_crtc->lut_r[i] = red[i] >> 8;
10271
		intel_crtc->lut_g[i] = green[i] >> 8;
10248
		intel_crtc->lut_g[i] = green[i] >> 8;
10272
		intel_crtc->lut_b[i] = blue[i] >> 8;
10249
		intel_crtc->lut_b[i] = blue[i] >> 8;
10273
	}
10250
	}
10274
 
10251
 
10275
	intel_crtc_load_lut(crtc);
10252
	intel_crtc_load_lut(crtc);
10276
}
10253
}
10277
 
10254
 
10278
/* VESA 640x480x72Hz mode to set on the pipe */
10255
/* VESA 640x480x72Hz mode to set on the pipe */
10279
static struct drm_display_mode load_detect_mode = {
10256
static struct drm_display_mode load_detect_mode = {
10280
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10257
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10281
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10258
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10282
};
10259
};
10283
 
10260
 
10284
struct drm_framebuffer *
10261
struct drm_framebuffer *
10285
__intel_framebuffer_create(struct drm_device *dev,
10262
__intel_framebuffer_create(struct drm_device *dev,
10286
			   struct drm_mode_fb_cmd2 *mode_cmd,
10263
			   struct drm_mode_fb_cmd2 *mode_cmd,
10287
			   struct drm_i915_gem_object *obj)
10264
			   struct drm_i915_gem_object *obj)
10288
{
10265
{
10289
	struct intel_framebuffer *intel_fb;
10266
	struct intel_framebuffer *intel_fb;
10290
	int ret;
10267
	int ret;
10291
 
10268
 
10292
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10269
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10293
	if (!intel_fb)
10270
	if (!intel_fb)
10294
		return ERR_PTR(-ENOMEM);
10271
		return ERR_PTR(-ENOMEM);
10295
 
10272
 
10296
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10273
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10297
	if (ret)
10274
	if (ret)
10298
		goto err;
10275
		goto err;
10299
 
10276
 
10300
	return &intel_fb->base;
10277
	return &intel_fb->base;
10301
 
10278
 
10302
err:
10279
err:
10303
	kfree(intel_fb);
10280
	kfree(intel_fb);
10304
	return ERR_PTR(ret);
10281
	return ERR_PTR(ret);
10305
}
10282
}
10306
 
10283
 
10307
static struct drm_framebuffer *
10284
static struct drm_framebuffer *
10308
intel_framebuffer_create(struct drm_device *dev,
10285
intel_framebuffer_create(struct drm_device *dev,
10309
			 struct drm_mode_fb_cmd2 *mode_cmd,
10286
			 struct drm_mode_fb_cmd2 *mode_cmd,
10310
			 struct drm_i915_gem_object *obj)
10287
			 struct drm_i915_gem_object *obj)
10311
{
10288
{
10312
	struct drm_framebuffer *fb;
10289
	struct drm_framebuffer *fb;
10313
	int ret;
10290
	int ret;
10314
 
10291
 
10315
	ret = i915_mutex_lock_interruptible(dev);
10292
	ret = i915_mutex_lock_interruptible(dev);
10316
	if (ret)
10293
	if (ret)
10317
		return ERR_PTR(ret);
10294
		return ERR_PTR(ret);
10318
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10295
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10319
	mutex_unlock(&dev->struct_mutex);
10296
	mutex_unlock(&dev->struct_mutex);
10320
 
10297
 
10321
	return fb;
10298
	return fb;
10322
}
10299
}
10323
 
10300
 
10324
static u32
10301
static u32
10325
intel_framebuffer_pitch_for_width(int width, int bpp)
10302
intel_framebuffer_pitch_for_width(int width, int bpp)
10326
{
10303
{
10327
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10304
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10328
	return ALIGN(pitch, 64);
10305
	return ALIGN(pitch, 64);
10329
}
10306
}
10330
 
10307
 
10331
static u32
10308
static u32
10332
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10309
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10333
{
10310
{
10334
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10311
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10335
	return PAGE_ALIGN(pitch * mode->vdisplay);
10312
	return PAGE_ALIGN(pitch * mode->vdisplay);
10336
}
10313
}
10337
 
10314
 
10338
static struct drm_framebuffer *
10315
static struct drm_framebuffer *
10339
intel_framebuffer_create_for_mode(struct drm_device *dev,
10316
intel_framebuffer_create_for_mode(struct drm_device *dev,
10340
				  struct drm_display_mode *mode,
10317
				  struct drm_display_mode *mode,
10341
				  int depth, int bpp)
10318
				  int depth, int bpp)
10342
{
10319
{
10343
	struct drm_framebuffer *fb;
10320
	struct drm_framebuffer *fb;
10344
	struct drm_i915_gem_object *obj;
10321
	struct drm_i915_gem_object *obj;
10345
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10322
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10346
 
10323
 
10347
	obj = i915_gem_alloc_object(dev,
10324
	obj = i915_gem_alloc_object(dev,
10348
				    intel_framebuffer_size_for_mode(mode, bpp));
10325
				    intel_framebuffer_size_for_mode(mode, bpp));
10349
	if (obj == NULL)
10326
	if (obj == NULL)
10350
		return ERR_PTR(-ENOMEM);
10327
		return ERR_PTR(-ENOMEM);
10351
 
10328
 
10352
	mode_cmd.width = mode->hdisplay;
10329
	mode_cmd.width = mode->hdisplay;
10353
	mode_cmd.height = mode->vdisplay;
10330
	mode_cmd.height = mode->vdisplay;
10354
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10331
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10355
								bpp);
10332
								bpp);
10356
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10333
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10357
 
10334
 
10358
	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10335
	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10359
	if (IS_ERR(fb))
10336
	if (IS_ERR(fb))
10360
		drm_gem_object_unreference_unlocked(&obj->base);
10337
		drm_gem_object_unreference_unlocked(&obj->base);
10361
 
10338
 
10362
	return fb;
10339
	return fb;
10363
}
10340
}
10364
 
10341
 
10365
static struct drm_framebuffer *
10342
static struct drm_framebuffer *
10366
mode_fits_in_fbdev(struct drm_device *dev,
10343
mode_fits_in_fbdev(struct drm_device *dev,
10367
		   struct drm_display_mode *mode)
10344
		   struct drm_display_mode *mode)
10368
{
10345
{
10369
#ifdef CONFIG_DRM_FBDEV_EMULATION
10346
#ifdef CONFIG_DRM_FBDEV_EMULATION
10370
	struct drm_i915_private *dev_priv = dev->dev_private;
10347
	struct drm_i915_private *dev_priv = dev->dev_private;
10371
	struct drm_i915_gem_object *obj;
10348
	struct drm_i915_gem_object *obj;
10372
	struct drm_framebuffer *fb;
10349
	struct drm_framebuffer *fb;
10373
 
10350
 
10374
	if (!dev_priv->fbdev)
10351
	if (!dev_priv->fbdev)
10375
		return NULL;
10352
		return NULL;
10376
 
10353
 
10377
	if (!dev_priv->fbdev->fb)
10354
	if (!dev_priv->fbdev->fb)
10378
		return NULL;
10355
		return NULL;
10379
 
10356
 
10380
	obj = dev_priv->fbdev->fb->obj;
10357
	obj = dev_priv->fbdev->fb->obj;
10381
	BUG_ON(!obj);
10358
	BUG_ON(!obj);
10382
 
10359
 
10383
	fb = &dev_priv->fbdev->fb->base;
10360
	fb = &dev_priv->fbdev->fb->base;
10384
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10361
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10385
							       fb->bits_per_pixel))
10362
							       fb->bits_per_pixel))
10386
		return NULL;
10363
		return NULL;
10387
 
10364
 
10388
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
10365
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
10389
		return NULL;
10366
		return NULL;
-
 
10367
 
10390
 
10368
	drm_framebuffer_reference(fb);
10391
	return fb;
10369
	return fb;
10392
#else
10370
#else
10393
	return NULL;
10371
	return NULL;
10394
#endif
10372
#endif
10395
}
10373
}
10396
 
10374
 
10397
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10375
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10398
					   struct drm_crtc *crtc,
10376
					   struct drm_crtc *crtc,
10399
					   struct drm_display_mode *mode,
10377
					   struct drm_display_mode *mode,
10400
					   struct drm_framebuffer *fb,
10378
					   struct drm_framebuffer *fb,
10401
					   int x, int y)
10379
					   int x, int y)
10402
{
10380
{
10403
	struct drm_plane_state *plane_state;
10381
	struct drm_plane_state *plane_state;
10404
	int hdisplay, vdisplay;
10382
	int hdisplay, vdisplay;
10405
	int ret;
10383
	int ret;
10406
 
10384
 
10407
	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10385
	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10408
	if (IS_ERR(plane_state))
10386
	if (IS_ERR(plane_state))
10409
		return PTR_ERR(plane_state);
10387
		return PTR_ERR(plane_state);
10410
 
10388
 
10411
	if (mode)
10389
	if (mode)
10412
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10390
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10413
	else
10391
	else
10414
		hdisplay = vdisplay = 0;
10392
		hdisplay = vdisplay = 0;
10415
 
10393
 
10416
	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10394
	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10417
	if (ret)
10395
	if (ret)
10418
		return ret;
10396
		return ret;
10419
	drm_atomic_set_fb_for_plane(plane_state, fb);
10397
	drm_atomic_set_fb_for_plane(plane_state, fb);
10420
	plane_state->crtc_x = 0;
10398
	plane_state->crtc_x = 0;
10421
	plane_state->crtc_y = 0;
10399
	plane_state->crtc_y = 0;
10422
	plane_state->crtc_w = hdisplay;
10400
	plane_state->crtc_w = hdisplay;
10423
	plane_state->crtc_h = vdisplay;
10401
	plane_state->crtc_h = vdisplay;
10424
	plane_state->src_x = x << 16;
10402
	plane_state->src_x = x << 16;
10425
	plane_state->src_y = y << 16;
10403
	plane_state->src_y = y << 16;
10426
	plane_state->src_w = hdisplay << 16;
10404
	plane_state->src_w = hdisplay << 16;
10427
	plane_state->src_h = vdisplay << 16;
10405
	plane_state->src_h = vdisplay << 16;
10428
 
10406
 
10429
	return 0;
10407
	return 0;
10430
}
10408
}
10431
 
10409
 
10432
bool intel_get_load_detect_pipe(struct drm_connector *connector,
10410
bool intel_get_load_detect_pipe(struct drm_connector *connector,
10433
				struct drm_display_mode *mode,
10411
				struct drm_display_mode *mode,
10434
				struct intel_load_detect_pipe *old,
10412
				struct intel_load_detect_pipe *old,
10435
				struct drm_modeset_acquire_ctx *ctx)
10413
				struct drm_modeset_acquire_ctx *ctx)
10436
{
10414
{
10437
	struct intel_crtc *intel_crtc;
10415
	struct intel_crtc *intel_crtc;
10438
	struct intel_encoder *intel_encoder =
10416
	struct intel_encoder *intel_encoder =
10439
		intel_attached_encoder(connector);
10417
		intel_attached_encoder(connector);
10440
	struct drm_crtc *possible_crtc;
10418
	struct drm_crtc *possible_crtc;
10441
	struct drm_encoder *encoder = &intel_encoder->base;
10419
	struct drm_encoder *encoder = &intel_encoder->base;
10442
	struct drm_crtc *crtc = NULL;
10420
	struct drm_crtc *crtc = NULL;
10443
	struct drm_device *dev = encoder->dev;
10421
	struct drm_device *dev = encoder->dev;
10444
	struct drm_framebuffer *fb;
10422
	struct drm_framebuffer *fb;
10445
	struct drm_mode_config *config = &dev->mode_config;
10423
	struct drm_mode_config *config = &dev->mode_config;
10446
	struct drm_atomic_state *state = NULL;
10424
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
10447
	struct drm_connector_state *connector_state;
10425
	struct drm_connector_state *connector_state;
10448
	struct intel_crtc_state *crtc_state;
10426
	struct intel_crtc_state *crtc_state;
10449
	int ret, i = -1;
10427
	int ret, i = -1;
10450
 
10428
 
10451
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10429
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10452
		      connector->base.id, connector->name,
10430
		      connector->base.id, connector->name,
10453
		      encoder->base.id, encoder->name);
10431
		      encoder->base.id, encoder->name);
-
 
10432
 
-
 
10433
	old->restore_state = NULL;
10454
 
10434
 
10455
retry:
10435
retry:
10456
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
10436
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
10457
	if (ret)
10437
	if (ret)
10458
		goto fail;
10438
		goto fail;
10459
 
10439
 
10460
	/*
10440
	/*
10461
	 * Algorithm gets a little messy:
10441
	 * Algorithm gets a little messy:
10462
	 *
10442
	 *
10463
	 *   - if the connector already has an assigned crtc, use it (but make
10443
	 *   - if the connector already has an assigned crtc, use it (but make
10464
	 *     sure it's on first)
10444
	 *     sure it's on first)
10465
	 *
10445
	 *
10466
	 *   - try to find the first unused crtc that can drive this connector,
10446
	 *   - try to find the first unused crtc that can drive this connector,
10467
	 *     and use that if we find one
10447
	 *     and use that if we find one
10468
	 */
10448
	 */
10469
 
10449
 
10470
	/* See if we already have a CRTC for this connector */
10450
	/* See if we already have a CRTC for this connector */
10471
	if (encoder->crtc) {
10451
	if (connector->state->crtc) {
10472
		crtc = encoder->crtc;
10452
		crtc = connector->state->crtc;
10473
 
10453
 
10474
		ret = drm_modeset_lock(&crtc->mutex, ctx);
10454
		ret = drm_modeset_lock(&crtc->mutex, ctx);
10475
		if (ret)
10455
		if (ret)
10476
			goto fail;
10456
			goto fail;
10477
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
-
 
10478
		if (ret)
-
 
10479
			goto fail;
-
 
10480
 
-
 
10481
		old->dpms_mode = connector->dpms;
-
 
10482
		old->load_detect_temp = false;
-
 
10483
 
10457
 
10484
		/* Make sure the crtc and connector are running */
-
 
10485
		if (connector->dpms != DRM_MODE_DPMS_ON)
-
 
10486
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
-
 
10487
 
10458
		/* Make sure the crtc and connector are running */
10488
		return true;
10459
		goto found;
10489
	}
10460
	}
10490
 
10461
 
10491
	/* Find an unused one (if possible) */
10462
	/* Find an unused one (if possible) */
10492
	for_each_crtc(dev, possible_crtc) {
10463
	for_each_crtc(dev, possible_crtc) {
10493
		i++;
10464
		i++;
10494
		if (!(encoder->possible_crtcs & (1 << i)))
10465
		if (!(encoder->possible_crtcs & (1 << i)))
10495
			continue;
10466
			continue;
-
 
10467
 
-
 
10468
		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
-
 
10469
		if (ret)
-
 
10470
			goto fail;
-
 
10471
 
10496
		if (possible_crtc->state->enable)
10472
		if (possible_crtc->state->enable) {
-
 
10473
			drm_modeset_unlock(&possible_crtc->mutex);
10497
			continue;
10474
			continue;
-
 
10475
		}
10498
 
10476
 
10499
		crtc = possible_crtc;
10477
		crtc = possible_crtc;
10500
		break;
10478
		break;
10501
	}
10479
	}
10502
 
10480
 
10503
	/*
10481
	/*
10504
	 * If we didn't find an unused CRTC, don't use any.
10482
	 * If we didn't find an unused CRTC, don't use any.
10505
	 */
10483
	 */
10506
	if (!crtc) {
10484
	if (!crtc) {
10507
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
10485
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
10508
		goto fail;
10486
		goto fail;
10509
	}
10487
	}
10510
 
-
 
10511
	ret = drm_modeset_lock(&crtc->mutex, ctx);
10488
 
10512
	if (ret)
10489
found:
-
 
10490
	intel_crtc = to_intel_crtc(crtc);
10513
		goto fail;
10491
 
10514
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10492
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10515
	if (ret)
10493
	if (ret)
10516
		goto fail;
10494
		goto fail;
10517
 
-
 
10518
	intel_crtc = to_intel_crtc(crtc);
-
 
10519
	old->dpms_mode = connector->dpms;
-
 
10520
	old->load_detect_temp = true;
-
 
10521
	old->release_fb = NULL;
-
 
10522
 
10495
 
-
 
10496
	state = drm_atomic_state_alloc(dev);
10523
	state = drm_atomic_state_alloc(dev);
10497
	restore_state = drm_atomic_state_alloc(dev);
-
 
10498
	if (!state || !restore_state) {
10524
	if (!state)
10499
		ret = -ENOMEM;
-
 
10500
		goto fail;
10525
		return false;
10501
	}
-
 
10502
 
10526
 
10503
	state->acquire_ctx = ctx;
10527
	state->acquire_ctx = ctx;
10504
	restore_state->acquire_ctx = ctx;
10528
 
10505
 
10529
	connector_state = drm_atomic_get_connector_state(state, connector);
10506
	connector_state = drm_atomic_get_connector_state(state, connector);
10530
	if (IS_ERR(connector_state)) {
10507
	if (IS_ERR(connector_state)) {
10531
		ret = PTR_ERR(connector_state);
10508
		ret = PTR_ERR(connector_state);
10532
		goto fail;
10509
		goto fail;
10533
	}
10510
	}
10534
 
10511
 
-
 
10512
	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10535
	connector_state->crtc = crtc;
10513
	if (ret)
10536
	connector_state->best_encoder = &intel_encoder->base;
10514
		goto fail;
10537
 
10515
 
10538
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10516
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10539
	if (IS_ERR(crtc_state)) {
10517
	if (IS_ERR(crtc_state)) {
10540
		ret = PTR_ERR(crtc_state);
10518
		ret = PTR_ERR(crtc_state);
10541
		goto fail;
10519
		goto fail;
10542
	}
10520
	}
10543
 
10521
 
10544
	crtc_state->base.active = crtc_state->base.enable = true;
10522
	crtc_state->base.active = crtc_state->base.enable = true;
10545
 
10523
 
10546
	if (!mode)
10524
	if (!mode)
10547
		mode = &load_detect_mode;
10525
		mode = &load_detect_mode;
10548
 
10526
 
10549
	/* We need a framebuffer large enough to accommodate all accesses
10527
	/* We need a framebuffer large enough to accommodate all accesses
10550
	 * that the plane may generate whilst we perform load detection.
10528
	 * that the plane may generate whilst we perform load detection.
10551
	 * We can not rely on the fbcon either being present (we get called
10529
	 * We can not rely on the fbcon either being present (we get called
10552
	 * during its initialisation to detect all boot displays, or it may
10530
	 * during its initialisation to detect all boot displays, or it may
10553
	 * not even exist) or that it is large enough to satisfy the
10531
	 * not even exist) or that it is large enough to satisfy the
10554
	 * requested mode.
10532
	 * requested mode.
10555
	 */
10533
	 */
10556
	fb = mode_fits_in_fbdev(dev, mode);
10534
	fb = mode_fits_in_fbdev(dev, mode);
10557
	if (fb == NULL) {
10535
	if (fb == NULL) {
10558
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10536
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10559
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10537
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10560
		old->release_fb = fb;
-
 
10561
	} else
10538
	} else
10562
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10539
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10563
	if (IS_ERR(fb)) {
10540
	if (IS_ERR(fb)) {
10564
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10541
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10565
		goto fail;
10542
		goto fail;
10566
	}
10543
	}
10567
 
10544
 
10568
	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10545
	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10569
	if (ret)
10546
	if (ret)
10570
		goto fail;
10547
		goto fail;
10571
 
10548
 
-
 
10549
	drm_framebuffer_unreference(fb);
-
 
10550
 
-
 
10551
	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
-
 
10552
	if (ret)
-
 
10553
		goto fail;
-
 
10554
 
-
 
10555
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
-
 
10556
	if (!ret)
-
 
10557
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
-
 
10558
	if (!ret)
-
 
10559
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
-
 
10560
	if (ret) {
-
 
10561
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
-
 
10562
		goto fail;
10572
	drm_mode_copy(&crtc_state->base.mode, mode);
10563
	}
-
 
10564
 
10573
 
10565
	ret = drm_atomic_commit(state);
10574
	if (drm_atomic_commit(state)) {
-
 
10575
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
-
 
10576
		if (old->release_fb)
10566
	if (ret) {
10577
			old->release_fb->funcs->destroy(old->release_fb);
10567
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
-
 
10568
		goto fail;
10578
		goto fail;
10569
	}
10579
	}
10570
 
10580
	crtc->primary->crtc = crtc;
10571
	old->restore_state = restore_state;
10581
 
10572
 
10582
	/* let the connector get through one full cycle before testing */
10573
	/* let the connector get through one full cycle before testing */
10583
	intel_wait_for_vblank(dev, intel_crtc->pipe);
10574
	intel_wait_for_vblank(dev, intel_crtc->pipe);
10584
	return true;
10575
	return true;
10585
 
10576
 
10586
fail:
10577
fail:
10587
	drm_atomic_state_free(state);
10578
	drm_atomic_state_free(state);
-
 
10579
	drm_atomic_state_free(restore_state);
10588
	state = NULL;
10580
	restore_state = state = NULL;
10589
 
10581
 
10590
	if (ret == -EDEADLK) {
10582
	if (ret == -EDEADLK) {
10591
		drm_modeset_backoff(ctx);
10583
		drm_modeset_backoff(ctx);
10592
		goto retry;
10584
		goto retry;
10593
	}
10585
	}
10594
 
10586
 
10595
	return false;
10587
	return false;
10596
}
10588
}
10597
 
10589
 
10598
void intel_release_load_detect_pipe(struct drm_connector *connector,
10590
void intel_release_load_detect_pipe(struct drm_connector *connector,
10599
				    struct intel_load_detect_pipe *old,
10591
				    struct intel_load_detect_pipe *old,
10600
				    struct drm_modeset_acquire_ctx *ctx)
10592
				    struct drm_modeset_acquire_ctx *ctx)
10601
{
10593
{
10602
	struct drm_device *dev = connector->dev;
-
 
10603
	struct intel_encoder *intel_encoder =
10594
	struct intel_encoder *intel_encoder =
10604
		intel_attached_encoder(connector);
10595
		intel_attached_encoder(connector);
10605
	struct drm_encoder *encoder = &intel_encoder->base;
10596
	struct drm_encoder *encoder = &intel_encoder->base;
10606
	struct drm_crtc *crtc = encoder->crtc;
-
 
10607
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
10608
	struct drm_atomic_state *state;
-
 
10609
	struct drm_connector_state *connector_state;
10597
	struct drm_atomic_state *state = old->restore_state;
10610
	struct intel_crtc_state *crtc_state;
-
 
10611
	int ret;
10598
	int ret;
10612
 
10599
 
10613
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10600
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10614
		      connector->base.id, connector->name,
10601
		      connector->base.id, connector->name,
10615
		      encoder->base.id, encoder->name);
10602
		      encoder->base.id, encoder->name);
10616
 
-
 
10617
	if (old->load_detect_temp) {
-
 
10618
		state = drm_atomic_state_alloc(dev);
10603
 
10619
		if (!state)
-
 
10620
			goto fail;
-
 
10621
 
-
 
10622
		state->acquire_ctx = ctx;
-
 
10623
 
-
 
10624
		connector_state = drm_atomic_get_connector_state(state, connector);
-
 
10625
		if (IS_ERR(connector_state))
-
 
10626
			goto fail;
-
 
10627
 
-
 
10628
		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-
 
10629
		if (IS_ERR(crtc_state))
-
 
10630
			goto fail;
-
 
10631
 
-
 
10632
		connector_state->best_encoder = NULL;
-
 
10633
		connector_state->crtc = NULL;
-
 
10634
 
-
 
10635
		crtc_state->base.enable = crtc_state->base.active = false;
-
 
10636
 
-
 
10637
		ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
-
 
10638
						      0, 0);
-
 
10639
		if (ret)
-
 
10640
			goto fail;
-
 
10641
 
-
 
10642
		ret = drm_atomic_commit(state);
-
 
10643
		if (ret)
-
 
10644
			goto fail;
-
 
10645
 
-
 
10646
		if (old->release_fb) {
-
 
10647
			drm_framebuffer_unregister_private(old->release_fb);
-
 
10648
			drm_framebuffer_unreference(old->release_fb);
-
 
10649
		}
-
 
10650
 
10604
	if (!state)
10651
		return;
-
 
10652
	}
-
 
10653
 
-
 
10654
	/* Switch crtc and encoder back off if necessary */
10605
		return;
10655
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
-
 
10656
		connector->funcs->dpms(connector, old->dpms_mode);
10606
 
10657
 
-
 
10658
	return;
10607
	ret = drm_atomic_commit(state);
10659
fail:
10608
	if (ret) {
10660
	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
10609
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
-
 
10610
		drm_atomic_state_free(state);
10661
	drm_atomic_state_free(state);
10611
	}
10662
}
10612
}
10663
 
10613
 
10664
static int i9xx_pll_refclk(struct drm_device *dev,
10614
static int i9xx_pll_refclk(struct drm_device *dev,
10665
			   const struct intel_crtc_state *pipe_config)
10615
			   const struct intel_crtc_state *pipe_config)
10666
{
10616
{
10667
	struct drm_i915_private *dev_priv = dev->dev_private;
10617
	struct drm_i915_private *dev_priv = dev->dev_private;
10668
	u32 dpll = pipe_config->dpll_hw_state.dpll;
10618
	u32 dpll = pipe_config->dpll_hw_state.dpll;
10669
 
10619
 
10670
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10620
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10671
		return dev_priv->vbt.lvds_ssc_freq;
10621
		return dev_priv->vbt.lvds_ssc_freq;
10672
	else if (HAS_PCH_SPLIT(dev))
10622
	else if (HAS_PCH_SPLIT(dev))
10673
		return 120000;
10623
		return 120000;
10674
	else if (!IS_GEN2(dev))
10624
	else if (!IS_GEN2(dev))
10675
		return 96000;
10625
		return 96000;
10676
	else
10626
	else
10677
		return 48000;
10627
		return 48000;
10678
}
10628
}
10679
 
10629
 
10680
/* Returns the clock of the currently programmed mode of the given pipe. */
10630
/* Returns the clock of the currently programmed mode of the given pipe. */
10681
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10631
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10682
				struct intel_crtc_state *pipe_config)
10632
				struct intel_crtc_state *pipe_config)
10683
{
10633
{
10684
	struct drm_device *dev = crtc->base.dev;
10634
	struct drm_device *dev = crtc->base.dev;
10685
	struct drm_i915_private *dev_priv = dev->dev_private;
10635
	struct drm_i915_private *dev_priv = dev->dev_private;
10686
	int pipe = pipe_config->cpu_transcoder;
10636
	int pipe = pipe_config->cpu_transcoder;
10687
	u32 dpll = pipe_config->dpll_hw_state.dpll;
10637
	u32 dpll = pipe_config->dpll_hw_state.dpll;
10688
	u32 fp;
10638
	u32 fp;
10689
	intel_clock_t clock;
10639
	intel_clock_t clock;
10690
	int port_clock;
10640
	int port_clock;
10691
	int refclk = i9xx_pll_refclk(dev, pipe_config);
10641
	int refclk = i9xx_pll_refclk(dev, pipe_config);
10692
 
10642
 
10693
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10643
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10694
		fp = pipe_config->dpll_hw_state.fp0;
10644
		fp = pipe_config->dpll_hw_state.fp0;
10695
	else
10645
	else
10696
		fp = pipe_config->dpll_hw_state.fp1;
10646
		fp = pipe_config->dpll_hw_state.fp1;
10697
 
10647
 
10698
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10648
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10699
	if (IS_PINEVIEW(dev)) {
10649
	if (IS_PINEVIEW(dev)) {
10700
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10650
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10701
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10651
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10702
	} else {
10652
	} else {
10703
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10653
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10704
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10654
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10705
	}
10655
	}
10706
 
10656
 
10707
	if (!IS_GEN2(dev)) {
10657
	if (!IS_GEN2(dev)) {
10708
		if (IS_PINEVIEW(dev))
10658
		if (IS_PINEVIEW(dev))
10709
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10659
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10710
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10660
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10711
		else
10661
		else
10712
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10662
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10713
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
10663
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
10714
 
10664
 
10715
		switch (dpll & DPLL_MODE_MASK) {
10665
		switch (dpll & DPLL_MODE_MASK) {
10716
		case DPLLB_MODE_DAC_SERIAL:
10666
		case DPLLB_MODE_DAC_SERIAL:
10717
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10667
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10718
				5 : 10;
10668
				5 : 10;
10719
			break;
10669
			break;
10720
		case DPLLB_MODE_LVDS:
10670
		case DPLLB_MODE_LVDS:
10721
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10671
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10722
				7 : 14;
10672
				7 : 14;
10723
			break;
10673
			break;
10724
		default:
10674
		default:
10725
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10675
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10726
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
10676
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
10727
			return;
10677
			return;
10728
		}
10678
		}
10729
 
10679
 
10730
		if (IS_PINEVIEW(dev))
10680
		if (IS_PINEVIEW(dev))
10731
			port_clock = pnv_calc_dpll_params(refclk, &clock);
10681
			port_clock = pnv_calc_dpll_params(refclk, &clock);
10732
		else
10682
		else
10733
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
10683
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
10734
	} else {
10684
	} else {
10735
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10685
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10736
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10686
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10737
 
10687
 
10738
		if (is_lvds) {
10688
		if (is_lvds) {
10739
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10689
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10740
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
10690
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
10741
 
10691
 
10742
			if (lvds & LVDS_CLKB_POWER_UP)
10692
			if (lvds & LVDS_CLKB_POWER_UP)
10743
				clock.p2 = 7;
10693
				clock.p2 = 7;
10744
			else
10694
			else
10745
				clock.p2 = 14;
10695
				clock.p2 = 14;
10746
		} else {
10696
		} else {
10747
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
10697
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
10748
				clock.p1 = 2;
10698
				clock.p1 = 2;
10749
			else {
10699
			else {
10750
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10700
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10751
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10701
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10752
			}
10702
			}
10753
			if (dpll & PLL_P2_DIVIDE_BY_4)
10703
			if (dpll & PLL_P2_DIVIDE_BY_4)
10754
				clock.p2 = 4;
10704
				clock.p2 = 4;
10755
			else
10705
			else
10756
				clock.p2 = 2;
10706
				clock.p2 = 2;
10757
		}
10707
		}
10758
 
10708
 
10759
		port_clock = i9xx_calc_dpll_params(refclk, &clock);
10709
		port_clock = i9xx_calc_dpll_params(refclk, &clock);
10760
	}
10710
	}
10761
 
10711
 
10762
	/*
10712
	/*
10763
	 * This value includes pixel_multiplier. We will use
10713
	 * This value includes pixel_multiplier. We will use
10764
	 * port_clock to compute adjusted_mode.crtc_clock in the
10714
	 * port_clock to compute adjusted_mode.crtc_clock in the
10765
	 * encoder's get_config() function.
10715
	 * encoder's get_config() function.
10766
	 */
10716
	 */
10767
	pipe_config->port_clock = port_clock;
10717
	pipe_config->port_clock = port_clock;
10768
}
10718
}
10769
 
10719
 
10770
int intel_dotclock_calculate(int link_freq,
10720
int intel_dotclock_calculate(int link_freq,
10771
			     const struct intel_link_m_n *m_n)
10721
			     const struct intel_link_m_n *m_n)
10772
{
10722
{
10773
	/*
10723
	/*
10774
	 * The calculation for the data clock is:
10724
	 * The calculation for the data clock is:
10775
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10725
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10776
	 * But we want to avoid losing precison if possible, so:
10726
	 * But we want to avoid losing precison if possible, so:
10777
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10727
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10778
	 *
10728
	 *
10779
	 * and the link clock is simpler:
10729
	 * and the link clock is simpler:
10780
	 * link_clock = (m * link_clock) / n
10730
	 * link_clock = (m * link_clock) / n
10781
	 */
10731
	 */
10782
 
10732
 
10783
	if (!m_n->link_n)
10733
	if (!m_n->link_n)
10784
		return 0;
10734
		return 0;
10785
 
10735
 
10786
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10736
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10787
}
10737
}
10788
 
10738
 
10789
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10739
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10790
				   struct intel_crtc_state *pipe_config)
10740
				   struct intel_crtc_state *pipe_config)
10791
{
10741
{
10792
	struct drm_device *dev = crtc->base.dev;
10742
	struct drm_device *dev = crtc->base.dev;
10793
 
10743
 
10794
	/* read out port_clock from the DPLL */
10744
	/* read out port_clock from the DPLL */
10795
	i9xx_crtc_clock_get(crtc, pipe_config);
10745
	i9xx_crtc_clock_get(crtc, pipe_config);
10796
 
10746
 
10797
	/*
10747
	/*
10798
	 * This value does not include pixel_multiplier.
10748
	 * This value does not include pixel_multiplier.
10799
	 * We will check that port_clock and adjusted_mode.crtc_clock
10749
	 * We will check that port_clock and adjusted_mode.crtc_clock
10800
	 * agree once we know their relationship in the encoder's
10750
	 * agree once we know their relationship in the encoder's
10801
	 * get_config() function.
10751
	 * get_config() function.
10802
	 */
10752
	 */
10803
	pipe_config->base.adjusted_mode.crtc_clock =
10753
	pipe_config->base.adjusted_mode.crtc_clock =
10804
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
10754
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
10805
					 &pipe_config->fdi_m_n);
10755
					 &pipe_config->fdi_m_n);
10806
}
10756
}
10807
 
10757
 
10808
/** Returns the currently programmed mode of the given pipe. */
10758
/** Returns the currently programmed mode of the given pipe. */
10809
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10759
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10810
					     struct drm_crtc *crtc)
10760
					     struct drm_crtc *crtc)
10811
{
10761
{
10812
	struct drm_i915_private *dev_priv = dev->dev_private;
10762
	struct drm_i915_private *dev_priv = dev->dev_private;
10813
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10763
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10814
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10764
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10815
	struct drm_display_mode *mode;
10765
	struct drm_display_mode *mode;
10816
	struct intel_crtc_state pipe_config;
10766
	struct intel_crtc_state *pipe_config;
10817
	int htot = I915_READ(HTOTAL(cpu_transcoder));
10767
	int htot = I915_READ(HTOTAL(cpu_transcoder));
10818
	int hsync = I915_READ(HSYNC(cpu_transcoder));
10768
	int hsync = I915_READ(HSYNC(cpu_transcoder));
10819
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
10769
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
10820
	int vsync = I915_READ(VSYNC(cpu_transcoder));
10770
	int vsync = I915_READ(VSYNC(cpu_transcoder));
10821
	enum pipe pipe = intel_crtc->pipe;
10771
	enum pipe pipe = intel_crtc->pipe;
10822
 
10772
 
10823
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10773
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10824
	if (!mode)
10774
	if (!mode)
10825
		return NULL;
10775
		return NULL;
-
 
10776
 
-
 
10777
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
-
 
10778
	if (!pipe_config) {
-
 
10779
		kfree(mode);
-
 
10780
		return NULL;
-
 
10781
	}
10826
 
10782
 
10827
	/*
10783
	/*
10828
	 * Construct a pipe_config sufficient for getting the clock info
10784
	 * Construct a pipe_config sufficient for getting the clock info
10829
	 * back out of crtc_clock_get.
10785
	 * back out of crtc_clock_get.
10830
	 *
10786
	 *
10831
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10787
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10832
	 * to use a real value here instead.
10788
	 * to use a real value here instead.
10833
	 */
10789
	 */
10834
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
10790
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
10835
	pipe_config.pixel_multiplier = 1;
10791
	pipe_config->pixel_multiplier = 1;
10836
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10792
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10837
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10793
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10838
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10794
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10839
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
10795
	i9xx_crtc_clock_get(intel_crtc, pipe_config);
10840
 
10796
 
10841
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
10797
	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
10842
	mode->hdisplay = (htot & 0xffff) + 1;
10798
	mode->hdisplay = (htot & 0xffff) + 1;
10843
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10799
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10844
	mode->hsync_start = (hsync & 0xffff) + 1;
10800
	mode->hsync_start = (hsync & 0xffff) + 1;
10845
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10801
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10846
	mode->vdisplay = (vtot & 0xffff) + 1;
10802
	mode->vdisplay = (vtot & 0xffff) + 1;
10847
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10803
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10848
	mode->vsync_start = (vsync & 0xffff) + 1;
10804
	mode->vsync_start = (vsync & 0xffff) + 1;
10849
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10805
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10850
 
10806
 
10851
	drm_mode_set_name(mode);
10807
	drm_mode_set_name(mode);
-
 
10808
 
-
 
10809
	kfree(pipe_config);
10852
 
10810
 
10853
	return mode;
10811
	return mode;
10854
}
10812
}
10855
 
10813
 
10856
void intel_mark_busy(struct drm_device *dev)
10814
void intel_mark_busy(struct drm_device *dev)
10857
{
10815
{
10858
	struct drm_i915_private *dev_priv = dev->dev_private;
10816
	struct drm_i915_private *dev_priv = dev->dev_private;
10859
 
10817
 
10860
	if (dev_priv->mm.busy)
10818
	if (dev_priv->mm.busy)
10861
		return;
10819
		return;
10862
 
10820
 
10863
	intel_runtime_pm_get(dev_priv);
10821
	intel_runtime_pm_get(dev_priv);
10864
	i915_update_gfx_val(dev_priv);
10822
	i915_update_gfx_val(dev_priv);
10865
	if (INTEL_INFO(dev)->gen >= 6)
10823
	if (INTEL_INFO(dev)->gen >= 6)
10866
		gen6_rps_busy(dev_priv);
10824
		gen6_rps_busy(dev_priv);
10867
	dev_priv->mm.busy = true;
10825
	dev_priv->mm.busy = true;
10868
}
10826
}
10869
 
10827
 
10870
void intel_mark_idle(struct drm_device *dev)
10828
void intel_mark_idle(struct drm_device *dev)
10871
{
10829
{
10872
	struct drm_i915_private *dev_priv = dev->dev_private;
10830
	struct drm_i915_private *dev_priv = dev->dev_private;
10873
 
10831
 
10874
	if (!dev_priv->mm.busy)
10832
	if (!dev_priv->mm.busy)
10875
		return;
10833
		return;
10876
 
10834
 
10877
	dev_priv->mm.busy = false;
10835
	dev_priv->mm.busy = false;
10878
 
10836
 
10879
	if (INTEL_INFO(dev)->gen >= 6)
10837
	if (INTEL_INFO(dev)->gen >= 6)
10880
		gen6_rps_idle(dev->dev_private);
10838
		gen6_rps_idle(dev->dev_private);
10881
 
10839
 
10882
	intel_runtime_pm_put(dev_priv);
10840
	intel_runtime_pm_put(dev_priv);
10883
}
10841
}
10884
 
10842
 
10885
static void intel_crtc_destroy(struct drm_crtc *crtc)
10843
static void intel_crtc_destroy(struct drm_crtc *crtc)
10886
{
10844
{
10887
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10845
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10888
	struct drm_device *dev = crtc->dev;
10846
	struct drm_device *dev = crtc->dev;
10889
	struct intel_unpin_work *work;
10847
	struct intel_unpin_work *work;
10890
 
10848
 
10891
	spin_lock_irq(&dev->event_lock);
10849
	spin_lock_irq(&dev->event_lock);
10892
	work = intel_crtc->unpin_work;
10850
	work = intel_crtc->unpin_work;
10893
	intel_crtc->unpin_work = NULL;
10851
	intel_crtc->unpin_work = NULL;
10894
	spin_unlock_irq(&dev->event_lock);
10852
	spin_unlock_irq(&dev->event_lock);
10895
 
10853
 
10896
	if (work) {
10854
	if (work) {
10897
//		cancel_work_sync(&work->work);
10855
		cancel_work_sync(&work->work);
10898
		kfree(work);
10856
		kfree(work);
10899
	}
10857
	}
10900
 
10858
 
10901
	drm_crtc_cleanup(crtc);
10859
	drm_crtc_cleanup(crtc);
10902
 
10860
 
10903
	kfree(intel_crtc);
10861
	kfree(intel_crtc);
10904
}
10862
}
10905
 
10863
 
10906
static void intel_unpin_work_fn(struct work_struct *__work)
10864
static void intel_unpin_work_fn(struct work_struct *__work)
10907
{
10865
{
10908
	struct intel_unpin_work *work =
10866
	struct intel_unpin_work *work =
10909
		container_of(__work, struct intel_unpin_work, work);
10867
		container_of(__work, struct intel_unpin_work, work);
10910
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10868
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10911
	struct drm_device *dev = crtc->base.dev;
10869
	struct drm_device *dev = crtc->base.dev;
10912
	struct drm_plane *primary = crtc->base.primary;
10870
	struct drm_plane *primary = crtc->base.primary;
10913
 
10871
 
10914
	mutex_lock(&dev->struct_mutex);
10872
	mutex_lock(&dev->struct_mutex);
10915
	intel_unpin_fb_obj(work->old_fb, primary->state);
10873
	intel_unpin_fb_obj(work->old_fb, primary->state);
10916
	drm_gem_object_unreference(&work->pending_flip_obj->base);
10874
	drm_gem_object_unreference(&work->pending_flip_obj->base);
10917
 
10875
 
10918
	if (work->flip_queued_req)
10876
	if (work->flip_queued_req)
10919
		i915_gem_request_assign(&work->flip_queued_req, NULL);
10877
		i915_gem_request_assign(&work->flip_queued_req, NULL);
10920
	mutex_unlock(&dev->struct_mutex);
10878
	mutex_unlock(&dev->struct_mutex);
10921
 
10879
 
10922
	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
10880
	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
-
 
10881
	intel_fbc_post_update(crtc);
10923
	drm_framebuffer_unreference(work->old_fb);
10882
	drm_framebuffer_unreference(work->old_fb);
10924
 
10883
 
10925
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
10884
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
10926
	atomic_dec(&crtc->unpin_work_count);
10885
	atomic_dec(&crtc->unpin_work_count);
10927
 
10886
 
10928
	kfree(work);
10887
	kfree(work);
10929
}
10888
}
10930
 
10889
 
10931
static void do_intel_finish_page_flip(struct drm_device *dev,
10890
static void do_intel_finish_page_flip(struct drm_device *dev,
10932
				      struct drm_crtc *crtc)
10891
				      struct drm_crtc *crtc)
10933
{
10892
{
10934
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10893
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10935
	struct intel_unpin_work *work;
10894
	struct intel_unpin_work *work;
10936
	unsigned long flags;
10895
	unsigned long flags;
10937
 
10896
 
10938
	/* Ignore early vblank irqs */
10897
	/* Ignore early vblank irqs */
10939
	if (intel_crtc == NULL)
10898
	if (intel_crtc == NULL)
10940
		return;
10899
		return;
10941
 
10900
 
10942
	/*
10901
	/*
10943
	 * This is called both by irq handlers and the reset code (to complete
10902
	 * This is called both by irq handlers and the reset code (to complete
10944
	 * lost pageflips) so needs the full irqsave spinlocks.
10903
	 * lost pageflips) so needs the full irqsave spinlocks.
10945
	 */
10904
	 */
10946
	spin_lock_irqsave(&dev->event_lock, flags);
10905
	spin_lock_irqsave(&dev->event_lock, flags);
10947
	work = intel_crtc->unpin_work;
10906
	work = intel_crtc->unpin_work;
10948
 
10907
 
10949
	/* Ensure we don't miss a work->pending update ... */
10908
	/* Ensure we don't miss a work->pending update ... */
10950
	smp_rmb();
10909
	smp_rmb();
10951
 
10910
 
10952
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10911
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10953
		spin_unlock_irqrestore(&dev->event_lock, flags);
10912
		spin_unlock_irqrestore(&dev->event_lock, flags);
10954
		return;
10913
		return;
10955
	}
10914
	}
10956
 
10915
 
10957
	page_flip_completed(intel_crtc);
10916
	page_flip_completed(intel_crtc);
10958
 
10917
 
10959
	spin_unlock_irqrestore(&dev->event_lock, flags);
10918
	spin_unlock_irqrestore(&dev->event_lock, flags);
10960
}
10919
}
10961
 
10920
 
10962
void intel_finish_page_flip(struct drm_device *dev, int pipe)
10921
void intel_finish_page_flip(struct drm_device *dev, int pipe)
10963
{
10922
{
10964
	struct drm_i915_private *dev_priv = dev->dev_private;
10923
	struct drm_i915_private *dev_priv = dev->dev_private;
10965
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10924
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10966
 
10925
 
10967
	do_intel_finish_page_flip(dev, crtc);
10926
	do_intel_finish_page_flip(dev, crtc);
10968
}
10927
}
10969
 
10928
 
10970
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10929
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10971
{
10930
{
10972
	struct drm_i915_private *dev_priv = dev->dev_private;
10931
	struct drm_i915_private *dev_priv = dev->dev_private;
10973
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10932
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10974
 
10933
 
10975
	do_intel_finish_page_flip(dev, crtc);
10934
	do_intel_finish_page_flip(dev, crtc);
10976
}
10935
}
10977
 
10936
 
10978
/* Is 'a' after or equal to 'b'? */
10937
/* Is 'a' after or equal to 'b'? */
10979
static bool g4x_flip_count_after_eq(u32 a, u32 b)
10938
static bool g4x_flip_count_after_eq(u32 a, u32 b)
10980
{
10939
{
10981
	return !((a - b) & 0x80000000);
10940
	return !((a - b) & 0x80000000);
10982
}
10941
}
10983
 
10942
 
10984
static bool page_flip_finished(struct intel_crtc *crtc)
10943
static bool page_flip_finished(struct intel_crtc *crtc)
10985
{
10944
{
10986
	struct drm_device *dev = crtc->base.dev;
10945
	struct drm_device *dev = crtc->base.dev;
10987
	struct drm_i915_private *dev_priv = dev->dev_private;
10946
	struct drm_i915_private *dev_priv = dev->dev_private;
10988
 
10947
 
10989
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
10948
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
10990
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
10949
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
10991
		return true;
10950
		return true;
10992
 
10951
 
10993
	/*
10952
	/*
10994
	 * The relevant registers doen't exist on pre-ctg.
10953
	 * The relevant registers doen't exist on pre-ctg.
10995
	 * As the flip done interrupt doesn't trigger for mmio
10954
	 * As the flip done interrupt doesn't trigger for mmio
10996
	 * flips on gmch platforms, a flip count check isn't
10955
	 * flips on gmch platforms, a flip count check isn't
10997
	 * really needed there. But since ctg has the registers,
10956
	 * really needed there. But since ctg has the registers,
10998
	 * include it in the check anyway.
10957
	 * include it in the check anyway.
10999
	 */
10958
	 */
11000
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
10959
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
11001
		return true;
10960
		return true;
11002
 
10961
 
11003
	/*
10962
	/*
-
 
10963
	 * BDW signals flip done immediately if the plane
-
 
10964
	 * is disabled, even if the plane enable is already
-
 
10965
	 * armed to occur at the next vblank :(
-
 
10966
	 */
-
 
10967
 
-
 
10968
	/*
11004
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
10969
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
11005
	 * used the same base address. In that case the mmio flip might
10970
	 * used the same base address. In that case the mmio flip might
11006
	 * have completed, but the CS hasn't even executed the flip yet.
10971
	 * have completed, but the CS hasn't even executed the flip yet.
11007
	 *
10972
	 *
11008
	 * A flip count check isn't enough as the CS might have updated
10973
	 * A flip count check isn't enough as the CS might have updated
11009
	 * the base address just after start of vblank, but before we
10974
	 * the base address just after start of vblank, but before we
11010
	 * managed to process the interrupt. This means we'd complete the
10975
	 * managed to process the interrupt. This means we'd complete the
11011
	 * CS flip too soon.
10976
	 * CS flip too soon.
11012
	 *
10977
	 *
11013
	 * Combining both checks should get us a good enough result. It may
10978
	 * Combining both checks should get us a good enough result. It may
11014
	 * still happen that the CS flip has been executed, but has not
10979
	 * still happen that the CS flip has been executed, but has not
11015
	 * yet actually completed. But in case the base address is the same
10980
	 * yet actually completed. But in case the base address is the same
11016
	 * anyway, we don't really care.
10981
	 * anyway, we don't really care.
11017
	 */
10982
	 */
11018
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
10983
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
11019
		crtc->unpin_work->gtt_offset &&
10984
		crtc->unpin_work->gtt_offset &&
11020
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
10985
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
11021
				    crtc->unpin_work->flip_count);
10986
				    crtc->unpin_work->flip_count);
11022
}
10987
}
11023
 
10988
 
11024
void intel_prepare_page_flip(struct drm_device *dev, int plane)
10989
void intel_prepare_page_flip(struct drm_device *dev, int plane)
11025
{
10990
{
11026
	struct drm_i915_private *dev_priv = dev->dev_private;
10991
	struct drm_i915_private *dev_priv = dev->dev_private;
11027
	struct intel_crtc *intel_crtc =
10992
	struct intel_crtc *intel_crtc =
11028
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
10993
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
11029
	unsigned long flags;
10994
	unsigned long flags;
11030
 
10995
 
11031
 
10996
 
11032
	/*
10997
	/*
11033
	 * This is called both by irq handlers and the reset code (to complete
10998
	 * This is called both by irq handlers and the reset code (to complete
11034
	 * lost pageflips) so needs the full irqsave spinlocks.
10999
	 * lost pageflips) so needs the full irqsave spinlocks.
11035
	 *
11000
	 *
11036
	 * NB: An MMIO update of the plane base pointer will also
11001
	 * NB: An MMIO update of the plane base pointer will also
11037
	 * generate a page-flip completion irq, i.e. every modeset
11002
	 * generate a page-flip completion irq, i.e. every modeset
11038
	 * is also accompanied by a spurious intel_prepare_page_flip().
11003
	 * is also accompanied by a spurious intel_prepare_page_flip().
11039
	 */
11004
	 */
11040
	spin_lock_irqsave(&dev->event_lock, flags);
11005
	spin_lock_irqsave(&dev->event_lock, flags);
11041
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
11006
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
11042
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
11007
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
11043
	spin_unlock_irqrestore(&dev->event_lock, flags);
11008
	spin_unlock_irqrestore(&dev->event_lock, flags);
11044
}
11009
}
11045
 
11010
 
11046
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
11011
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
11047
{
11012
{
11048
	/* Ensure that the work item is consistent when activating it ... */
11013
	/* Ensure that the work item is consistent when activating it ... */
11049
	smp_wmb();
11014
	smp_wmb();
11050
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
11015
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
11051
	/* and that it is marked active as soon as the irq could fire. */
11016
	/* and that it is marked active as soon as the irq could fire. */
11052
	smp_wmb();
11017
	smp_wmb();
11053
}
11018
}
11054
 
11019
 
11055
static int intel_gen2_queue_flip(struct drm_device *dev,
11020
static int intel_gen2_queue_flip(struct drm_device *dev,
11056
				 struct drm_crtc *crtc,
11021
				 struct drm_crtc *crtc,
11057
				 struct drm_framebuffer *fb,
11022
				 struct drm_framebuffer *fb,
11058
				 struct drm_i915_gem_object *obj,
11023
				 struct drm_i915_gem_object *obj,
11059
				 struct drm_i915_gem_request *req,
11024
				 struct drm_i915_gem_request *req,
11060
				 uint32_t flags)
11025
				 uint32_t flags)
11061
{
11026
{
11062
	struct intel_engine_cs *ring = req->ring;
11027
	struct intel_engine_cs *ring = req->ring;
11063
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11028
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11064
	u32 flip_mask;
11029
	u32 flip_mask;
11065
	int ret;
11030
	int ret;
11066
 
11031
 
11067
	ret = intel_ring_begin(req, 6);
11032
	ret = intel_ring_begin(req, 6);
11068
	if (ret)
11033
	if (ret)
11069
		return ret;
11034
		return ret;
11070
 
11035
 
11071
	/* Can't queue multiple flips, so wait for the previous
11036
	/* Can't queue multiple flips, so wait for the previous
11072
	 * one to finish before executing the next.
11037
	 * one to finish before executing the next.
11073
	 */
11038
	 */
11074
	if (intel_crtc->plane)
11039
	if (intel_crtc->plane)
11075
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11040
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11076
	else
11041
	else
11077
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11042
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11078
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
11043
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
11079
	intel_ring_emit(ring, MI_NOOP);
11044
	intel_ring_emit(ring, MI_NOOP);
11080
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
11045
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
11081
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11046
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11082
	intel_ring_emit(ring, fb->pitches[0]);
11047
	intel_ring_emit(ring, fb->pitches[0]);
11083
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11048
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11084
	intel_ring_emit(ring, 0); /* aux display base address, unused */
11049
	intel_ring_emit(ring, 0); /* aux display base address, unused */
11085
 
11050
 
11086
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11051
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11087
	return 0;
11052
	return 0;
11088
}
11053
}
11089
 
11054
 
11090
static int intel_gen3_queue_flip(struct drm_device *dev,
11055
static int intel_gen3_queue_flip(struct drm_device *dev,
11091
				 struct drm_crtc *crtc,
11056
				 struct drm_crtc *crtc,
11092
				 struct drm_framebuffer *fb,
11057
				 struct drm_framebuffer *fb,
11093
				 struct drm_i915_gem_object *obj,
11058
				 struct drm_i915_gem_object *obj,
11094
				 struct drm_i915_gem_request *req,
11059
				 struct drm_i915_gem_request *req,
11095
				 uint32_t flags)
11060
				 uint32_t flags)
11096
{
11061
{
11097
	struct intel_engine_cs *ring = req->ring;
11062
	struct intel_engine_cs *ring = req->ring;
11098
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11063
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11099
	u32 flip_mask;
11064
	u32 flip_mask;
11100
	int ret;
11065
	int ret;
11101
 
11066
 
11102
	ret = intel_ring_begin(req, 6);
11067
	ret = intel_ring_begin(req, 6);
11103
	if (ret)
11068
	if (ret)
11104
		return ret;
11069
		return ret;
11105
 
11070
 
11106
	if (intel_crtc->plane)
11071
	if (intel_crtc->plane)
11107
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11072
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11108
	else
11073
	else
11109
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11074
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11110
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
11075
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
11111
	intel_ring_emit(ring, MI_NOOP);
11076
	intel_ring_emit(ring, MI_NOOP);
11112
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
11077
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
11113
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11078
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11114
	intel_ring_emit(ring, fb->pitches[0]);
11079
	intel_ring_emit(ring, fb->pitches[0]);
11115
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11080
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11116
	intel_ring_emit(ring, MI_NOOP);
11081
	intel_ring_emit(ring, MI_NOOP);
11117
 
11082
 
11118
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11083
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11119
	return 0;
11084
	return 0;
11120
}
11085
}
11121
 
11086
 
11122
static int intel_gen4_queue_flip(struct drm_device *dev,
11087
static int intel_gen4_queue_flip(struct drm_device *dev,
11123
				 struct drm_crtc *crtc,
11088
				 struct drm_crtc *crtc,
11124
				 struct drm_framebuffer *fb,
11089
				 struct drm_framebuffer *fb,
11125
				 struct drm_i915_gem_object *obj,
11090
				 struct drm_i915_gem_object *obj,
11126
				 struct drm_i915_gem_request *req,
11091
				 struct drm_i915_gem_request *req,
11127
				 uint32_t flags)
11092
				 uint32_t flags)
11128
{
11093
{
11129
	struct intel_engine_cs *ring = req->ring;
11094
	struct intel_engine_cs *ring = req->ring;
11130
	struct drm_i915_private *dev_priv = dev->dev_private;
11095
	struct drm_i915_private *dev_priv = dev->dev_private;
11131
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11096
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11132
	uint32_t pf, pipesrc;
11097
	uint32_t pf, pipesrc;
11133
	int ret;
11098
	int ret;
11134
 
11099
 
11135
	ret = intel_ring_begin(req, 4);
11100
	ret = intel_ring_begin(req, 4);
11136
	if (ret)
11101
	if (ret)
11137
		return ret;
11102
		return ret;
11138
 
11103
 
11139
	/* i965+ uses the linear or tiled offsets from the
11104
	/* i965+ uses the linear or tiled offsets from the
11140
	 * Display Registers (which do not change across a page-flip)
11105
	 * Display Registers (which do not change across a page-flip)
11141
	 * so we need only reprogram the base address.
11106
	 * so we need only reprogram the base address.
11142
	 */
11107
	 */
11143
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
11108
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
11144
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11109
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11145
	intel_ring_emit(ring, fb->pitches[0]);
11110
	intel_ring_emit(ring, fb->pitches[0]);
11146
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
11111
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
11147
			obj->tiling_mode);
11112
			obj->tiling_mode);
11148
 
11113
 
11149
	/* XXX Enabling the panel-fitter across page-flip is so far
11114
	/* XXX Enabling the panel-fitter across page-flip is so far
11150
	 * untested on non-native modes, so ignore it for now.
11115
	 * untested on non-native modes, so ignore it for now.
11151
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11116
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11152
	 */
11117
	 */
11153
	pf = 0;
11118
	pf = 0;
11154
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11119
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11155
	intel_ring_emit(ring, pf | pipesrc);
11120
	intel_ring_emit(ring, pf | pipesrc);
11156
 
11121
 
11157
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11122
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11158
	return 0;
11123
	return 0;
11159
}
11124
}
11160
 
11125
 
11161
static int intel_gen6_queue_flip(struct drm_device *dev,
11126
static int intel_gen6_queue_flip(struct drm_device *dev,
11162
				 struct drm_crtc *crtc,
11127
				 struct drm_crtc *crtc,
11163
				 struct drm_framebuffer *fb,
11128
				 struct drm_framebuffer *fb,
11164
				 struct drm_i915_gem_object *obj,
11129
				 struct drm_i915_gem_object *obj,
11165
				 struct drm_i915_gem_request *req,
11130
				 struct drm_i915_gem_request *req,
11166
				 uint32_t flags)
11131
				 uint32_t flags)
11167
{
11132
{
11168
	struct intel_engine_cs *ring = req->ring;
11133
	struct intel_engine_cs *ring = req->ring;
11169
	struct drm_i915_private *dev_priv = dev->dev_private;
11134
	struct drm_i915_private *dev_priv = dev->dev_private;
11170
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11135
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11171
	uint32_t pf, pipesrc;
11136
	uint32_t pf, pipesrc;
11172
	int ret;
11137
	int ret;
11173
 
11138
 
11174
	ret = intel_ring_begin(req, 4);
11139
	ret = intel_ring_begin(req, 4);
11175
	if (ret)
11140
	if (ret)
11176
		return ret;
11141
		return ret;
11177
 
11142
 
11178
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
11143
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
11179
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11144
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11180
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
11145
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
11181
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11146
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11182
 
11147
 
11183
	/* Contrary to the suggestions in the documentation,
11148
	/* Contrary to the suggestions in the documentation,
11184
	 * "Enable Panel Fitter" does not seem to be required when page
11149
	 * "Enable Panel Fitter" does not seem to be required when page
11185
	 * flipping with a non-native mode, and worse causes a normal
11150
	 * flipping with a non-native mode, and worse causes a normal
11186
	 * modeset to fail.
11151
	 * modeset to fail.
11187
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11152
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11188
	 */
11153
	 */
11189
	pf = 0;
11154
	pf = 0;
11190
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11155
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11191
	intel_ring_emit(ring, pf | pipesrc);
11156
	intel_ring_emit(ring, pf | pipesrc);
11192
 
11157
 
11193
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11158
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11194
	return 0;
11159
	return 0;
11195
}
11160
}
11196
 
11161
 
11197
static int intel_gen7_queue_flip(struct drm_device *dev,
11162
static int intel_gen7_queue_flip(struct drm_device *dev,
11198
				 struct drm_crtc *crtc,
11163
				 struct drm_crtc *crtc,
11199
				 struct drm_framebuffer *fb,
11164
				 struct drm_framebuffer *fb,
11200
				 struct drm_i915_gem_object *obj,
11165
				 struct drm_i915_gem_object *obj,
11201
				 struct drm_i915_gem_request *req,
11166
				 struct drm_i915_gem_request *req,
11202
				 uint32_t flags)
11167
				 uint32_t flags)
11203
{
11168
{
11204
	struct intel_engine_cs *ring = req->ring;
11169
	struct intel_engine_cs *ring = req->ring;
11205
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11170
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11206
	uint32_t plane_bit = 0;
11171
	uint32_t plane_bit = 0;
11207
	int len, ret;
11172
	int len, ret;
11208
 
11173
 
11209
	switch (intel_crtc->plane) {
11174
	switch (intel_crtc->plane) {
11210
	case PLANE_A:
11175
	case PLANE_A:
11211
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11176
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11212
		break;
11177
		break;
11213
	case PLANE_B:
11178
	case PLANE_B:
11214
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11179
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11215
		break;
11180
		break;
11216
	case PLANE_C:
11181
	case PLANE_C:
11217
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11182
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11218
		break;
11183
		break;
11219
	default:
11184
	default:
11220
		WARN_ONCE(1, "unknown plane in flip command\n");
11185
		WARN_ONCE(1, "unknown plane in flip command\n");
11221
		return -ENODEV;
11186
		return -ENODEV;
11222
	}
11187
	}
11223
 
11188
 
11224
	len = 4;
11189
	len = 4;
11225
	if (ring->id == RCS) {
11190
	if (ring->id == RCS) {
11226
		len += 6;
11191
		len += 6;
11227
		/*
11192
		/*
11228
		 * On Gen 8, SRM is now taking an extra dword to accommodate
11193
		 * On Gen 8, SRM is now taking an extra dword to accommodate
11229
		 * 48bits addresses, and we need a NOOP for the batch size to
11194
		 * 48bits addresses, and we need a NOOP for the batch size to
11230
		 * stay even.
11195
		 * stay even.
11231
		 */
11196
		 */
11232
		if (IS_GEN8(dev))
11197
		if (IS_GEN8(dev))
11233
			len += 2;
11198
			len += 2;
11234
	}
11199
	}
11235
 
11200
 
11236
	/*
11201
	/*
11237
	 * BSpec MI_DISPLAY_FLIP for IVB:
11202
	 * BSpec MI_DISPLAY_FLIP for IVB:
11238
	 * "The full packet must be contained within the same cache line."
11203
	 * "The full packet must be contained within the same cache line."
11239
	 *
11204
	 *
11240
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11205
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11241
	 * cacheline, if we ever start emitting more commands before
11206
	 * cacheline, if we ever start emitting more commands before
11242
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11207
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11243
	 * then do the cacheline alignment, and finally emit the
11208
	 * then do the cacheline alignment, and finally emit the
11244
	 * MI_DISPLAY_FLIP.
11209
	 * MI_DISPLAY_FLIP.
11245
	 */
11210
	 */
11246
	ret = intel_ring_cacheline_align(req);
11211
	ret = intel_ring_cacheline_align(req);
11247
	if (ret)
11212
	if (ret)
11248
		return ret;
11213
		return ret;
11249
 
11214
 
11250
	ret = intel_ring_begin(req, len);
11215
	ret = intel_ring_begin(req, len);
11251
	if (ret)
11216
	if (ret)
11252
		return ret;
11217
		return ret;
11253
 
11218
 
11254
	/* Unmask the flip-done completion message. Note that the bspec says that
11219
	/* Unmask the flip-done completion message. Note that the bspec says that
11255
	 * we should do this for both the BCS and RCS, and that we must not unmask
11220
	 * we should do this for both the BCS and RCS, and that we must not unmask
11256
	 * more than one flip event at any time (or ensure that one flip message
11221
	 * more than one flip event at any time (or ensure that one flip message
11257
	 * can be sent by waiting for flip-done prior to queueing new flips).
11222
	 * can be sent by waiting for flip-done prior to queueing new flips).
11258
	 * Experimentation says that BCS works despite DERRMR masking all
11223
	 * Experimentation says that BCS works despite DERRMR masking all
11259
	 * flip-done completion events and that unmasking all planes at once
11224
	 * flip-done completion events and that unmasking all planes at once
11260
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11225
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11261
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11226
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11262
	 */
11227
	 */
11263
	if (ring->id == RCS) {
11228
	if (ring->id == RCS) {
11264
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
11229
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
11265
		intel_ring_emit_reg(ring, DERRMR);
11230
		intel_ring_emit_reg(ring, DERRMR);
11266
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11231
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11267
					DERRMR_PIPEB_PRI_FLIP_DONE |
11232
					DERRMR_PIPEB_PRI_FLIP_DONE |
11268
					DERRMR_PIPEC_PRI_FLIP_DONE));
11233
					DERRMR_PIPEC_PRI_FLIP_DONE));
11269
		if (IS_GEN8(dev))
11234
		if (IS_GEN8(dev))
11270
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
11235
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
11271
					      MI_SRM_LRM_GLOBAL_GTT);
11236
					      MI_SRM_LRM_GLOBAL_GTT);
11272
		else
11237
		else
11273
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
11238
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
11274
					      MI_SRM_LRM_GLOBAL_GTT);
11239
					      MI_SRM_LRM_GLOBAL_GTT);
11275
		intel_ring_emit_reg(ring, DERRMR);
11240
		intel_ring_emit_reg(ring, DERRMR);
11276
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
11241
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
11277
		if (IS_GEN8(dev)) {
11242
		if (IS_GEN8(dev)) {
11278
			intel_ring_emit(ring, 0);
11243
			intel_ring_emit(ring, 0);
11279
			intel_ring_emit(ring, MI_NOOP);
11244
			intel_ring_emit(ring, MI_NOOP);
11280
		}
11245
		}
11281
	}
11246
	}
11282
 
11247
 
11283
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
11248
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
11284
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
11249
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
11285
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11250
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11286
	intel_ring_emit(ring, (MI_NOOP));
11251
	intel_ring_emit(ring, (MI_NOOP));
11287
 
11252
 
11288
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11253
	intel_mark_page_flip_active(intel_crtc->unpin_work);
11289
	return 0;
11254
	return 0;
11290
}
11255
}
11291
 
11256
 
11292
static bool use_mmio_flip(struct intel_engine_cs *ring,
11257
static bool use_mmio_flip(struct intel_engine_cs *ring,
11293
			  struct drm_i915_gem_object *obj)
11258
			  struct drm_i915_gem_object *obj)
11294
{
11259
{
11295
	/*
11260
	/*
11296
	 * This is not being used for older platforms, because
11261
	 * This is not being used for older platforms, because
11297
	 * non-availability of flip done interrupt forces us to use
11262
	 * non-availability of flip done interrupt forces us to use
11298
	 * CS flips. Older platforms derive flip done using some clever
11263
	 * CS flips. Older platforms derive flip done using some clever
11299
	 * tricks involving the flip_pending status bits and vblank irqs.
11264
	 * tricks involving the flip_pending status bits and vblank irqs.
11300
	 * So using MMIO flips there would disrupt this mechanism.
11265
	 * So using MMIO flips there would disrupt this mechanism.
11301
	 */
11266
	 */
11302
 
11267
 
11303
	if (ring == NULL)
11268
	if (ring == NULL)
11304
		return true;
11269
		return true;
11305
 
11270
 
11306
	if (INTEL_INFO(ring->dev)->gen < 5)
11271
	if (INTEL_INFO(ring->dev)->gen < 5)
11307
		return false;
11272
		return false;
11308
 
11273
 
11309
	if (i915.use_mmio_flip < 0)
11274
	if (i915.use_mmio_flip < 0)
11310
		return false;
11275
		return false;
11311
	else if (i915.use_mmio_flip > 0)
11276
	else if (i915.use_mmio_flip > 0)
11312
		return true;
11277
		return true;
11313
	else if (i915.enable_execlists)
11278
	else if (i915.enable_execlists)
11314
		return true;
11279
		return true;
11315
//	else if (obj->base.dma_buf &&
11280
//	else if (obj->base.dma_buf &&
11316
//		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11281
//		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11317
//						       false))
11282
//						       false))
11318
//		return true;
11283
//		return true;
11319
	else
11284
	else
11320
		return ring != i915_gem_request_get_ring(obj->last_write_req);
11285
		return ring != i915_gem_request_get_ring(obj->last_write_req);
11321
}
11286
}
11322
 
11287
 
11323
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11288
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11324
			     unsigned int rotation,
11289
			     unsigned int rotation,
11325
			     struct intel_unpin_work *work)
11290
			     struct intel_unpin_work *work)
11326
{
11291
{
11327
	struct drm_device *dev = intel_crtc->base.dev;
11292
	struct drm_device *dev = intel_crtc->base.dev;
11328
	struct drm_i915_private *dev_priv = dev->dev_private;
11293
	struct drm_i915_private *dev_priv = dev->dev_private;
11329
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11294
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11330
	const enum pipe pipe = intel_crtc->pipe;
11295
	const enum pipe pipe = intel_crtc->pipe;
11331
	u32 ctl, stride, tile_height;
11296
	u32 ctl, stride, tile_height;
11332
 
11297
 
11333
	ctl = I915_READ(PLANE_CTL(pipe, 0));
11298
	ctl = I915_READ(PLANE_CTL(pipe, 0));
11334
	ctl &= ~PLANE_CTL_TILED_MASK;
11299
	ctl &= ~PLANE_CTL_TILED_MASK;
11335
	switch (fb->modifier[0]) {
11300
	switch (fb->modifier[0]) {
11336
	case DRM_FORMAT_MOD_NONE:
11301
	case DRM_FORMAT_MOD_NONE:
11337
		break;
11302
		break;
11338
	case I915_FORMAT_MOD_X_TILED:
11303
	case I915_FORMAT_MOD_X_TILED:
11339
		ctl |= PLANE_CTL_TILED_X;
11304
		ctl |= PLANE_CTL_TILED_X;
11340
		break;
11305
		break;
11341
	case I915_FORMAT_MOD_Y_TILED:
11306
	case I915_FORMAT_MOD_Y_TILED:
11342
		ctl |= PLANE_CTL_TILED_Y;
11307
		ctl |= PLANE_CTL_TILED_Y;
11343
		break;
11308
		break;
11344
	case I915_FORMAT_MOD_Yf_TILED:
11309
	case I915_FORMAT_MOD_Yf_TILED:
11345
		ctl |= PLANE_CTL_TILED_YF;
11310
		ctl |= PLANE_CTL_TILED_YF;
11346
		break;
11311
		break;
11347
	default:
11312
	default:
11348
		MISSING_CASE(fb->modifier[0]);
11313
		MISSING_CASE(fb->modifier[0]);
11349
	}
11314
	}
11350
 
11315
 
11351
	/*
11316
	/*
11352
	 * The stride is either expressed as a multiple of 64 bytes chunks for
11317
	 * The stride is either expressed as a multiple of 64 bytes chunks for
11353
	 * linear buffers or in number of tiles for tiled buffers.
11318
	 * linear buffers or in number of tiles for tiled buffers.
11354
	 */
11319
	 */
11355
	if (intel_rotation_90_or_270(rotation)) {
11320
	if (intel_rotation_90_or_270(rotation)) {
11356
		/* stride = Surface height in tiles */
11321
		/* stride = Surface height in tiles */
11357
		tile_height = intel_tile_height(dev, fb->pixel_format,
11322
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
11358
						fb->modifier[0], 0);
-
 
11359
		stride = DIV_ROUND_UP(fb->height, tile_height);
11323
		stride = DIV_ROUND_UP(fb->height, tile_height);
11360
	} else {
11324
	} else {
11361
	stride = fb->pitches[0] /
11325
		stride = fb->pitches[0] /
11362
		 intel_fb_stride_alignment(dev, fb->modifier[0],
11326
			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
11363
					   fb->pixel_format);
11327
						  fb->pixel_format);
11364
	}
11328
	}
11365
 
11329
 
11366
	/*
11330
	/*
11367
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11331
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11368
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
11332
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
11369
	 */
11333
	 */
11370
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11334
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11371
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11335
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11372
 
11336
 
11373
	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11337
	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11374
	POSTING_READ(PLANE_SURF(pipe, 0));
11338
	POSTING_READ(PLANE_SURF(pipe, 0));
11375
}
11339
}
11376
 
11340
 
11377
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11341
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11378
			     struct intel_unpin_work *work)
11342
			     struct intel_unpin_work *work)
11379
{
11343
{
11380
	struct drm_device *dev = intel_crtc->base.dev;
11344
	struct drm_device *dev = intel_crtc->base.dev;
11381
	struct drm_i915_private *dev_priv = dev->dev_private;
11345
	struct drm_i915_private *dev_priv = dev->dev_private;
11382
	struct intel_framebuffer *intel_fb =
11346
	struct intel_framebuffer *intel_fb =
11383
		to_intel_framebuffer(intel_crtc->base.primary->fb);
11347
		to_intel_framebuffer(intel_crtc->base.primary->fb);
11384
	struct drm_i915_gem_object *obj = intel_fb->obj;
11348
	struct drm_i915_gem_object *obj = intel_fb->obj;
11385
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11349
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11386
	u32 dspcntr;
11350
	u32 dspcntr;
11387
 
11351
 
11388
	dspcntr = I915_READ(reg);
11352
	dspcntr = I915_READ(reg);
11389
 
11353
 
11390
	if (obj->tiling_mode != I915_TILING_NONE)
11354
	if (obj->tiling_mode != I915_TILING_NONE)
11391
		dspcntr |= DISPPLANE_TILED;
11355
		dspcntr |= DISPPLANE_TILED;
11392
	else
11356
	else
11393
		dspcntr &= ~DISPPLANE_TILED;
11357
		dspcntr &= ~DISPPLANE_TILED;
11394
 
11358
 
11395
	I915_WRITE(reg, dspcntr);
11359
	I915_WRITE(reg, dspcntr);
11396
 
11360
 
11397
	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11361
	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11398
	POSTING_READ(DSPSURF(intel_crtc->plane));
11362
	POSTING_READ(DSPSURF(intel_crtc->plane));
11399
}
11363
}
11400
 
11364
 
11401
/*
11365
/*
11402
 * XXX: This is the temporary way to update the plane registers until we get
11366
 * XXX: This is the temporary way to update the plane registers until we get
11403
 * around to using the usual plane update functions for MMIO flips
11367
 * around to using the usual plane update functions for MMIO flips
11404
 */
11368
 */
11405
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11369
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11406
{
11370
{
11407
	struct intel_crtc *crtc = mmio_flip->crtc;
11371
	struct intel_crtc *crtc = mmio_flip->crtc;
11408
	struct intel_unpin_work *work;
11372
	struct intel_unpin_work *work;
11409
 
11373
 
11410
	spin_lock_irq(&crtc->base.dev->event_lock);
11374
	spin_lock_irq(&crtc->base.dev->event_lock);
11411
	work = crtc->unpin_work;
11375
	work = crtc->unpin_work;
11412
	spin_unlock_irq(&crtc->base.dev->event_lock);
11376
	spin_unlock_irq(&crtc->base.dev->event_lock);
11413
	if (work == NULL)
11377
	if (work == NULL)
11414
		return;
11378
		return;
11415
 
11379
 
11416
	intel_mark_page_flip_active(work);
11380
	intel_mark_page_flip_active(work);
11417
 
11381
 
11418
	intel_pipe_update_start(crtc);
11382
	intel_pipe_update_start(crtc);
11419
 
11383
 
11420
	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11384
	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11421
		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11385
		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11422
	else
11386
	else
11423
		/* use_mmio_flip() retricts MMIO flips to ilk+ */
11387
		/* use_mmio_flip() retricts MMIO flips to ilk+ */
11424
		ilk_do_mmio_flip(crtc, work);
11388
		ilk_do_mmio_flip(crtc, work);
11425
 
11389
 
11426
	intel_pipe_update_end(crtc);
11390
	intel_pipe_update_end(crtc);
11427
}
11391
}
11428
 
11392
 
11429
static void intel_mmio_flip_work_func(struct work_struct *work)
11393
static void intel_mmio_flip_work_func(struct work_struct *work)
11430
{
11394
{
11431
	struct intel_mmio_flip *mmio_flip =
11395
	struct intel_mmio_flip *mmio_flip =
11432
		container_of(work, struct intel_mmio_flip, work);
11396
		container_of(work, struct intel_mmio_flip, work);
11433
	struct intel_framebuffer *intel_fb =
11397
	struct intel_framebuffer *intel_fb =
11434
		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
11398
		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
11435
	struct drm_i915_gem_object *obj = intel_fb->obj;
11399
	struct drm_i915_gem_object *obj = intel_fb->obj;
11436
 
11400
 
11437
	if (mmio_flip->req) {
11401
	if (mmio_flip->req) {
11438
		WARN_ON(__i915_wait_request(mmio_flip->req,
11402
		WARN_ON(__i915_wait_request(mmio_flip->req,
11439
					    mmio_flip->crtc->reset_counter,
11403
					    mmio_flip->crtc->reset_counter,
11440
					    false, NULL,
11404
					    false, NULL,
11441
					    &mmio_flip->i915->rps.mmioflips));
11405
					    &mmio_flip->i915->rps.mmioflips));
11442
		i915_gem_request_unreference__unlocked(mmio_flip->req);
11406
		i915_gem_request_unreference__unlocked(mmio_flip->req);
11443
	}
11407
	}
11444
 
11408
 
11445
	/* For framebuffer backed by dmabuf, wait for fence */
11409
	/* For framebuffer backed by dmabuf, wait for fence */
11446
//	if (obj->base.dma_buf)
11410
//	if (obj->base.dma_buf)
11447
//		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
11411
//		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
11448
//							    false, false,
11412
//							    false, false,
11449
//							    MAX_SCHEDULE_TIMEOUT) < 0);
11413
//							    MAX_SCHEDULE_TIMEOUT) < 0);
11450
 
11414
 
11451
	intel_do_mmio_flip(mmio_flip);
11415
	intel_do_mmio_flip(mmio_flip);
11452
	kfree(mmio_flip);
11416
	kfree(mmio_flip);
11453
}
11417
}
11454
 
11418
 
11455
static int intel_queue_mmio_flip(struct drm_device *dev,
11419
static int intel_queue_mmio_flip(struct drm_device *dev,
11456
				 struct drm_crtc *crtc,
11420
				 struct drm_crtc *crtc,
11457
				 struct drm_i915_gem_object *obj)
11421
				 struct drm_i915_gem_object *obj)
11458
{
11422
{
11459
	struct intel_mmio_flip *mmio_flip;
11423
	struct intel_mmio_flip *mmio_flip;
11460
 
11424
 
11461
	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11425
	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11462
	if (mmio_flip == NULL)
11426
	if (mmio_flip == NULL)
11463
		return -ENOMEM;
11427
		return -ENOMEM;
11464
 
11428
 
11465
	mmio_flip->i915 = to_i915(dev);
11429
	mmio_flip->i915 = to_i915(dev);
11466
	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11430
	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11467
	mmio_flip->crtc = to_intel_crtc(crtc);
11431
	mmio_flip->crtc = to_intel_crtc(crtc);
11468
	mmio_flip->rotation = crtc->primary->state->rotation;
11432
	mmio_flip->rotation = crtc->primary->state->rotation;
11469
 
11433
 
11470
	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11434
	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11471
	schedule_work(&mmio_flip->work);
11435
	schedule_work(&mmio_flip->work);
11472
 
11436
 
11473
	return 0;
11437
	return 0;
11474
}
11438
}
11475
 
11439
 
11476
static int intel_default_queue_flip(struct drm_device *dev,
11440
static int intel_default_queue_flip(struct drm_device *dev,
11477
				    struct drm_crtc *crtc,
11441
				    struct drm_crtc *crtc,
11478
				    struct drm_framebuffer *fb,
11442
				    struct drm_framebuffer *fb,
11479
				    struct drm_i915_gem_object *obj,
11443
				    struct drm_i915_gem_object *obj,
11480
				    struct drm_i915_gem_request *req,
11444
				    struct drm_i915_gem_request *req,
11481
				    uint32_t flags)
11445
				    uint32_t flags)
11482
{
11446
{
11483
	return -ENODEV;
11447
	return -ENODEV;
11484
}
11448
}
11485
 
11449
 
11486
static bool __intel_pageflip_stall_check(struct drm_device *dev,
11450
static bool __intel_pageflip_stall_check(struct drm_device *dev,
11487
					 struct drm_crtc *crtc)
11451
					 struct drm_crtc *crtc)
11488
{
11452
{
11489
	struct drm_i915_private *dev_priv = dev->dev_private;
11453
	struct drm_i915_private *dev_priv = dev->dev_private;
11490
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11454
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11491
	struct intel_unpin_work *work = intel_crtc->unpin_work;
11455
	struct intel_unpin_work *work = intel_crtc->unpin_work;
11492
	u32 addr;
11456
	u32 addr;
11493
 
11457
 
11494
	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11458
	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11495
		return true;
11459
		return true;
11496
 
11460
 
11497
	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11461
	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11498
		return false;
11462
		return false;
11499
 
11463
 
11500
	if (!work->enable_stall_check)
11464
	if (!work->enable_stall_check)
11501
		return false;
11465
		return false;
11502
 
11466
 
11503
	if (work->flip_ready_vblank == 0) {
11467
	if (work->flip_ready_vblank == 0) {
11504
		if (work->flip_queued_req &&
11468
		if (work->flip_queued_req &&
11505
		    !i915_gem_request_completed(work->flip_queued_req, true))
11469
		    !i915_gem_request_completed(work->flip_queued_req, true))
11506
			return false;
11470
			return false;
11507
 
11471
 
11508
		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
11472
		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
11509
	}
11473
	}
11510
 
11474
 
11511
	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
11475
	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
11512
		return false;
11476
		return false;
11513
 
11477
 
11514
	/* Potential stall - if we see that the flip has happened,
11478
	/* Potential stall - if we see that the flip has happened,
11515
	 * assume a missed interrupt. */
11479
	 * assume a missed interrupt. */
11516
	if (INTEL_INFO(dev)->gen >= 4)
11480
	if (INTEL_INFO(dev)->gen >= 4)
11517
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11481
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11518
	else
11482
	else
11519
		addr = I915_READ(DSPADDR(intel_crtc->plane));
11483
		addr = I915_READ(DSPADDR(intel_crtc->plane));
11520
 
11484
 
11521
	/* There is a potential issue here with a false positive after a flip
11485
	/* There is a potential issue here with a false positive after a flip
11522
	 * to the same address. We could address this by checking for a
11486
	 * to the same address. We could address this by checking for a
11523
	 * non-incrementing frame counter.
11487
	 * non-incrementing frame counter.
11524
	 */
11488
	 */
11525
	return addr == work->gtt_offset;
11489
	return addr == work->gtt_offset;
11526
}
11490
}
11527
 
11491
 
11528
void intel_check_page_flip(struct drm_device *dev, int pipe)
11492
void intel_check_page_flip(struct drm_device *dev, int pipe)
11529
{
11493
{
11530
	struct drm_i915_private *dev_priv = dev->dev_private;
11494
	struct drm_i915_private *dev_priv = dev->dev_private;
11531
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11495
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11532
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11496
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11533
	struct intel_unpin_work *work;
11497
	struct intel_unpin_work *work;
11534
 
11498
 
11535
	WARN_ON(!in_interrupt());
11499
	WARN_ON(!in_interrupt());
11536
 
11500
 
11537
	if (crtc == NULL)
11501
	if (crtc == NULL)
11538
		return;
11502
		return;
11539
 
11503
 
11540
	spin_lock(&dev->event_lock);
11504
	spin_lock(&dev->event_lock);
11541
	work = intel_crtc->unpin_work;
11505
	work = intel_crtc->unpin_work;
11542
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
11506
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
11543
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
11507
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
11544
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
11508
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
11545
		page_flip_completed(intel_crtc);
11509
		page_flip_completed(intel_crtc);
11546
		work = NULL;
11510
		work = NULL;
11547
	}
11511
	}
11548
	if (work != NULL &&
11512
	if (work != NULL &&
11549
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
11513
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
11550
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
11514
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
11551
	spin_unlock(&dev->event_lock);
11515
	spin_unlock(&dev->event_lock);
11552
}
11516
}
11553
 
11517
 
11554
static int intel_crtc_page_flip(struct drm_crtc *crtc,
11518
static int intel_crtc_page_flip(struct drm_crtc *crtc,
11555
				struct drm_framebuffer *fb,
11519
				struct drm_framebuffer *fb,
11556
				struct drm_pending_vblank_event *event,
11520
				struct drm_pending_vblank_event *event,
11557
				uint32_t page_flip_flags)
11521
				uint32_t page_flip_flags)
11558
{
11522
{
11559
	struct drm_device *dev = crtc->dev;
11523
	struct drm_device *dev = crtc->dev;
11560
	struct drm_i915_private *dev_priv = dev->dev_private;
11524
	struct drm_i915_private *dev_priv = dev->dev_private;
11561
	struct drm_framebuffer *old_fb = crtc->primary->fb;
11525
	struct drm_framebuffer *old_fb = crtc->primary->fb;
11562
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11526
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11563
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11527
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11564
	struct drm_plane *primary = crtc->primary;
11528
	struct drm_plane *primary = crtc->primary;
11565
	enum pipe pipe = intel_crtc->pipe;
11529
	enum pipe pipe = intel_crtc->pipe;
11566
	struct intel_unpin_work *work;
11530
	struct intel_unpin_work *work;
11567
	struct intel_engine_cs *ring;
11531
	struct intel_engine_cs *ring;
11568
	bool mmio_flip;
11532
	bool mmio_flip;
11569
	struct drm_i915_gem_request *request = NULL;
11533
	struct drm_i915_gem_request *request = NULL;
11570
	int ret;
11534
	int ret;
11571
 
11535
 
11572
	/*
11536
	/*
11573
	 * drm_mode_page_flip_ioctl() should already catch this, but double
11537
	 * drm_mode_page_flip_ioctl() should already catch this, but double
11574
	 * check to be safe.  In the future we may enable pageflipping from
11538
	 * check to be safe.  In the future we may enable pageflipping from
11575
	 * a disabled primary plane.
11539
	 * a disabled primary plane.
11576
	 */
11540
	 */
11577
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11541
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11578
		return -EBUSY;
11542
		return -EBUSY;
11579
 
11543
 
11580
	/* Can't change pixel format via MI display flips. */
11544
	/* Can't change pixel format via MI display flips. */
11581
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
11545
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
11582
		return -EINVAL;
11546
		return -EINVAL;
11583
 
11547
 
11584
	/*
11548
	/*
11585
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11549
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11586
	 * Note that pitch changes could also affect these register.
11550
	 * Note that pitch changes could also affect these register.
11587
	 */
11551
	 */
11588
	if (INTEL_INFO(dev)->gen > 3 &&
11552
	if (INTEL_INFO(dev)->gen > 3 &&
11589
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11553
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11590
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
11554
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
11591
		return -EINVAL;
11555
		return -EINVAL;
11592
 
11556
 
11593
	if (i915_terminally_wedged(&dev_priv->gpu_error))
11557
	if (i915_terminally_wedged(&dev_priv->gpu_error))
11594
		goto out_hang;
11558
		goto out_hang;
11595
 
11559
 
11596
	work = kzalloc(sizeof(*work), GFP_KERNEL);
11560
	work = kzalloc(sizeof(*work), GFP_KERNEL);
11597
	if (work == NULL)
11561
	if (work == NULL)
11598
		return -ENOMEM;
11562
		return -ENOMEM;
11599
 
11563
 
11600
	work->event = event;
11564
	work->event = event;
11601
	work->crtc = crtc;
11565
	work->crtc = crtc;
11602
	work->old_fb = old_fb;
11566
	work->old_fb = old_fb;
11603
	INIT_WORK(&work->work, intel_unpin_work_fn);
11567
	INIT_WORK(&work->work, intel_unpin_work_fn);
11604
 
11568
 
11605
	ret = drm_crtc_vblank_get(crtc);
11569
	ret = drm_crtc_vblank_get(crtc);
11606
	if (ret)
11570
	if (ret)
11607
		goto free_work;
11571
		goto free_work;
11608
 
11572
 
11609
	/* We borrow the event spin lock for protecting unpin_work */
11573
	/* We borrow the event spin lock for protecting unpin_work */
11610
	spin_lock_irq(&dev->event_lock);
11574
	spin_lock_irq(&dev->event_lock);
11611
	if (intel_crtc->unpin_work) {
11575
	if (intel_crtc->unpin_work) {
11612
		/* Before declaring the flip queue wedged, check if
11576
		/* Before declaring the flip queue wedged, check if
11613
		 * the hardware completed the operation behind our backs.
11577
		 * the hardware completed the operation behind our backs.
11614
		 */
11578
		 */
11615
		if (__intel_pageflip_stall_check(dev, crtc)) {
11579
		if (__intel_pageflip_stall_check(dev, crtc)) {
11616
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11580
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11617
			page_flip_completed(intel_crtc);
11581
			page_flip_completed(intel_crtc);
11618
		} else {
11582
		} else {
11619
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11583
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11620
			spin_unlock_irq(&dev->event_lock);
11584
			spin_unlock_irq(&dev->event_lock);
11621
 
11585
 
11622
			drm_crtc_vblank_put(crtc);
11586
			drm_crtc_vblank_put(crtc);
11623
			kfree(work);
11587
			kfree(work);
11624
			return -EBUSY;
11588
			return -EBUSY;
11625
		}
11589
		}
11626
	}
11590
	}
11627
	intel_crtc->unpin_work = work;
11591
	intel_crtc->unpin_work = work;
11628
	spin_unlock_irq(&dev->event_lock);
11592
	spin_unlock_irq(&dev->event_lock);
11629
 
11593
 
11630
//   if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11594
//   if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11631
//       flush_workqueue(dev_priv->wq);
11595
//       flush_workqueue(dev_priv->wq);
11632
 
11596
 
11633
	/* Reference the objects for the scheduled work. */
11597
	/* Reference the objects for the scheduled work. */
11634
	drm_framebuffer_reference(work->old_fb);
11598
	drm_framebuffer_reference(work->old_fb);
11635
	drm_gem_object_reference(&obj->base);
11599
	drm_gem_object_reference(&obj->base);
11636
 
11600
 
11637
	crtc->primary->fb = fb;
11601
	crtc->primary->fb = fb;
11638
	update_state_fb(crtc->primary);
11602
	update_state_fb(crtc->primary);
-
 
11603
	intel_fbc_pre_update(intel_crtc);
11639
 
11604
 
11640
	work->pending_flip_obj = obj;
11605
	work->pending_flip_obj = obj;
11641
 
11606
 
11642
	ret = i915_mutex_lock_interruptible(dev);
11607
	ret = i915_mutex_lock_interruptible(dev);
11643
	if (ret)
11608
	if (ret)
11644
		goto cleanup;
11609
		goto cleanup;
11645
 
11610
 
11646
	atomic_inc(&intel_crtc->unpin_work_count);
11611
	atomic_inc(&intel_crtc->unpin_work_count);
11647
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
11612
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
11648
 
11613
 
11649
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11614
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11650
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11615
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11651
 
11616
 
11652
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11617
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11653
		ring = &dev_priv->ring[BCS];
11618
		ring = &dev_priv->ring[BCS];
11654
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11619
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11655
			/* vlv: DISPLAY_FLIP fails to change tiling */
11620
			/* vlv: DISPLAY_FLIP fails to change tiling */
11656
			ring = NULL;
11621
			ring = NULL;
11657
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11622
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11658
		ring = &dev_priv->ring[BCS];
11623
		ring = &dev_priv->ring[BCS];
11659
	} else if (INTEL_INFO(dev)->gen >= 7) {
11624
	} else if (INTEL_INFO(dev)->gen >= 7) {
11660
		ring = i915_gem_request_get_ring(obj->last_write_req);
11625
		ring = i915_gem_request_get_ring(obj->last_write_req);
11661
		if (ring == NULL || ring->id != RCS)
11626
		if (ring == NULL || ring->id != RCS)
11662
			ring = &dev_priv->ring[BCS];
11627
			ring = &dev_priv->ring[BCS];
11663
	} else {
11628
	} else {
11664
		ring = &dev_priv->ring[RCS];
11629
		ring = &dev_priv->ring[RCS];
11665
	}
11630
	}
11666
 
11631
 
11667
	mmio_flip = use_mmio_flip(ring, obj);
11632
	mmio_flip = use_mmio_flip(ring, obj);
11668
 
11633
 
11669
	/* When using CS flips, we want to emit semaphores between rings.
11634
	/* When using CS flips, we want to emit semaphores between rings.
11670
	 * However, when using mmio flips we will create a task to do the
11635
	 * However, when using mmio flips we will create a task to do the
11671
	 * synchronisation, so all we want here is to pin the framebuffer
11636
	 * synchronisation, so all we want here is to pin the framebuffer
11672
	 * into the display plane and skip any waits.
11637
	 * into the display plane and skip any waits.
11673
	 */
11638
	 */
11674
	if (!mmio_flip) {
11639
	if (!mmio_flip) {
11675
		ret = i915_gem_object_sync(obj, ring, &request);
11640
		ret = i915_gem_object_sync(obj, ring, &request);
11676
		if (ret)
11641
		if (ret)
11677
			goto cleanup_pending;
11642
			goto cleanup_pending;
11678
	}
11643
	}
11679
 
11644
 
11680
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
11645
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
11681
					 crtc->primary->state);
11646
					 crtc->primary->state);
11682
	if (ret)
11647
	if (ret)
11683
		goto cleanup_pending;
11648
		goto cleanup_pending;
11684
 
11649
 
11685
	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11650
	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11686
						  obj, 0);
11651
						  obj, 0);
11687
	work->gtt_offset += intel_crtc->dspaddr_offset;
11652
	work->gtt_offset += intel_crtc->dspaddr_offset;
11688
 
11653
 
11689
	if (mmio_flip) {
11654
	if (mmio_flip) {
11690
		ret = intel_queue_mmio_flip(dev, crtc, obj);
11655
		ret = intel_queue_mmio_flip(dev, crtc, obj);
11691
		if (ret)
11656
		if (ret)
11692
			goto cleanup_unpin;
11657
			goto cleanup_unpin;
11693
 
11658
 
11694
		i915_gem_request_assign(&work->flip_queued_req,
11659
		i915_gem_request_assign(&work->flip_queued_req,
11695
					obj->last_write_req);
11660
					obj->last_write_req);
11696
	} else {
11661
	} else {
11697
		if (!request) {
11662
		if (!request) {
11698
			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
11663
			request = i915_gem_request_alloc(ring, NULL);
11699
			if (ret)
11664
			if (IS_ERR(request)) {
-
 
11665
				ret = PTR_ERR(request);
11700
				goto cleanup_unpin;
11666
				goto cleanup_unpin;
11701
		}
11667
			}
-
 
11668
		}
11702
 
11669
 
11703
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11670
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11704
						   page_flip_flags);
11671
						   page_flip_flags);
11705
		if (ret)
11672
		if (ret)
11706
			goto cleanup_unpin;
11673
			goto cleanup_unpin;
11707
 
11674
 
11708
		i915_gem_request_assign(&work->flip_queued_req, request);
11675
		i915_gem_request_assign(&work->flip_queued_req, request);
11709
	}
11676
	}
11710
 
11677
 
11711
	if (request)
11678
	if (request)
11712
		i915_add_request_no_flush(request);
11679
		i915_add_request_no_flush(request);
11713
 
11680
 
11714
	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
11681
	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
11715
	work->enable_stall_check = true;
11682
	work->enable_stall_check = true;
11716
 
11683
 
11717
	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11684
	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11718
			  to_intel_plane(primary)->frontbuffer_bit);
11685
			  to_intel_plane(primary)->frontbuffer_bit);
11719
	mutex_unlock(&dev->struct_mutex);
11686
	mutex_unlock(&dev->struct_mutex);
11720
 
-
 
11721
	intel_fbc_deactivate(intel_crtc);
11687
 
11722
	intel_frontbuffer_flip_prepare(dev,
11688
	intel_frontbuffer_flip_prepare(dev,
11723
				       to_intel_plane(primary)->frontbuffer_bit);
11689
				       to_intel_plane(primary)->frontbuffer_bit);
11724
 
11690
 
11725
	trace_i915_flip_request(intel_crtc->plane, obj);
11691
	trace_i915_flip_request(intel_crtc->plane, obj);
11726
 
11692
 
11727
	return 0;
11693
	return 0;
11728
 
11694
 
11729
cleanup_unpin:
11695
cleanup_unpin:
11730
	intel_unpin_fb_obj(fb, crtc->primary->state);
11696
	intel_unpin_fb_obj(fb, crtc->primary->state);
11731
cleanup_pending:
11697
cleanup_pending:
11732
	if (request)
11698
	if (!IS_ERR_OR_NULL(request))
11733
		i915_gem_request_cancel(request);
11699
		i915_gem_request_cancel(request);
11734
	atomic_dec(&intel_crtc->unpin_work_count);
11700
	atomic_dec(&intel_crtc->unpin_work_count);
11735
	mutex_unlock(&dev->struct_mutex);
11701
	mutex_unlock(&dev->struct_mutex);
11736
cleanup:
11702
cleanup:
11737
	crtc->primary->fb = old_fb;
11703
	crtc->primary->fb = old_fb;
11738
	update_state_fb(crtc->primary);
11704
	update_state_fb(crtc->primary);
11739
 
11705
 
11740
	drm_gem_object_unreference_unlocked(&obj->base);
11706
	drm_gem_object_unreference_unlocked(&obj->base);
11741
	drm_framebuffer_unreference(work->old_fb);
11707
	drm_framebuffer_unreference(work->old_fb);
11742
 
11708
 
11743
	spin_lock_irq(&dev->event_lock);
11709
	spin_lock_irq(&dev->event_lock);
11744
	intel_crtc->unpin_work = NULL;
11710
	intel_crtc->unpin_work = NULL;
11745
	spin_unlock_irq(&dev->event_lock);
11711
	spin_unlock_irq(&dev->event_lock);
11746
 
11712
 
11747
	drm_crtc_vblank_put(crtc);
11713
	drm_crtc_vblank_put(crtc);
11748
free_work:
11714
free_work:
11749
	kfree(work);
11715
	kfree(work);
11750
 
11716
 
11751
	if (ret == -EIO) {
11717
	if (ret == -EIO) {
11752
		struct drm_atomic_state *state;
11718
		struct drm_atomic_state *state;
11753
		struct drm_plane_state *plane_state;
11719
		struct drm_plane_state *plane_state;
11754
 
11720
 
11755
out_hang:
11721
out_hang:
11756
		state = drm_atomic_state_alloc(dev);
11722
		state = drm_atomic_state_alloc(dev);
11757
		if (!state)
11723
		if (!state)
11758
			return -ENOMEM;
11724
			return -ENOMEM;
11759
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11725
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11760
 
11726
 
11761
retry:
11727
retry:
11762
		plane_state = drm_atomic_get_plane_state(state, primary);
11728
		plane_state = drm_atomic_get_plane_state(state, primary);
11763
		ret = PTR_ERR_OR_ZERO(plane_state);
11729
		ret = PTR_ERR_OR_ZERO(plane_state);
11764
		if (!ret) {
11730
		if (!ret) {
11765
			drm_atomic_set_fb_for_plane(plane_state, fb);
11731
			drm_atomic_set_fb_for_plane(plane_state, fb);
11766
 
11732
 
11767
			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11733
			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11768
			if (!ret)
11734
			if (!ret)
11769
				ret = drm_atomic_commit(state);
11735
				ret = drm_atomic_commit(state);
11770
		}
11736
		}
11771
 
11737
 
11772
		if (ret == -EDEADLK) {
11738
		if (ret == -EDEADLK) {
11773
			drm_modeset_backoff(state->acquire_ctx);
11739
			drm_modeset_backoff(state->acquire_ctx);
11774
			drm_atomic_state_clear(state);
11740
			drm_atomic_state_clear(state);
11775
			goto retry;
11741
			goto retry;
11776
		}
11742
		}
11777
 
11743
 
11778
		if (ret)
11744
		if (ret)
11779
			drm_atomic_state_free(state);
11745
			drm_atomic_state_free(state);
11780
 
11746
 
11781
		if (ret == 0 && event) {
11747
		if (ret == 0 && event) {
11782
			spin_lock_irq(&dev->event_lock);
11748
			spin_lock_irq(&dev->event_lock);
11783
			drm_send_vblank_event(dev, pipe, event);
11749
			drm_send_vblank_event(dev, pipe, event);
11784
			spin_unlock_irq(&dev->event_lock);
11750
			spin_unlock_irq(&dev->event_lock);
11785
		}
11751
		}
11786
	}
11752
	}
11787
	return ret;
11753
	return ret;
11788
}
11754
}
11789
 
11755
 
11790
 
11756
 
11791
/**
11757
/**
11792
 * intel_wm_need_update - Check whether watermarks need updating
11758
 * intel_wm_need_update - Check whether watermarks need updating
11793
 * @plane: drm plane
11759
 * @plane: drm plane
11794
 * @state: new plane state
11760
 * @state: new plane state
11795
 *
11761
 *
11796
 * Check current plane state versus the new one to determine whether
11762
 * Check current plane state versus the new one to determine whether
11797
 * watermarks need to be recalculated.
11763
 * watermarks need to be recalculated.
11798
 *
11764
 *
11799
 * Returns true or false.
11765
 * Returns true or false.
11800
 */
11766
 */
11801
static bool intel_wm_need_update(struct drm_plane *plane,
11767
static bool intel_wm_need_update(struct drm_plane *plane,
11802
				 struct drm_plane_state *state)
11768
				 struct drm_plane_state *state)
11803
{
11769
{
11804
	struct intel_plane_state *new = to_intel_plane_state(state);
11770
	struct intel_plane_state *new = to_intel_plane_state(state);
11805
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11771
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11806
 
11772
 
11807
	/* Update watermarks on tiling or size changes. */
11773
	/* Update watermarks on tiling or size changes. */
11808
	if (new->visible != cur->visible)
11774
	if (new->visible != cur->visible)
11809
		return true;
11775
		return true;
11810
 
11776
 
11811
	if (!cur->base.fb || !new->base.fb)
11777
	if (!cur->base.fb || !new->base.fb)
11812
		return false;
11778
		return false;
11813
 
11779
 
11814
	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11780
	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11815
	    cur->base.rotation != new->base.rotation ||
11781
	    cur->base.rotation != new->base.rotation ||
11816
	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11782
	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11817
	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11783
	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11818
	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11784
	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11819
	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11785
	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11820
		return true;
11786
		return true;
11821
 
11787
 
11822
	return false;
11788
	return false;
11823
}
11789
}
11824
 
11790
 
11825
static bool needs_scaling(struct intel_plane_state *state)
11791
static bool needs_scaling(struct intel_plane_state *state)
11826
{
11792
{
11827
	int src_w = drm_rect_width(&state->src) >> 16;
11793
	int src_w = drm_rect_width(&state->src) >> 16;
11828
	int src_h = drm_rect_height(&state->src) >> 16;
11794
	int src_h = drm_rect_height(&state->src) >> 16;
11829
	int dst_w = drm_rect_width(&state->dst);
11795
	int dst_w = drm_rect_width(&state->dst);
11830
	int dst_h = drm_rect_height(&state->dst);
11796
	int dst_h = drm_rect_height(&state->dst);
11831
 
11797
 
11832
	return (src_w != dst_w || src_h != dst_h);
11798
	return (src_w != dst_w || src_h != dst_h);
11833
}
11799
}
11834
 
11800
 
11835
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11801
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11836
				    struct drm_plane_state *plane_state)
11802
				    struct drm_plane_state *plane_state)
11837
{
11803
{
11838
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11804
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11839
	struct drm_crtc *crtc = crtc_state->crtc;
11805
	struct drm_crtc *crtc = crtc_state->crtc;
11840
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11806
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11841
	struct drm_plane *plane = plane_state->plane;
11807
	struct drm_plane *plane = plane_state->plane;
11842
	struct drm_device *dev = crtc->dev;
11808
	struct drm_device *dev = crtc->dev;
11843
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
11844
	struct intel_plane_state *old_plane_state =
11809
	struct intel_plane_state *old_plane_state =
11845
		to_intel_plane_state(plane->state);
11810
		to_intel_plane_state(plane->state);
11846
	int idx = intel_crtc->base.base.id, ret;
11811
	int idx = intel_crtc->base.base.id, ret;
11847
	int i = drm_plane_index(plane);
-
 
11848
	bool mode_changed = needs_modeset(crtc_state);
11812
	bool mode_changed = needs_modeset(crtc_state);
11849
	bool was_crtc_enabled = crtc->state->active;
11813
	bool was_crtc_enabled = crtc->state->active;
11850
	bool is_crtc_enabled = crtc_state->active;
11814
	bool is_crtc_enabled = crtc_state->active;
11851
	bool turn_off, turn_on, visible, was_visible;
11815
	bool turn_off, turn_on, visible, was_visible;
11852
	struct drm_framebuffer *fb = plane_state->fb;
11816
	struct drm_framebuffer *fb = plane_state->fb;
11853
 
11817
 
11854
	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11818
	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11855
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
11819
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
11856
		ret = skl_update_scaler_plane(
11820
		ret = skl_update_scaler_plane(
11857
			to_intel_crtc_state(crtc_state),
11821
			to_intel_crtc_state(crtc_state),
11858
			to_intel_plane_state(plane_state));
11822
			to_intel_plane_state(plane_state));
11859
		if (ret)
11823
		if (ret)
11860
			return ret;
11824
			return ret;
11861
	}
11825
	}
11862
 
11826
 
11863
	was_visible = old_plane_state->visible;
11827
	was_visible = old_plane_state->visible;
11864
	visible = to_intel_plane_state(plane_state)->visible;
11828
	visible = to_intel_plane_state(plane_state)->visible;
11865
 
11829
 
11866
	if (!was_crtc_enabled && WARN_ON(was_visible))
11830
	if (!was_crtc_enabled && WARN_ON(was_visible))
11867
		was_visible = false;
11831
		was_visible = false;
-
 
11832
 
11868
 
11833
	/*
-
 
11834
	 * Visibility is calculated as if the crtc was on, but
-
 
11835
	 * after scaler setup everything depends on it being off
-
 
11836
	 * when the crtc isn't active.
11869
	if (!is_crtc_enabled && WARN_ON(visible))
11837
	 */
-
 
11838
	if (!is_crtc_enabled)
11870
		visible = false;
11839
		to_intel_plane_state(plane_state)->visible = visible = false;
11871
 
11840
 
11872
	if (!was_visible && !visible)
11841
	if (!was_visible && !visible)
11873
		return 0;
11842
		return 0;
-
 
11843
 
-
 
11844
	if (fb != old_plane_state->base.fb)
-
 
11845
		pipe_config->fb_changed = true;
11874
 
11846
 
11875
	turn_off = was_visible && (!visible || mode_changed);
11847
	turn_off = was_visible && (!visible || mode_changed);
11876
	turn_on = visible && (!was_visible || mode_changed);
11848
	turn_on = visible && (!was_visible || mode_changed);
11877
 
11849
 
11878
	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
11850
	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
11879
			 plane->base.id, fb ? fb->base.id : -1);
11851
			 plane->base.id, fb ? fb->base.id : -1);
11880
 
11852
 
11881
	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
11853
	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
11882
			 plane->base.id, was_visible, visible,
11854
			 plane->base.id, was_visible, visible,
11883
			 turn_off, turn_on, mode_changed);
11855
			 turn_off, turn_on, mode_changed);
11884
 
11856
 
11885
	if (turn_on) {
11857
	if (turn_on) {
11886
		pipe_config->update_wm_pre = true;
11858
		pipe_config->update_wm_pre = true;
11887
 
11859
 
11888
		/* must disable cxsr around plane enable/disable */
11860
		/* must disable cxsr around plane enable/disable */
11889
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
11861
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
11890
			pipe_config->disable_cxsr = true;
11862
			pipe_config->disable_cxsr = true;
11891
	} else if (turn_off) {
11863
	} else if (turn_off) {
11892
		pipe_config->update_wm_post = true;
11864
		pipe_config->update_wm_post = true;
11893
 
11865
 
11894
		/* must disable cxsr around plane enable/disable */
11866
		/* must disable cxsr around plane enable/disable */
11895
		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
11867
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
11896
			if (is_crtc_enabled)
-
 
11897
				intel_crtc->atomic.wait_vblank = true;
-
 
11898
			pipe_config->disable_cxsr = true;
11868
			pipe_config->disable_cxsr = true;
11899
		}
-
 
11900
	} else if (intel_wm_need_update(plane, plane_state)) {
11869
	} else if (intel_wm_need_update(plane, plane_state)) {
11901
		/* FIXME bollocks */
11870
		/* FIXME bollocks */
11902
		pipe_config->update_wm_pre = true;
11871
		pipe_config->update_wm_pre = true;
11903
		pipe_config->update_wm_post = true;
11872
		pipe_config->update_wm_post = true;
11904
	}
11873
	}
11905
 
11874
 
11906
	if (visible || was_visible)
11875
	if (visible || was_visible)
11907
		intel_crtc->atomic.fb_bits |=
11876
		intel_crtc->atomic.fb_bits |=
11908
			to_intel_plane(plane)->frontbuffer_bit;
11877
			to_intel_plane(plane)->frontbuffer_bit;
11909
 
11878
 
11910
	switch (plane->type) {
11879
	switch (plane->type) {
11911
	case DRM_PLANE_TYPE_PRIMARY:
11880
	case DRM_PLANE_TYPE_PRIMARY:
11912
		intel_crtc->atomic.pre_disable_primary = turn_off;
-
 
11913
		intel_crtc->atomic.post_enable_primary = turn_on;
11881
		intel_crtc->atomic.post_enable_primary = turn_on;
-
 
11882
		intel_crtc->atomic.update_fbc = true;
11914
 
-
 
11915
		if (turn_off) {
-
 
11916
			/*
-
 
11917
			 * FIXME: Actually if we will still have any other
-
 
11918
			 * plane enabled on the pipe we could let IPS enabled
-
 
11919
			 * still, but for now lets consider that when we make
-
 
11920
			 * primary invisible by setting DSPCNTR to 0 on
-
 
11921
			 * update_primary_plane function IPS needs to be
-
 
11922
			 * disable.
-
 
11923
			 */
-
 
11924
			intel_crtc->atomic.disable_ips = true;
-
 
11925
 
-
 
11926
			intel_crtc->atomic.disable_fbc = true;
-
 
11927
		}
-
 
11928
 
-
 
11929
		/*
-
 
11930
		 * FBC does not work on some platforms for rotated
-
 
11931
		 * planes, so disable it when rotation is not 0 and
-
 
11932
		 * update it when rotation is set back to 0.
-
 
11933
		 *
-
 
11934
		 * FIXME: This is redundant with the fbc update done in
-
 
11935
		 * the primary plane enable function except that that
-
 
11936
		 * one is done too late. We eventually need to unify
-
 
11937
		 * this.
-
 
11938
		 */
-
 
11939
 
-
 
11940
		if (visible &&
-
 
11941
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-
 
11942
		    dev_priv->fbc.crtc == intel_crtc &&
-
 
11943
		    plane_state->rotation != BIT(DRM_ROTATE_0))
-
 
11944
			intel_crtc->atomic.disable_fbc = true;
-
 
11945
 
-
 
11946
		/*
-
 
11947
		 * BDW signals flip done immediately if the plane
-
 
11948
		 * is disabled, even if the plane enable is already
-
 
11949
		 * armed to occur at the next vblank :(
-
 
11950
		 */
-
 
11951
		if (turn_on && IS_BROADWELL(dev))
-
 
11952
			intel_crtc->atomic.wait_vblank = true;
-
 
11953
 
-
 
11954
		intel_crtc->atomic.update_fbc |= visible || mode_changed;
11883
 
11955
		break;
11884
		break;
11956
	case DRM_PLANE_TYPE_CURSOR:
11885
	case DRM_PLANE_TYPE_CURSOR:
11957
		break;
11886
		break;
11958
	case DRM_PLANE_TYPE_OVERLAY:
11887
	case DRM_PLANE_TYPE_OVERLAY:
11959
		/*
11888
		/*
11960
		 * WaCxSRDisabledForSpriteScaling:ivb
11889
		 * WaCxSRDisabledForSpriteScaling:ivb
11961
		 *
11890
		 *
11962
		 * cstate->update_wm was already set above, so this flag will
11891
		 * cstate->update_wm was already set above, so this flag will
11963
		 * take effect when we commit and program watermarks.
11892
		 * take effect when we commit and program watermarks.
11964
		 */
11893
		 */
11965
		if (IS_IVYBRIDGE(dev) &&
11894
		if (IS_IVYBRIDGE(dev) &&
11966
		    needs_scaling(to_intel_plane_state(plane_state)) &&
11895
		    needs_scaling(to_intel_plane_state(plane_state)) &&
11967
		    !needs_scaling(old_plane_state)) {
11896
		    !needs_scaling(old_plane_state))
11968
			to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
11897
			pipe_config->disable_lp_wm = true;
11969
		} else if (turn_off && !mode_changed) {
-
 
11970
			intel_crtc->atomic.wait_vblank = true;
-
 
11971
			intel_crtc->atomic.update_sprite_watermarks |=
-
 
11972
				1 << i;
-
 
11973
		}
-
 
11974
 
11898
 
11975
		break;
11899
		break;
11976
	}
11900
	}
11977
	return 0;
11901
	return 0;
11978
}
11902
}
11979
 
11903
 
11980
static bool encoders_cloneable(const struct intel_encoder *a,
11904
static bool encoders_cloneable(const struct intel_encoder *a,
11981
			       const struct intel_encoder *b)
11905
			       const struct intel_encoder *b)
11982
{
11906
{
11983
	/* masks could be asymmetric, so check both ways */
11907
	/* masks could be asymmetric, so check both ways */
11984
	return a == b || (a->cloneable & (1 << b->type) &&
11908
	return a == b || (a->cloneable & (1 << b->type) &&
11985
			  b->cloneable & (1 << a->type));
11909
			  b->cloneable & (1 << a->type));
11986
}
11910
}
11987
 
11911
 
11988
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11912
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11989
					 struct intel_crtc *crtc,
11913
					 struct intel_crtc *crtc,
11990
					 struct intel_encoder *encoder)
11914
					 struct intel_encoder *encoder)
11991
{
11915
{
11992
	struct intel_encoder *source_encoder;
11916
	struct intel_encoder *source_encoder;
11993
	struct drm_connector *connector;
11917
	struct drm_connector *connector;
11994
	struct drm_connector_state *connector_state;
11918
	struct drm_connector_state *connector_state;
11995
	int i;
11919
	int i;
11996
 
11920
 
11997
	for_each_connector_in_state(state, connector, connector_state, i) {
11921
	for_each_connector_in_state(state, connector, connector_state, i) {
11998
		if (connector_state->crtc != &crtc->base)
11922
		if (connector_state->crtc != &crtc->base)
11999
			continue;
11923
			continue;
12000
 
11924
 
12001
		source_encoder =
11925
		source_encoder =
12002
			to_intel_encoder(connector_state->best_encoder);
11926
			to_intel_encoder(connector_state->best_encoder);
12003
		if (!encoders_cloneable(encoder, source_encoder))
11927
		if (!encoders_cloneable(encoder, source_encoder))
12004
			return false;
11928
			return false;
12005
	}
11929
	}
12006
 
11930
 
12007
	return true;
11931
	return true;
12008
}
11932
}
12009
 
11933
 
12010
static bool check_encoder_cloning(struct drm_atomic_state *state,
11934
static bool check_encoder_cloning(struct drm_atomic_state *state,
12011
				  struct intel_crtc *crtc)
11935
				  struct intel_crtc *crtc)
12012
{
11936
{
12013
	struct intel_encoder *encoder;
11937
	struct intel_encoder *encoder;
12014
	struct drm_connector *connector;
11938
	struct drm_connector *connector;
12015
	struct drm_connector_state *connector_state;
11939
	struct drm_connector_state *connector_state;
12016
	int i;
11940
	int i;
12017
 
11941
 
12018
	for_each_connector_in_state(state, connector, connector_state, i) {
11942
	for_each_connector_in_state(state, connector, connector_state, i) {
12019
		if (connector_state->crtc != &crtc->base)
11943
		if (connector_state->crtc != &crtc->base)
12020
			continue;
11944
			continue;
12021
 
11945
 
12022
		encoder = to_intel_encoder(connector_state->best_encoder);
11946
		encoder = to_intel_encoder(connector_state->best_encoder);
12023
		if (!check_single_encoder_cloning(state, crtc, encoder))
11947
		if (!check_single_encoder_cloning(state, crtc, encoder))
12024
			return false;
11948
			return false;
12025
	}
11949
	}
12026
 
11950
 
12027
	return true;
11951
	return true;
12028
}
11952
}
12029
 
11953
 
12030
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11954
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12031
				   struct drm_crtc_state *crtc_state)
11955
				   struct drm_crtc_state *crtc_state)
12032
{
11956
{
12033
	struct drm_device *dev = crtc->dev;
11957
	struct drm_device *dev = crtc->dev;
12034
	struct drm_i915_private *dev_priv = dev->dev_private;
11958
	struct drm_i915_private *dev_priv = dev->dev_private;
12035
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11959
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12036
	struct intel_crtc_state *pipe_config =
11960
	struct intel_crtc_state *pipe_config =
12037
		to_intel_crtc_state(crtc_state);
11961
		to_intel_crtc_state(crtc_state);
12038
	struct drm_atomic_state *state = crtc_state->state;
11962
	struct drm_atomic_state *state = crtc_state->state;
12039
	int ret;
11963
	int ret;
12040
	bool mode_changed = needs_modeset(crtc_state);
11964
	bool mode_changed = needs_modeset(crtc_state);
12041
 
11965
 
12042
	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
11966
	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
12043
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11967
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12044
		return -EINVAL;
11968
		return -EINVAL;
12045
	}
11969
	}
12046
 
11970
 
12047
	if (mode_changed && !crtc_state->active)
11971
	if (mode_changed && !crtc_state->active)
12048
		pipe_config->update_wm_post = true;
11972
		pipe_config->update_wm_post = true;
12049
 
11973
 
12050
	if (mode_changed && crtc_state->enable &&
11974
	if (mode_changed && crtc_state->enable &&
12051
	    dev_priv->display.crtc_compute_clock &&
11975
	    dev_priv->display.crtc_compute_clock &&
12052
	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
11976
	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
12053
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11977
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
12054
							   pipe_config);
11978
							   pipe_config);
12055
		if (ret)
11979
		if (ret)
12056
			return ret;
11980
			return ret;
12057
	}
11981
	}
12058
 
11982
 
12059
	ret = 0;
11983
	ret = 0;
12060
	if (dev_priv->display.compute_pipe_wm) {
11984
	if (dev_priv->display.compute_pipe_wm) {
12061
		ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
11985
		ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
12062
		if (ret)
11986
		if (ret)
12063
			return ret;
11987
			return ret;
12064
	}
11988
	}
12065
 
11989
 
12066
	if (INTEL_INFO(dev)->gen >= 9) {
11990
	if (INTEL_INFO(dev)->gen >= 9) {
12067
		if (mode_changed)
11991
		if (mode_changed)
12068
			ret = skl_update_scaler_crtc(pipe_config);
11992
			ret = skl_update_scaler_crtc(pipe_config);
12069
 
11993
 
12070
		if (!ret)
11994
		if (!ret)
12071
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
11995
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
12072
							 pipe_config);
11996
							 pipe_config);
12073
	}
11997
	}
12074
 
11998
 
12075
	return ret;
11999
	return ret;
12076
}
12000
}
12077
 
12001
 
12078
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
12002
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
12079
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
12003
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
12080
	.load_lut = intel_crtc_load_lut,
12004
	.load_lut = intel_crtc_load_lut,
12081
	.atomic_begin = intel_begin_crtc_commit,
12005
	.atomic_begin = intel_begin_crtc_commit,
12082
	.atomic_flush = intel_finish_crtc_commit,
12006
	.atomic_flush = intel_finish_crtc_commit,
12083
	.atomic_check = intel_crtc_atomic_check,
12007
	.atomic_check = intel_crtc_atomic_check,
12084
};
12008
};
12085
 
12009
 
12086
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12010
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12087
{
12011
{
12088
	struct intel_connector *connector;
12012
	struct intel_connector *connector;
12089
 
12013
 
12090
	for_each_intel_connector(dev, connector) {
12014
	for_each_intel_connector(dev, connector) {
12091
		if (connector->base.encoder) {
12015
		if (connector->base.encoder) {
12092
			connector->base.state->best_encoder =
12016
			connector->base.state->best_encoder =
12093
				connector->base.encoder;
12017
				connector->base.encoder;
12094
			connector->base.state->crtc =
12018
			connector->base.state->crtc =
12095
				connector->base.encoder->crtc;
12019
				connector->base.encoder->crtc;
12096
		} else {
12020
		} else {
12097
			connector->base.state->best_encoder = NULL;
12021
			connector->base.state->best_encoder = NULL;
12098
			connector->base.state->crtc = NULL;
12022
			connector->base.state->crtc = NULL;
12099
		}
12023
		}
12100
	}
12024
	}
12101
}
12025
}
12102
 
12026
 
12103
static void
12027
static void
12104
connected_sink_compute_bpp(struct intel_connector *connector,
12028
connected_sink_compute_bpp(struct intel_connector *connector,
12105
			   struct intel_crtc_state *pipe_config)
12029
			   struct intel_crtc_state *pipe_config)
12106
{
12030
{
12107
	int bpp = pipe_config->pipe_bpp;
12031
	int bpp = pipe_config->pipe_bpp;
12108
 
12032
 
12109
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
12033
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
12110
		connector->base.base.id,
12034
		connector->base.base.id,
12111
		connector->base.name);
12035
		connector->base.name);
12112
 
12036
 
12113
	/* Don't use an invalid EDID bpc value */
12037
	/* Don't use an invalid EDID bpc value */
12114
	if (connector->base.display_info.bpc &&
12038
	if (connector->base.display_info.bpc &&
12115
	    connector->base.display_info.bpc * 3 < bpp) {
12039
	    connector->base.display_info.bpc * 3 < bpp) {
12116
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12040
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12117
			      bpp, connector->base.display_info.bpc*3);
12041
			      bpp, connector->base.display_info.bpc*3);
12118
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12042
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12119
	}
12043
	}
12120
 
12044
 
12121
	/* Clamp bpp to default limit on screens without EDID 1.4 */
12045
	/* Clamp bpp to default limit on screens without EDID 1.4 */
12122
	if (connector->base.display_info.bpc == 0) {
12046
	if (connector->base.display_info.bpc == 0) {
12123
		int type = connector->base.connector_type;
12047
		int type = connector->base.connector_type;
12124
		int clamp_bpp = 24;
12048
		int clamp_bpp = 24;
12125
 
12049
 
12126
		/* Fall back to 18 bpp when DP sink capability is unknown. */
12050
		/* Fall back to 18 bpp when DP sink capability is unknown. */
12127
		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12051
		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12128
		    type == DRM_MODE_CONNECTOR_eDP)
12052
		    type == DRM_MODE_CONNECTOR_eDP)
12129
			clamp_bpp = 18;
12053
			clamp_bpp = 18;
12130
 
12054
 
12131
		if (bpp > clamp_bpp) {
12055
		if (bpp > clamp_bpp) {
12132
			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12056
			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12133
				      bpp, clamp_bpp);
12057
				      bpp, clamp_bpp);
12134
			pipe_config->pipe_bpp = clamp_bpp;
12058
			pipe_config->pipe_bpp = clamp_bpp;
12135
		}
12059
		}
12136
	}
12060
	}
12137
}
12061
}
12138
 
12062
 
12139
static int
12063
static int
12140
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12064
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12141
			  struct intel_crtc_state *pipe_config)
12065
			  struct intel_crtc_state *pipe_config)
12142
{
12066
{
12143
	struct drm_device *dev = crtc->base.dev;
12067
	struct drm_device *dev = crtc->base.dev;
12144
	struct drm_atomic_state *state;
12068
	struct drm_atomic_state *state;
12145
	struct drm_connector *connector;
12069
	struct drm_connector *connector;
12146
	struct drm_connector_state *connector_state;
12070
	struct drm_connector_state *connector_state;
12147
	int bpp, i;
12071
	int bpp, i;
12148
 
12072
 
12149
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
12073
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
12150
		bpp = 10*3;
12074
		bpp = 10*3;
12151
	else if (INTEL_INFO(dev)->gen >= 5)
12075
	else if (INTEL_INFO(dev)->gen >= 5)
12152
		bpp = 12*3;
12076
		bpp = 12*3;
12153
	else
12077
	else
12154
		bpp = 8*3;
12078
		bpp = 8*3;
12155
 
12079
 
12156
 
12080
 
12157
	pipe_config->pipe_bpp = bpp;
12081
	pipe_config->pipe_bpp = bpp;
12158
 
12082
 
12159
	state = pipe_config->base.state;
12083
	state = pipe_config->base.state;
12160
 
12084
 
12161
	/* Clamp display bpp to EDID value */
12085
	/* Clamp display bpp to EDID value */
12162
	for_each_connector_in_state(state, connector, connector_state, i) {
12086
	for_each_connector_in_state(state, connector, connector_state, i) {
12163
		if (connector_state->crtc != &crtc->base)
12087
		if (connector_state->crtc != &crtc->base)
12164
			continue;
12088
			continue;
12165
 
12089
 
12166
		connected_sink_compute_bpp(to_intel_connector(connector),
12090
		connected_sink_compute_bpp(to_intel_connector(connector),
12167
					   pipe_config);
12091
					   pipe_config);
12168
	}
12092
	}
12169
 
12093
 
12170
	return bpp;
12094
	return bpp;
12171
}
12095
}
12172
 
12096
 
12173
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12097
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12174
{
12098
{
12175
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12099
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12176
			"type: 0x%x flags: 0x%x\n",
12100
			"type: 0x%x flags: 0x%x\n",
12177
		mode->crtc_clock,
12101
		mode->crtc_clock,
12178
		mode->crtc_hdisplay, mode->crtc_hsync_start,
12102
		mode->crtc_hdisplay, mode->crtc_hsync_start,
12179
		mode->crtc_hsync_end, mode->crtc_htotal,
12103
		mode->crtc_hsync_end, mode->crtc_htotal,
12180
		mode->crtc_vdisplay, mode->crtc_vsync_start,
12104
		mode->crtc_vdisplay, mode->crtc_vsync_start,
12181
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12105
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12182
}
12106
}
12183
 
12107
 
12184
static void intel_dump_pipe_config(struct intel_crtc *crtc,
12108
static void intel_dump_pipe_config(struct intel_crtc *crtc,
12185
				   struct intel_crtc_state *pipe_config,
12109
				   struct intel_crtc_state *pipe_config,
12186
				   const char *context)
12110
				   const char *context)
12187
{
12111
{
12188
	struct drm_device *dev = crtc->base.dev;
12112
	struct drm_device *dev = crtc->base.dev;
12189
	struct drm_plane *plane;
12113
	struct drm_plane *plane;
12190
	struct intel_plane *intel_plane;
12114
	struct intel_plane *intel_plane;
12191
	struct intel_plane_state *state;
12115
	struct intel_plane_state *state;
12192
	struct drm_framebuffer *fb;
12116
	struct drm_framebuffer *fb;
12193
 
12117
 
12194
	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
12118
	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
12195
		      context, pipe_config, pipe_name(crtc->pipe));
12119
		      context, pipe_config, pipe_name(crtc->pipe));
12196
 
12120
 
12197
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
12121
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
12198
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12122
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12199
		      pipe_config->pipe_bpp, pipe_config->dither);
12123
		      pipe_config->pipe_bpp, pipe_config->dither);
12200
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12124
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12201
		      pipe_config->has_pch_encoder,
12125
		      pipe_config->has_pch_encoder,
12202
		      pipe_config->fdi_lanes,
12126
		      pipe_config->fdi_lanes,
12203
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12127
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12204
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12128
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12205
		      pipe_config->fdi_m_n.tu);
12129
		      pipe_config->fdi_m_n.tu);
12206
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12130
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12207
		      pipe_config->has_dp_encoder,
12131
		      pipe_config->has_dp_encoder,
12208
		      pipe_config->lane_count,
12132
		      pipe_config->lane_count,
12209
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12133
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12210
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12134
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12211
		      pipe_config->dp_m_n.tu);
12135
		      pipe_config->dp_m_n.tu);
12212
 
12136
 
12213
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12137
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12214
		      pipe_config->has_dp_encoder,
12138
		      pipe_config->has_dp_encoder,
12215
		      pipe_config->lane_count,
12139
		      pipe_config->lane_count,
12216
		      pipe_config->dp_m2_n2.gmch_m,
12140
		      pipe_config->dp_m2_n2.gmch_m,
12217
		      pipe_config->dp_m2_n2.gmch_n,
12141
		      pipe_config->dp_m2_n2.gmch_n,
12218
		      pipe_config->dp_m2_n2.link_m,
12142
		      pipe_config->dp_m2_n2.link_m,
12219
		      pipe_config->dp_m2_n2.link_n,
12143
		      pipe_config->dp_m2_n2.link_n,
12220
		      pipe_config->dp_m2_n2.tu);
12144
		      pipe_config->dp_m2_n2.tu);
12221
 
12145
 
12222
	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12146
	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12223
		      pipe_config->has_audio,
12147
		      pipe_config->has_audio,
12224
		      pipe_config->has_infoframe);
12148
		      pipe_config->has_infoframe);
12225
 
12149
 
12226
	DRM_DEBUG_KMS("requested mode:\n");
12150
	DRM_DEBUG_KMS("requested mode:\n");
12227
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
12151
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
12228
	DRM_DEBUG_KMS("adjusted mode:\n");
12152
	DRM_DEBUG_KMS("adjusted mode:\n");
12229
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12153
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12230
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12154
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12231
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12155
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12232
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12156
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12233
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
12157
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
12234
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12158
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12235
		      crtc->num_scalers,
12159
		      crtc->num_scalers,
12236
		      pipe_config->scaler_state.scaler_users,
12160
		      pipe_config->scaler_state.scaler_users,
12237
		      pipe_config->scaler_state.scaler_id);
12161
		      pipe_config->scaler_state.scaler_id);
12238
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12162
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12239
		      pipe_config->gmch_pfit.control,
12163
		      pipe_config->gmch_pfit.control,
12240
		      pipe_config->gmch_pfit.pgm_ratios,
12164
		      pipe_config->gmch_pfit.pgm_ratios,
12241
		      pipe_config->gmch_pfit.lvds_border_bits);
12165
		      pipe_config->gmch_pfit.lvds_border_bits);
12242
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12166
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12243
		      pipe_config->pch_pfit.pos,
12167
		      pipe_config->pch_pfit.pos,
12244
		      pipe_config->pch_pfit.size,
12168
		      pipe_config->pch_pfit.size,
12245
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12169
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12246
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
12170
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
12247
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
12171
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
12248
 
12172
 
12249
	if (IS_BROXTON(dev)) {
12173
	if (IS_BROXTON(dev)) {
12250
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
12174
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
12251
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12175
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12252
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12176
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12253
			      pipe_config->ddi_pll_sel,
12177
			      pipe_config->ddi_pll_sel,
12254
			      pipe_config->dpll_hw_state.ebb0,
12178
			      pipe_config->dpll_hw_state.ebb0,
12255
			      pipe_config->dpll_hw_state.ebb4,
12179
			      pipe_config->dpll_hw_state.ebb4,
12256
			      pipe_config->dpll_hw_state.pll0,
12180
			      pipe_config->dpll_hw_state.pll0,
12257
			      pipe_config->dpll_hw_state.pll1,
12181
			      pipe_config->dpll_hw_state.pll1,
12258
			      pipe_config->dpll_hw_state.pll2,
12182
			      pipe_config->dpll_hw_state.pll2,
12259
			      pipe_config->dpll_hw_state.pll3,
12183
			      pipe_config->dpll_hw_state.pll3,
12260
			      pipe_config->dpll_hw_state.pll6,
12184
			      pipe_config->dpll_hw_state.pll6,
12261
			      pipe_config->dpll_hw_state.pll8,
12185
			      pipe_config->dpll_hw_state.pll8,
12262
			      pipe_config->dpll_hw_state.pll9,
12186
			      pipe_config->dpll_hw_state.pll9,
12263
			      pipe_config->dpll_hw_state.pll10,
12187
			      pipe_config->dpll_hw_state.pll10,
12264
			      pipe_config->dpll_hw_state.pcsdw12);
12188
			      pipe_config->dpll_hw_state.pcsdw12);
12265
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
12189
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
12266
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12190
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12267
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12191
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12268
			      pipe_config->ddi_pll_sel,
12192
			      pipe_config->ddi_pll_sel,
12269
			      pipe_config->dpll_hw_state.ctrl1,
12193
			      pipe_config->dpll_hw_state.ctrl1,
12270
			      pipe_config->dpll_hw_state.cfgcr1,
12194
			      pipe_config->dpll_hw_state.cfgcr1,
12271
			      pipe_config->dpll_hw_state.cfgcr2);
12195
			      pipe_config->dpll_hw_state.cfgcr2);
12272
	} else if (HAS_DDI(dev)) {
12196
	} else if (HAS_DDI(dev)) {
12273
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12197
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12274
			      pipe_config->ddi_pll_sel,
12198
			      pipe_config->ddi_pll_sel,
12275
			      pipe_config->dpll_hw_state.wrpll,
12199
			      pipe_config->dpll_hw_state.wrpll,
12276
			      pipe_config->dpll_hw_state.spll);
12200
			      pipe_config->dpll_hw_state.spll);
12277
	} else {
12201
	} else {
12278
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12202
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12279
			      "fp0: 0x%x, fp1: 0x%x\n",
12203
			      "fp0: 0x%x, fp1: 0x%x\n",
12280
			      pipe_config->dpll_hw_state.dpll,
12204
			      pipe_config->dpll_hw_state.dpll,
12281
			      pipe_config->dpll_hw_state.dpll_md,
12205
			      pipe_config->dpll_hw_state.dpll_md,
12282
			      pipe_config->dpll_hw_state.fp0,
12206
			      pipe_config->dpll_hw_state.fp0,
12283
			      pipe_config->dpll_hw_state.fp1);
12207
			      pipe_config->dpll_hw_state.fp1);
12284
	}
12208
	}
12285
 
12209
 
12286
	DRM_DEBUG_KMS("planes on this crtc\n");
12210
	DRM_DEBUG_KMS("planes on this crtc\n");
12287
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12211
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12288
		intel_plane = to_intel_plane(plane);
12212
		intel_plane = to_intel_plane(plane);
12289
		if (intel_plane->pipe != crtc->pipe)
12213
		if (intel_plane->pipe != crtc->pipe)
12290
			continue;
12214
			continue;
12291
 
12215
 
12292
		state = to_intel_plane_state(plane->state);
12216
		state = to_intel_plane_state(plane->state);
12293
		fb = state->base.fb;
12217
		fb = state->base.fb;
12294
		if (!fb) {
12218
		if (!fb) {
12295
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
12219
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
12296
				"disabled, scaler_id = %d\n",
12220
				"disabled, scaler_id = %d\n",
12297
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12221
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12298
				plane->base.id, intel_plane->pipe,
12222
				plane->base.id, intel_plane->pipe,
12299
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12223
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12300
				drm_plane_index(plane), state->scaler_id);
12224
				drm_plane_index(plane), state->scaler_id);
12301
			continue;
12225
			continue;
12302
		}
12226
		}
12303
 
12227
 
12304
		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
12228
		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
12305
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12229
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12306
			plane->base.id, intel_plane->pipe,
12230
			plane->base.id, intel_plane->pipe,
12307
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
12231
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
12308
			drm_plane_index(plane));
12232
			drm_plane_index(plane));
12309
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
12233
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
12310
			fb->base.id, fb->width, fb->height, fb->pixel_format);
12234
			fb->base.id, fb->width, fb->height, fb->pixel_format);
12311
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
12235
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
12312
			state->scaler_id,
12236
			state->scaler_id,
12313
			state->src.x1 >> 16, state->src.y1 >> 16,
12237
			state->src.x1 >> 16, state->src.y1 >> 16,
12314
			drm_rect_width(&state->src) >> 16,
12238
			drm_rect_width(&state->src) >> 16,
12315
			drm_rect_height(&state->src) >> 16,
12239
			drm_rect_height(&state->src) >> 16,
12316
			state->dst.x1, state->dst.y1,
12240
			state->dst.x1, state->dst.y1,
12317
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12241
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12318
	}
12242
	}
12319
}
12243
}
12320
 
12244
 
12321
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12245
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12322
{
12246
{
12323
	struct drm_device *dev = state->dev;
12247
	struct drm_device *dev = state->dev;
12324
	struct drm_connector *connector;
12248
	struct drm_connector *connector;
12325
	unsigned int used_ports = 0;
12249
	unsigned int used_ports = 0;
12326
 
12250
 
12327
	/*
12251
	/*
12328
	 * Walk the connector list instead of the encoder
12252
	 * Walk the connector list instead of the encoder
12329
	 * list to detect the problem on ddi platforms
12253
	 * list to detect the problem on ddi platforms
12330
	 * where there's just one encoder per digital port.
12254
	 * where there's just one encoder per digital port.
12331
	 */
12255
	 */
12332
	drm_for_each_connector(connector, dev) {
12256
	drm_for_each_connector(connector, dev) {
12333
		struct drm_connector_state *connector_state;
12257
		struct drm_connector_state *connector_state;
12334
		struct intel_encoder *encoder;
12258
		struct intel_encoder *encoder;
12335
 
12259
 
12336
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
12260
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
12337
		if (!connector_state)
12261
		if (!connector_state)
12338
			connector_state = connector->state;
12262
			connector_state = connector->state;
12339
 
12263
 
12340
		if (!connector_state->best_encoder)
12264
		if (!connector_state->best_encoder)
12341
			continue;
12265
			continue;
12342
 
12266
 
12343
		encoder = to_intel_encoder(connector_state->best_encoder);
12267
		encoder = to_intel_encoder(connector_state->best_encoder);
12344
 
12268
 
12345
		WARN_ON(!connector_state->crtc);
12269
		WARN_ON(!connector_state->crtc);
12346
 
12270
 
12347
		switch (encoder->type) {
12271
		switch (encoder->type) {
12348
			unsigned int port_mask;
12272
			unsigned int port_mask;
12349
		case INTEL_OUTPUT_UNKNOWN:
12273
		case INTEL_OUTPUT_UNKNOWN:
12350
			if (WARN_ON(!HAS_DDI(dev)))
12274
			if (WARN_ON(!HAS_DDI(dev)))
12351
				break;
12275
				break;
12352
		case INTEL_OUTPUT_DISPLAYPORT:
12276
		case INTEL_OUTPUT_DISPLAYPORT:
12353
		case INTEL_OUTPUT_HDMI:
12277
		case INTEL_OUTPUT_HDMI:
12354
		case INTEL_OUTPUT_EDP:
12278
		case INTEL_OUTPUT_EDP:
12355
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12279
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12356
 
12280
 
12357
			/* the same port mustn't appear more than once */
12281
			/* the same port mustn't appear more than once */
12358
			if (used_ports & port_mask)
12282
			if (used_ports & port_mask)
12359
				return false;
12283
				return false;
12360
 
12284
 
12361
			used_ports |= port_mask;
12285
			used_ports |= port_mask;
12362
		default:
12286
		default:
12363
			break;
12287
			break;
12364
		}
12288
		}
12365
	}
12289
	}
12366
 
12290
 
12367
	return true;
12291
	return true;
12368
}
12292
}
12369
 
12293
 
12370
static void
12294
static void
12371
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12295
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12372
{
12296
{
12373
	struct drm_crtc_state tmp_state;
12297
	struct drm_crtc_state tmp_state;
12374
	struct intel_crtc_scaler_state scaler_state;
12298
	struct intel_crtc_scaler_state scaler_state;
12375
	struct intel_dpll_hw_state dpll_hw_state;
12299
	struct intel_dpll_hw_state dpll_hw_state;
12376
	enum intel_dpll_id shared_dpll;
12300
	enum intel_dpll_id shared_dpll;
12377
	uint32_t ddi_pll_sel;
12301
	uint32_t ddi_pll_sel;
12378
	bool force_thru;
12302
	bool force_thru;
12379
 
12303
 
12380
	/* FIXME: before the switch to atomic started, a new pipe_config was
12304
	/* FIXME: before the switch to atomic started, a new pipe_config was
12381
	 * kzalloc'd. Code that depends on any field being zero should be
12305
	 * kzalloc'd. Code that depends on any field being zero should be
12382
	 * fixed, so that the crtc_state can be safely duplicated. For now,
12306
	 * fixed, so that the crtc_state can be safely duplicated. For now,
12383
	 * only fields that are know to not cause problems are preserved. */
12307
	 * only fields that are know to not cause problems are preserved. */
12384
 
12308
 
12385
	tmp_state = crtc_state->base;
12309
	tmp_state = crtc_state->base;
12386
	scaler_state = crtc_state->scaler_state;
12310
	scaler_state = crtc_state->scaler_state;
12387
	shared_dpll = crtc_state->shared_dpll;
12311
	shared_dpll = crtc_state->shared_dpll;
12388
	dpll_hw_state = crtc_state->dpll_hw_state;
12312
	dpll_hw_state = crtc_state->dpll_hw_state;
12389
	ddi_pll_sel = crtc_state->ddi_pll_sel;
12313
	ddi_pll_sel = crtc_state->ddi_pll_sel;
12390
	force_thru = crtc_state->pch_pfit.force_thru;
12314
	force_thru = crtc_state->pch_pfit.force_thru;
12391
 
12315
 
12392
	memset(crtc_state, 0, sizeof *crtc_state);
12316
	memset(crtc_state, 0, sizeof *crtc_state);
12393
 
12317
 
12394
	crtc_state->base = tmp_state;
12318
	crtc_state->base = tmp_state;
12395
	crtc_state->scaler_state = scaler_state;
12319
	crtc_state->scaler_state = scaler_state;
12396
	crtc_state->shared_dpll = shared_dpll;
12320
	crtc_state->shared_dpll = shared_dpll;
12397
	crtc_state->dpll_hw_state = dpll_hw_state;
12321
	crtc_state->dpll_hw_state = dpll_hw_state;
12398
	crtc_state->ddi_pll_sel = ddi_pll_sel;
12322
	crtc_state->ddi_pll_sel = ddi_pll_sel;
12399
	crtc_state->pch_pfit.force_thru = force_thru;
12323
	crtc_state->pch_pfit.force_thru = force_thru;
12400
}
12324
}
12401
 
12325
 
12402
static int
12326
static int
12403
intel_modeset_pipe_config(struct drm_crtc *crtc,
12327
intel_modeset_pipe_config(struct drm_crtc *crtc,
12404
			  struct intel_crtc_state *pipe_config)
12328
			  struct intel_crtc_state *pipe_config)
12405
{
12329
{
12406
	struct drm_atomic_state *state = pipe_config->base.state;
12330
	struct drm_atomic_state *state = pipe_config->base.state;
12407
	struct intel_encoder *encoder;
12331
	struct intel_encoder *encoder;
12408
	struct drm_connector *connector;
12332
	struct drm_connector *connector;
12409
	struct drm_connector_state *connector_state;
12333
	struct drm_connector_state *connector_state;
12410
	int base_bpp, ret = -EINVAL;
12334
	int base_bpp, ret = -EINVAL;
12411
	int i;
12335
	int i;
12412
	bool retry = true;
12336
	bool retry = true;
12413
 
12337
 
12414
	clear_intel_crtc_state(pipe_config);
12338
	clear_intel_crtc_state(pipe_config);
12415
 
12339
 
12416
	pipe_config->cpu_transcoder =
12340
	pipe_config->cpu_transcoder =
12417
		(enum transcoder) to_intel_crtc(crtc)->pipe;
12341
		(enum transcoder) to_intel_crtc(crtc)->pipe;
12418
 
12342
 
12419
	/*
12343
	/*
12420
	 * Sanitize sync polarity flags based on requested ones. If neither
12344
	 * Sanitize sync polarity flags based on requested ones. If neither
12421
	 * positive or negative polarity is requested, treat this as meaning
12345
	 * positive or negative polarity is requested, treat this as meaning
12422
	 * negative polarity.
12346
	 * negative polarity.
12423
	 */
12347
	 */
12424
	if (!(pipe_config->base.adjusted_mode.flags &
12348
	if (!(pipe_config->base.adjusted_mode.flags &
12425
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12349
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12426
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12350
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12427
 
12351
 
12428
	if (!(pipe_config->base.adjusted_mode.flags &
12352
	if (!(pipe_config->base.adjusted_mode.flags &
12429
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12353
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12430
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12354
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12431
 
12355
 
12432
	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12356
	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12433
					     pipe_config);
12357
					     pipe_config);
12434
	if (base_bpp < 0)
12358
	if (base_bpp < 0)
12435
		goto fail;
12359
		goto fail;
12436
 
12360
 
12437
	/*
12361
	/*
12438
	 * Determine the real pipe dimensions. Note that stereo modes can
12362
	 * Determine the real pipe dimensions. Note that stereo modes can
12439
	 * increase the actual pipe size due to the frame doubling and
12363
	 * increase the actual pipe size due to the frame doubling and
12440
	 * insertion of additional space for blanks between the frame. This
12364
	 * insertion of additional space for blanks between the frame. This
12441
	 * is stored in the crtc timings. We use the requested mode to do this
12365
	 * is stored in the crtc timings. We use the requested mode to do this
12442
	 * computation to clearly distinguish it from the adjusted mode, which
12366
	 * computation to clearly distinguish it from the adjusted mode, which
12443
	 * can be changed by the connectors in the below retry loop.
12367
	 * can be changed by the connectors in the below retry loop.
12444
	 */
12368
	 */
12445
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
12369
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
12446
			       &pipe_config->pipe_src_w,
12370
			       &pipe_config->pipe_src_w,
12447
			       &pipe_config->pipe_src_h);
12371
			       &pipe_config->pipe_src_h);
12448
 
12372
 
12449
encoder_retry:
12373
encoder_retry:
12450
	/* Ensure the port clock defaults are reset when retrying. */
12374
	/* Ensure the port clock defaults are reset when retrying. */
12451
	pipe_config->port_clock = 0;
12375
	pipe_config->port_clock = 0;
12452
	pipe_config->pixel_multiplier = 1;
12376
	pipe_config->pixel_multiplier = 1;
12453
 
12377
 
12454
	/* Fill in default crtc timings, allow encoders to overwrite them. */
12378
	/* Fill in default crtc timings, allow encoders to overwrite them. */
12455
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12379
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12456
			      CRTC_STEREO_DOUBLE);
12380
			      CRTC_STEREO_DOUBLE);
12457
 
12381
 
12458
	/* Pass our mode to the connectors and the CRTC to give them a chance to
12382
	/* Pass our mode to the connectors and the CRTC to give them a chance to
12459
	 * adjust it according to limitations or connector properties, and also
12383
	 * adjust it according to limitations or connector properties, and also
12460
	 * a chance to reject the mode entirely.
12384
	 * a chance to reject the mode entirely.
12461
	 */
12385
	 */
12462
	for_each_connector_in_state(state, connector, connector_state, i) {
12386
	for_each_connector_in_state(state, connector, connector_state, i) {
12463
		if (connector_state->crtc != crtc)
12387
		if (connector_state->crtc != crtc)
12464
			continue;
12388
			continue;
12465
 
12389
 
12466
		encoder = to_intel_encoder(connector_state->best_encoder);
12390
		encoder = to_intel_encoder(connector_state->best_encoder);
12467
 
12391
 
12468
		if (!(encoder->compute_config(encoder, pipe_config))) {
12392
		if (!(encoder->compute_config(encoder, pipe_config))) {
12469
			DRM_DEBUG_KMS("Encoder config failure\n");
12393
			DRM_DEBUG_KMS("Encoder config failure\n");
12470
			goto fail;
12394
			goto fail;
12471
		}
12395
		}
12472
	}
12396
	}
12473
 
12397
 
12474
	/* Set default port clock if not overwritten by the encoder. Needs to be
12398
	/* Set default port clock if not overwritten by the encoder. Needs to be
12475
	 * done afterwards in case the encoder adjusts the mode. */
12399
	 * done afterwards in case the encoder adjusts the mode. */
12476
	if (!pipe_config->port_clock)
12400
	if (!pipe_config->port_clock)
12477
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12401
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12478
			* pipe_config->pixel_multiplier;
12402
			* pipe_config->pixel_multiplier;
12479
 
12403
 
12480
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12404
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12481
	if (ret < 0) {
12405
	if (ret < 0) {
12482
		DRM_DEBUG_KMS("CRTC fixup failed\n");
12406
		DRM_DEBUG_KMS("CRTC fixup failed\n");
12483
		goto fail;
12407
		goto fail;
12484
	}
12408
	}
12485
 
12409
 
12486
	if (ret == RETRY) {
12410
	if (ret == RETRY) {
12487
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
12411
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
12488
			ret = -EINVAL;
12412
			ret = -EINVAL;
12489
			goto fail;
12413
			goto fail;
12490
		}
12414
		}
12491
 
12415
 
12492
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12416
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12493
		retry = false;
12417
		retry = false;
12494
		goto encoder_retry;
12418
		goto encoder_retry;
12495
	}
12419
	}
12496
 
12420
 
12497
	/* Dithering seems to not pass-through bits correctly when it should, so
12421
	/* Dithering seems to not pass-through bits correctly when it should, so
12498
	 * only enable it on 6bpc panels. */
12422
	 * only enable it on 6bpc panels. */
12499
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12423
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12500
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12424
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12501
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12425
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12502
 
12426
 
12503
fail:
12427
fail:
12504
	return ret;
12428
	return ret;
12505
}
12429
}
12506
 
12430
 
12507
static void
12431
static void
12508
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12432
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12509
{
12433
{
12510
	struct drm_crtc *crtc;
12434
	struct drm_crtc *crtc;
12511
	struct drm_crtc_state *crtc_state;
12435
	struct drm_crtc_state *crtc_state;
12512
	int i;
12436
	int i;
12513
 
12437
 
12514
	/* Double check state. */
12438
	/* Double check state. */
12515
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12439
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12516
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12440
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12517
 
12441
 
12518
		/* Update hwmode for vblank functions */
12442
		/* Update hwmode for vblank functions */
12519
		if (crtc->state->active)
12443
		if (crtc->state->active)
12520
			crtc->hwmode = crtc->state->adjusted_mode;
12444
			crtc->hwmode = crtc->state->adjusted_mode;
12521
		else
12445
		else
12522
			crtc->hwmode.crtc_clock = 0;
12446
			crtc->hwmode.crtc_clock = 0;
12523
 
12447
 
12524
		/*
12448
		/*
12525
		 * Update legacy state to satisfy fbc code. This can
12449
		 * Update legacy state to satisfy fbc code. This can
12526
		 * be removed when fbc uses the atomic state.
12450
		 * be removed when fbc uses the atomic state.
12527
		 */
12451
		 */
12528
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12452
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12529
			struct drm_plane_state *plane_state = crtc->primary->state;
12453
			struct drm_plane_state *plane_state = crtc->primary->state;
12530
 
12454
 
12531
			crtc->primary->fb = plane_state->fb;
12455
			crtc->primary->fb = plane_state->fb;
12532
			crtc->x = plane_state->src_x >> 16;
12456
			crtc->x = plane_state->src_x >> 16;
12533
			crtc->y = plane_state->src_y >> 16;
12457
			crtc->y = plane_state->src_y >> 16;
12534
		}
12458
		}
12535
	}
12459
	}
12536
}
12460
}
12537
 
12461
 
12538
static bool intel_fuzzy_clock_check(int clock1, int clock2)
12462
static bool intel_fuzzy_clock_check(int clock1, int clock2)
12539
{
12463
{
12540
	int diff;
12464
	int diff;
12541
 
12465
 
12542
	if (clock1 == clock2)
12466
	if (clock1 == clock2)
12543
		return true;
12467
		return true;
12544
 
12468
 
12545
	if (!clock1 || !clock2)
12469
	if (!clock1 || !clock2)
12546
		return false;
12470
		return false;
12547
 
12471
 
12548
	diff = abs(clock1 - clock2);
12472
	diff = abs(clock1 - clock2);
12549
 
12473
 
12550
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
12474
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
12551
		return true;
12475
		return true;
12552
 
12476
 
12553
	return false;
12477
	return false;
12554
}
12478
}
12555
 
12479
 
12556
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
12480
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
12557
	list_for_each_entry((intel_crtc), \
12481
	list_for_each_entry((intel_crtc), \
12558
			    &(dev)->mode_config.crtc_list, \
12482
			    &(dev)->mode_config.crtc_list, \
12559
			    base.head) \
12483
			    base.head) \
12560
		for_each_if (mask & (1 <<(intel_crtc)->pipe))
12484
		for_each_if (mask & (1 <<(intel_crtc)->pipe))
12561
 
12485
 
12562
static bool
12486
static bool
12563
intel_compare_m_n(unsigned int m, unsigned int n,
12487
intel_compare_m_n(unsigned int m, unsigned int n,
12564
		  unsigned int m2, unsigned int n2,
12488
		  unsigned int m2, unsigned int n2,
12565
		  bool exact)
12489
		  bool exact)
12566
{
12490
{
12567
	if (m == m2 && n == n2)
12491
	if (m == m2 && n == n2)
12568
		return true;
12492
		return true;
12569
 
12493
 
12570
	if (exact || !m || !n || !m2 || !n2)
12494
	if (exact || !m || !n || !m2 || !n2)
12571
		return false;
12495
		return false;
12572
 
12496
 
12573
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12497
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12574
 
12498
 
12575
	if (m > m2) {
12499
	if (n > n2) {
12576
		while (m > m2) {
12500
		while (n > n2) {
12577
			m2 <<= 1;
12501
			m2 <<= 1;
12578
			n2 <<= 1;
12502
			n2 <<= 1;
12579
		}
12503
		}
12580
	} else if (m < m2) {
12504
	} else if (n < n2) {
12581
		while (m < m2) {
12505
		while (n < n2) {
12582
			m <<= 1;
12506
			m <<= 1;
12583
			n <<= 1;
12507
			n <<= 1;
12584
		}
12508
		}
12585
	}
12509
	}
-
 
12510
 
-
 
12511
	if (n != n2)
-
 
12512
		return false;
12586
 
12513
 
12587
	return m == m2 && n == n2;
12514
	return intel_fuzzy_clock_check(m, m2);
12588
}
12515
}
12589
 
12516
 
12590
static bool
12517
static bool
12591
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12518
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12592
		       struct intel_link_m_n *m2_n2,
12519
		       struct intel_link_m_n *m2_n2,
12593
		       bool adjust)
12520
		       bool adjust)
12594
{
12521
{
12595
	if (m_n->tu == m2_n2->tu &&
12522
	if (m_n->tu == m2_n2->tu &&
12596
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12523
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12597
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12524
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12598
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
12525
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
12599
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
12526
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
12600
		if (adjust)
12527
		if (adjust)
12601
			*m2_n2 = *m_n;
12528
			*m2_n2 = *m_n;
12602
 
12529
 
12603
		return true;
12530
		return true;
12604
	}
12531
	}
12605
 
12532
 
12606
	return false;
12533
	return false;
12607
}
12534
}
12608
 
12535
 
12609
static bool
12536
static bool
12610
intel_pipe_config_compare(struct drm_device *dev,
12537
intel_pipe_config_compare(struct drm_device *dev,
12611
			  struct intel_crtc_state *current_config,
12538
			  struct intel_crtc_state *current_config,
12612
			  struct intel_crtc_state *pipe_config,
12539
			  struct intel_crtc_state *pipe_config,
12613
			  bool adjust)
12540
			  bool adjust)
12614
{
12541
{
12615
	bool ret = true;
12542
	bool ret = true;
12616
 
12543
 
12617
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12544
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12618
	do { \
12545
	do { \
12619
		if (!adjust) \
12546
		if (!adjust) \
12620
			DRM_ERROR(fmt, ##__VA_ARGS__); \
12547
			DRM_ERROR(fmt, ##__VA_ARGS__); \
12621
		else \
12548
		else \
12622
			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12549
			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12623
	} while (0)
12550
	} while (0)
12624
 
12551
 
12625
#define PIPE_CONF_CHECK_X(name)	\
12552
#define PIPE_CONF_CHECK_X(name)	\
12626
	if (current_config->name != pipe_config->name) { \
12553
	if (current_config->name != pipe_config->name) { \
12627
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12554
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12628
			  "(expected 0x%08x, found 0x%08x)\n", \
12555
			  "(expected 0x%08x, found 0x%08x)\n", \
12629
			  current_config->name, \
12556
			  current_config->name, \
12630
			  pipe_config->name); \
12557
			  pipe_config->name); \
12631
		ret = false; \
12558
		ret = false; \
12632
	}
12559
	}
12633
 
12560
 
12634
#define PIPE_CONF_CHECK_I(name)	\
12561
#define PIPE_CONF_CHECK_I(name)	\
12635
	if (current_config->name != pipe_config->name) { \
12562
	if (current_config->name != pipe_config->name) { \
12636
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12563
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12637
			  "(expected %i, found %i)\n", \
12564
			  "(expected %i, found %i)\n", \
12638
			  current_config->name, \
12565
			  current_config->name, \
12639
			  pipe_config->name); \
12566
			  pipe_config->name); \
12640
		ret = false; \
12567
		ret = false; \
12641
	}
12568
	}
12642
 
12569
 
12643
#define PIPE_CONF_CHECK_M_N(name) \
12570
#define PIPE_CONF_CHECK_M_N(name) \
12644
	if (!intel_compare_link_m_n(¤t_config->name, \
12571
	if (!intel_compare_link_m_n(¤t_config->name, \
12645
				    &pipe_config->name,\
12572
				    &pipe_config->name,\
12646
				    adjust)) { \
12573
				    adjust)) { \
12647
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12574
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12648
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12575
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12649
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12576
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12650
			  current_config->name.tu, \
12577
			  current_config->name.tu, \
12651
			  current_config->name.gmch_m, \
12578
			  current_config->name.gmch_m, \
12652
			  current_config->name.gmch_n, \
12579
			  current_config->name.gmch_n, \
12653
			  current_config->name.link_m, \
12580
			  current_config->name.link_m, \
12654
			  current_config->name.link_n, \
12581
			  current_config->name.link_n, \
12655
			  pipe_config->name.tu, \
12582
			  pipe_config->name.tu, \
12656
			  pipe_config->name.gmch_m, \
12583
			  pipe_config->name.gmch_m, \
12657
			  pipe_config->name.gmch_n, \
12584
			  pipe_config->name.gmch_n, \
12658
			  pipe_config->name.link_m, \
12585
			  pipe_config->name.link_m, \
12659
			  pipe_config->name.link_n); \
12586
			  pipe_config->name.link_n); \
12660
		ret = false; \
12587
		ret = false; \
12661
	}
12588
	}
12662
 
12589
 
12663
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12590
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12664
	if (!intel_compare_link_m_n(¤t_config->name, \
12591
	if (!intel_compare_link_m_n(¤t_config->name, \
12665
				    &pipe_config->name, adjust) && \
12592
				    &pipe_config->name, adjust) && \
12666
	    !intel_compare_link_m_n(¤t_config->alt_name, \
12593
	    !intel_compare_link_m_n(¤t_config->alt_name, \
12667
				    &pipe_config->name, adjust)) { \
12594
				    &pipe_config->name, adjust)) { \
12668
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12595
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12669
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12596
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12670
			  "or tu %i gmch %i/%i link %i/%i, " \
12597
			  "or tu %i gmch %i/%i link %i/%i, " \
12671
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12598
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12672
			  current_config->name.tu, \
12599
			  current_config->name.tu, \
12673
			  current_config->name.gmch_m, \
12600
			  current_config->name.gmch_m, \
12674
			  current_config->name.gmch_n, \
12601
			  current_config->name.gmch_n, \
12675
			  current_config->name.link_m, \
12602
			  current_config->name.link_m, \
12676
			  current_config->name.link_n, \
12603
			  current_config->name.link_n, \
12677
			  current_config->alt_name.tu, \
12604
			  current_config->alt_name.tu, \
12678
			  current_config->alt_name.gmch_m, \
12605
			  current_config->alt_name.gmch_m, \
12679
			  current_config->alt_name.gmch_n, \
12606
			  current_config->alt_name.gmch_n, \
12680
			  current_config->alt_name.link_m, \
12607
			  current_config->alt_name.link_m, \
12681
			  current_config->alt_name.link_n, \
12608
			  current_config->alt_name.link_n, \
12682
			  pipe_config->name.tu, \
12609
			  pipe_config->name.tu, \
12683
			  pipe_config->name.gmch_m, \
12610
			  pipe_config->name.gmch_m, \
12684
			  pipe_config->name.gmch_n, \
12611
			  pipe_config->name.gmch_n, \
12685
			  pipe_config->name.link_m, \
12612
			  pipe_config->name.link_m, \
12686
			  pipe_config->name.link_n); \
12613
			  pipe_config->name.link_n); \
12687
		ret = false; \
12614
		ret = false; \
12688
	}
12615
	}
12689
 
12616
 
12690
/* This is required for BDW+ where there is only one set of registers for
12617
/* This is required for BDW+ where there is only one set of registers for
12691
 * switching between high and low RR.
12618
 * switching between high and low RR.
12692
 * This macro can be used whenever a comparison has to be made between one
12619
 * This macro can be used whenever a comparison has to be made between one
12693
 * hw state and multiple sw state variables.
12620
 * hw state and multiple sw state variables.
12694
 */
12621
 */
12695
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12622
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12696
	if ((current_config->name != pipe_config->name) && \
12623
	if ((current_config->name != pipe_config->name) && \
12697
		(current_config->alt_name != pipe_config->name)) { \
12624
		(current_config->alt_name != pipe_config->name)) { \
12698
			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12625
			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12699
				  "(expected %i or %i, found %i)\n", \
12626
				  "(expected %i or %i, found %i)\n", \
12700
				  current_config->name, \
12627
				  current_config->name, \
12701
				  current_config->alt_name, \
12628
				  current_config->alt_name, \
12702
				  pipe_config->name); \
12629
				  pipe_config->name); \
12703
			ret = false; \
12630
			ret = false; \
12704
	}
12631
	}
12705
 
12632
 
12706
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12633
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12707
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
12634
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
12708
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12635
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12709
			  "(expected %i, found %i)\n", \
12636
			  "(expected %i, found %i)\n", \
12710
			  current_config->name & (mask), \
12637
			  current_config->name & (mask), \
12711
			  pipe_config->name & (mask)); \
12638
			  pipe_config->name & (mask)); \
12712
		ret = false; \
12639
		ret = false; \
12713
	}
12640
	}
12714
 
12641
 
12715
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12642
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12716
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12643
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12717
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12644
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12718
			  "(expected %i, found %i)\n", \
12645
			  "(expected %i, found %i)\n", \
12719
			  current_config->name, \
12646
			  current_config->name, \
12720
			  pipe_config->name); \
12647
			  pipe_config->name); \
12721
		ret = false; \
12648
		ret = false; \
12722
	}
12649
	}
12723
 
12650
 
12724
#define PIPE_CONF_QUIRK(quirk)	\
12651
#define PIPE_CONF_QUIRK(quirk)	\
12725
	((current_config->quirks | pipe_config->quirks) & (quirk))
12652
	((current_config->quirks | pipe_config->quirks) & (quirk))
12726
 
12653
 
12727
	PIPE_CONF_CHECK_I(cpu_transcoder);
12654
	PIPE_CONF_CHECK_I(cpu_transcoder);
12728
 
12655
 
12729
	PIPE_CONF_CHECK_I(has_pch_encoder);
12656
	PIPE_CONF_CHECK_I(has_pch_encoder);
12730
	PIPE_CONF_CHECK_I(fdi_lanes);
12657
	PIPE_CONF_CHECK_I(fdi_lanes);
12731
	PIPE_CONF_CHECK_M_N(fdi_m_n);
12658
	PIPE_CONF_CHECK_M_N(fdi_m_n);
12732
 
12659
 
12733
	PIPE_CONF_CHECK_I(has_dp_encoder);
12660
	PIPE_CONF_CHECK_I(has_dp_encoder);
12734
	PIPE_CONF_CHECK_I(lane_count);
12661
	PIPE_CONF_CHECK_I(lane_count);
12735
 
12662
 
12736
	if (INTEL_INFO(dev)->gen < 8) {
12663
	if (INTEL_INFO(dev)->gen < 8) {
12737
		PIPE_CONF_CHECK_M_N(dp_m_n);
12664
		PIPE_CONF_CHECK_M_N(dp_m_n);
12738
 
12665
 
12739
		if (current_config->has_drrs)
12666
		if (current_config->has_drrs)
12740
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12667
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12741
	} else
12668
	} else
12742
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12669
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12743
 
12670
 
12744
	PIPE_CONF_CHECK_I(has_dsi_encoder);
12671
	PIPE_CONF_CHECK_I(has_dsi_encoder);
12745
 
12672
 
12746
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12673
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12747
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12674
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12748
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12675
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12749
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12676
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12750
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12677
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12751
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12678
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12752
 
12679
 
12753
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12680
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12754
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12681
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12755
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12682
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12756
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12683
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12757
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12684
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12758
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12685
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12759
 
12686
 
12760
	PIPE_CONF_CHECK_I(pixel_multiplier);
12687
	PIPE_CONF_CHECK_I(pixel_multiplier);
12761
	PIPE_CONF_CHECK_I(has_hdmi_sink);
12688
	PIPE_CONF_CHECK_I(has_hdmi_sink);
12762
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12689
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12763
	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12690
	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12764
		PIPE_CONF_CHECK_I(limited_color_range);
12691
		PIPE_CONF_CHECK_I(limited_color_range);
12765
	PIPE_CONF_CHECK_I(has_infoframe);
12692
	PIPE_CONF_CHECK_I(has_infoframe);
12766
 
12693
 
12767
	PIPE_CONF_CHECK_I(has_audio);
12694
	PIPE_CONF_CHECK_I(has_audio);
12768
 
12695
 
12769
	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12696
	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12770
			      DRM_MODE_FLAG_INTERLACE);
12697
			      DRM_MODE_FLAG_INTERLACE);
12771
 
12698
 
12772
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12699
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12773
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12700
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12774
				      DRM_MODE_FLAG_PHSYNC);
12701
				      DRM_MODE_FLAG_PHSYNC);
12775
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12702
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12776
				      DRM_MODE_FLAG_NHSYNC);
12703
				      DRM_MODE_FLAG_NHSYNC);
12777
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12704
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12778
				      DRM_MODE_FLAG_PVSYNC);
12705
				      DRM_MODE_FLAG_PVSYNC);
12779
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12706
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12780
				      DRM_MODE_FLAG_NVSYNC);
12707
				      DRM_MODE_FLAG_NVSYNC);
12781
	}
12708
	}
12782
 
12709
 
12783
	PIPE_CONF_CHECK_X(gmch_pfit.control);
12710
	PIPE_CONF_CHECK_X(gmch_pfit.control);
12784
	/* pfit ratios are autocomputed by the hw on gen4+ */
12711
	/* pfit ratios are autocomputed by the hw on gen4+ */
12785
	if (INTEL_INFO(dev)->gen < 4)
12712
	if (INTEL_INFO(dev)->gen < 4)
12786
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
12713
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
12787
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12714
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12788
 
12715
 
12789
	if (!adjust) {
12716
	if (!adjust) {
12790
		PIPE_CONF_CHECK_I(pipe_src_w);
12717
		PIPE_CONF_CHECK_I(pipe_src_w);
12791
		PIPE_CONF_CHECK_I(pipe_src_h);
12718
		PIPE_CONF_CHECK_I(pipe_src_h);
12792
 
12719
 
12793
		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12720
		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12794
		if (current_config->pch_pfit.enabled) {
12721
		if (current_config->pch_pfit.enabled) {
12795
			PIPE_CONF_CHECK_X(pch_pfit.pos);
12722
			PIPE_CONF_CHECK_X(pch_pfit.pos);
12796
			PIPE_CONF_CHECK_X(pch_pfit.size);
12723
			PIPE_CONF_CHECK_X(pch_pfit.size);
12797
		}
12724
		}
12798
 
12725
 
12799
		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12726
		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12800
	}
12727
	}
12801
 
12728
 
12802
	/* BDW+ don't expose a synchronous way to read the state */
12729
	/* BDW+ don't expose a synchronous way to read the state */
12803
	if (IS_HASWELL(dev))
12730
	if (IS_HASWELL(dev))
12804
		PIPE_CONF_CHECK_I(ips_enabled);
12731
		PIPE_CONF_CHECK_I(ips_enabled);
12805
 
12732
 
12806
	PIPE_CONF_CHECK_I(double_wide);
12733
	PIPE_CONF_CHECK_I(double_wide);
12807
 
12734
 
12808
	PIPE_CONF_CHECK_X(ddi_pll_sel);
12735
	PIPE_CONF_CHECK_X(ddi_pll_sel);
12809
 
12736
 
12810
	PIPE_CONF_CHECK_I(shared_dpll);
12737
	PIPE_CONF_CHECK_I(shared_dpll);
12811
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12738
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12812
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12739
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12813
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12740
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12814
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12741
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12815
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12742
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12816
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12743
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12817
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12744
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12818
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12745
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12819
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12746
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12820
 
12747
 
12821
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12748
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12822
		PIPE_CONF_CHECK_I(pipe_bpp);
12749
		PIPE_CONF_CHECK_I(pipe_bpp);
12823
 
12750
 
12824
	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12751
	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12825
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12752
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12826
 
12753
 
12827
#undef PIPE_CONF_CHECK_X
12754
#undef PIPE_CONF_CHECK_X
12828
#undef PIPE_CONF_CHECK_I
12755
#undef PIPE_CONF_CHECK_I
12829
#undef PIPE_CONF_CHECK_I_ALT
12756
#undef PIPE_CONF_CHECK_I_ALT
12830
#undef PIPE_CONF_CHECK_FLAGS
12757
#undef PIPE_CONF_CHECK_FLAGS
12831
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
12758
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
12832
#undef PIPE_CONF_QUIRK
12759
#undef PIPE_CONF_QUIRK
12833
#undef INTEL_ERR_OR_DBG_KMS
12760
#undef INTEL_ERR_OR_DBG_KMS
12834
 
12761
 
12835
	return ret;
12762
	return ret;
12836
}
12763
}
12837
 
12764
 
12838
static void check_wm_state(struct drm_device *dev)
12765
static void check_wm_state(struct drm_device *dev)
12839
{
12766
{
12840
	struct drm_i915_private *dev_priv = dev->dev_private;
12767
	struct drm_i915_private *dev_priv = dev->dev_private;
12841
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
12768
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
12842
	struct intel_crtc *intel_crtc;
12769
	struct intel_crtc *intel_crtc;
12843
	int plane;
12770
	int plane;
12844
 
12771
 
12845
	if (INTEL_INFO(dev)->gen < 9)
12772
	if (INTEL_INFO(dev)->gen < 9)
12846
		return;
12773
		return;
12847
 
12774
 
12848
	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12775
	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12849
	sw_ddb = &dev_priv->wm.skl_hw.ddb;
12776
	sw_ddb = &dev_priv->wm.skl_hw.ddb;
12850
 
12777
 
12851
	for_each_intel_crtc(dev, intel_crtc) {
12778
	for_each_intel_crtc(dev, intel_crtc) {
12852
		struct skl_ddb_entry *hw_entry, *sw_entry;
12779
		struct skl_ddb_entry *hw_entry, *sw_entry;
12853
		const enum pipe pipe = intel_crtc->pipe;
12780
		const enum pipe pipe = intel_crtc->pipe;
12854
 
12781
 
12855
		if (!intel_crtc->active)
12782
		if (!intel_crtc->active)
12856
			continue;
12783
			continue;
12857
 
12784
 
12858
		/* planes */
12785
		/* planes */
12859
		for_each_plane(dev_priv, pipe, plane) {
12786
		for_each_plane(dev_priv, pipe, plane) {
12860
			hw_entry = &hw_ddb.plane[pipe][plane];
12787
			hw_entry = &hw_ddb.plane[pipe][plane];
12861
			sw_entry = &sw_ddb->plane[pipe][plane];
12788
			sw_entry = &sw_ddb->plane[pipe][plane];
12862
 
12789
 
12863
			if (skl_ddb_entry_equal(hw_entry, sw_entry))
12790
			if (skl_ddb_entry_equal(hw_entry, sw_entry))
12864
				continue;
12791
				continue;
12865
 
12792
 
12866
			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12793
			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12867
				  "(expected (%u,%u), found (%u,%u))\n",
12794
				  "(expected (%u,%u), found (%u,%u))\n",
12868
				  pipe_name(pipe), plane + 1,
12795
				  pipe_name(pipe), plane + 1,
12869
				  sw_entry->start, sw_entry->end,
12796
				  sw_entry->start, sw_entry->end,
12870
				  hw_entry->start, hw_entry->end);
12797
				  hw_entry->start, hw_entry->end);
12871
		}
12798
		}
12872
 
12799
 
12873
		/* cursor */
12800
		/* cursor */
12874
		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12801
		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12875
		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12802
		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12876
 
12803
 
12877
		if (skl_ddb_entry_equal(hw_entry, sw_entry))
12804
		if (skl_ddb_entry_equal(hw_entry, sw_entry))
12878
			continue;
12805
			continue;
12879
 
12806
 
12880
		DRM_ERROR("mismatch in DDB state pipe %c cursor "
12807
		DRM_ERROR("mismatch in DDB state pipe %c cursor "
12881
			  "(expected (%u,%u), found (%u,%u))\n",
12808
			  "(expected (%u,%u), found (%u,%u))\n",
12882
			  pipe_name(pipe),
12809
			  pipe_name(pipe),
12883
			  sw_entry->start, sw_entry->end,
12810
			  sw_entry->start, sw_entry->end,
12884
			  hw_entry->start, hw_entry->end);
12811
			  hw_entry->start, hw_entry->end);
12885
	}
12812
	}
12886
}
12813
}
12887
 
12814
 
12888
static void
12815
static void
12889
check_connector_state(struct drm_device *dev,
12816
check_connector_state(struct drm_device *dev,
12890
		      struct drm_atomic_state *old_state)
12817
		      struct drm_atomic_state *old_state)
12891
{
12818
{
12892
	struct drm_connector_state *old_conn_state;
12819
	struct drm_connector_state *old_conn_state;
12893
	struct drm_connector *connector;
12820
	struct drm_connector *connector;
12894
	int i;
12821
	int i;
12895
 
12822
 
12896
	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
12823
	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
12897
		struct drm_encoder *encoder = connector->encoder;
12824
		struct drm_encoder *encoder = connector->encoder;
12898
		struct drm_connector_state *state = connector->state;
12825
		struct drm_connector_state *state = connector->state;
12899
 
12826
 
12900
		/* This also checks the encoder/connector hw state with the
12827
		/* This also checks the encoder/connector hw state with the
12901
		 * ->get_hw_state callbacks. */
12828
		 * ->get_hw_state callbacks. */
12902
		intel_connector_check_state(to_intel_connector(connector));
12829
		intel_connector_check_state(to_intel_connector(connector));
12903
 
12830
 
12904
		I915_STATE_WARN(state->best_encoder != encoder,
12831
		I915_STATE_WARN(state->best_encoder != encoder,
12905
		     "connector's atomic encoder doesn't match legacy encoder\n");
12832
		     "connector's atomic encoder doesn't match legacy encoder\n");
12906
	}
12833
	}
12907
}
12834
}
12908
 
12835
 
12909
static void
12836
static void
12910
check_encoder_state(struct drm_device *dev)
12837
check_encoder_state(struct drm_device *dev)
12911
{
12838
{
12912
	struct intel_encoder *encoder;
12839
	struct intel_encoder *encoder;
12913
	struct intel_connector *connector;
12840
	struct intel_connector *connector;
12914
 
12841
 
12915
	for_each_intel_encoder(dev, encoder) {
12842
	for_each_intel_encoder(dev, encoder) {
12916
		bool enabled = false;
12843
		bool enabled = false;
12917
		enum pipe pipe;
12844
		enum pipe pipe;
12918
 
12845
 
12919
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12846
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12920
			      encoder->base.base.id,
12847
			      encoder->base.base.id,
12921
			      encoder->base.name);
12848
			      encoder->base.name);
12922
 
12849
 
12923
		for_each_intel_connector(dev, connector) {
12850
		for_each_intel_connector(dev, connector) {
12924
			if (connector->base.state->best_encoder != &encoder->base)
12851
			if (connector->base.state->best_encoder != &encoder->base)
12925
				continue;
12852
				continue;
12926
			enabled = true;
12853
			enabled = true;
12927
 
12854
 
12928
			I915_STATE_WARN(connector->base.state->crtc !=
12855
			I915_STATE_WARN(connector->base.state->crtc !=
12929
					encoder->base.crtc,
12856
					encoder->base.crtc,
12930
			     "connector's crtc doesn't match encoder crtc\n");
12857
			     "connector's crtc doesn't match encoder crtc\n");
12931
		}
12858
		}
12932
 
12859
 
12933
		I915_STATE_WARN(!!encoder->base.crtc != enabled,
12860
		I915_STATE_WARN(!!encoder->base.crtc != enabled,
12934
		     "encoder's enabled state mismatch "
12861
		     "encoder's enabled state mismatch "
12935
		     "(expected %i, found %i)\n",
12862
		     "(expected %i, found %i)\n",
12936
		     !!encoder->base.crtc, enabled);
12863
		     !!encoder->base.crtc, enabled);
12937
 
12864
 
12938
		if (!encoder->base.crtc) {
12865
		if (!encoder->base.crtc) {
12939
			bool active;
12866
			bool active;
12940
 
12867
 
12941
			active = encoder->get_hw_state(encoder, &pipe);
12868
			active = encoder->get_hw_state(encoder, &pipe);
12942
			I915_STATE_WARN(active,
12869
			I915_STATE_WARN(active,
12943
			     "encoder detached but still enabled on pipe %c.\n",
12870
			     "encoder detached but still enabled on pipe %c.\n",
12944
			     pipe_name(pipe));
12871
			     pipe_name(pipe));
12945
		}
12872
		}
12946
	}
12873
	}
12947
}
12874
}
12948
 
12875
 
12949
static void
12876
static void
12950
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
12877
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
12951
{
12878
{
12952
	struct drm_i915_private *dev_priv = dev->dev_private;
12879
	struct drm_i915_private *dev_priv = dev->dev_private;
12953
	struct intel_encoder *encoder;
12880
	struct intel_encoder *encoder;
12954
	struct drm_crtc_state *old_crtc_state;
12881
	struct drm_crtc_state *old_crtc_state;
12955
	struct drm_crtc *crtc;
12882
	struct drm_crtc *crtc;
12956
	int i;
12883
	int i;
12957
 
12884
 
12958
	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
12885
	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
12959
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12886
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12960
		struct intel_crtc_state *pipe_config, *sw_config;
12887
		struct intel_crtc_state *pipe_config, *sw_config;
12961
		bool active;
12888
		bool active;
12962
 
12889
 
12963
		if (!needs_modeset(crtc->state) &&
12890
		if (!needs_modeset(crtc->state) &&
12964
		    !to_intel_crtc_state(crtc->state)->update_pipe)
12891
		    !to_intel_crtc_state(crtc->state)->update_pipe)
12965
			continue;
12892
			continue;
12966
 
12893
 
12967
		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
12894
		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
12968
		pipe_config = to_intel_crtc_state(old_crtc_state);
12895
		pipe_config = to_intel_crtc_state(old_crtc_state);
12969
		memset(pipe_config, 0, sizeof(*pipe_config));
12896
		memset(pipe_config, 0, sizeof(*pipe_config));
12970
		pipe_config->base.crtc = crtc;
12897
		pipe_config->base.crtc = crtc;
12971
		pipe_config->base.state = old_state;
12898
		pipe_config->base.state = old_state;
12972
 
12899
 
12973
		DRM_DEBUG_KMS("[CRTC:%d]\n",
12900
		DRM_DEBUG_KMS("[CRTC:%d]\n",
12974
			      crtc->base.id);
12901
			      crtc->base.id);
12975
 
12902
 
12976
		active = dev_priv->display.get_pipe_config(intel_crtc,
12903
		active = dev_priv->display.get_pipe_config(intel_crtc,
12977
							   pipe_config);
12904
							   pipe_config);
12978
 
12905
 
12979
		/* hw state is inconsistent with the pipe quirk */
12906
		/* hw state is inconsistent with the pipe quirk */
12980
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
12907
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
12981
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12908
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12982
			active = crtc->state->active;
12909
			active = crtc->state->active;
12983
 
12910
 
12984
		I915_STATE_WARN(crtc->state->active != active,
12911
		I915_STATE_WARN(crtc->state->active != active,
12985
		     "crtc active state doesn't match with hw state "
12912
		     "crtc active state doesn't match with hw state "
12986
		     "(expected %i, found %i)\n", crtc->state->active, active);
12913
		     "(expected %i, found %i)\n", crtc->state->active, active);
12987
 
12914
 
12988
		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
12915
		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
12989
		     "transitional active state does not match atomic hw state "
12916
		     "transitional active state does not match atomic hw state "
12990
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
12917
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
12991
 
12918
 
12992
		for_each_encoder_on_crtc(dev, crtc, encoder) {
12919
		for_each_encoder_on_crtc(dev, crtc, encoder) {
12993
			enum pipe pipe;
12920
			enum pipe pipe;
12994
 
12921
 
12995
			active = encoder->get_hw_state(encoder, &pipe);
12922
			active = encoder->get_hw_state(encoder, &pipe);
12996
			I915_STATE_WARN(active != crtc->state->active,
12923
			I915_STATE_WARN(active != crtc->state->active,
12997
				"[ENCODER:%i] active %i with crtc active %i\n",
12924
				"[ENCODER:%i] active %i with crtc active %i\n",
12998
				encoder->base.base.id, active, crtc->state->active);
12925
				encoder->base.base.id, active, crtc->state->active);
12999
 
12926
 
13000
			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12927
			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
13001
					"Encoder connected to wrong pipe %c\n",
12928
					"Encoder connected to wrong pipe %c\n",
13002
					pipe_name(pipe));
12929
					pipe_name(pipe));
13003
 
12930
 
13004
			if (active)
12931
			if (active)
13005
				encoder->get_config(encoder, pipe_config);
12932
				encoder->get_config(encoder, pipe_config);
13006
		}
12933
		}
13007
 
12934
 
13008
		if (!crtc->state->active)
12935
		if (!crtc->state->active)
13009
			continue;
12936
			continue;
13010
 
12937
 
13011
		sw_config = to_intel_crtc_state(crtc->state);
12938
		sw_config = to_intel_crtc_state(crtc->state);
13012
		if (!intel_pipe_config_compare(dev, sw_config,
12939
		if (!intel_pipe_config_compare(dev, sw_config,
13013
					       pipe_config, false)) {
12940
					       pipe_config, false)) {
13014
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
12941
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
13015
			intel_dump_pipe_config(intel_crtc, pipe_config,
12942
			intel_dump_pipe_config(intel_crtc, pipe_config,
13016
					       "[hw state]");
12943
					       "[hw state]");
13017
			intel_dump_pipe_config(intel_crtc, sw_config,
12944
			intel_dump_pipe_config(intel_crtc, sw_config,
13018
					       "[sw state]");
12945
					       "[sw state]");
13019
		}
12946
		}
13020
	}
12947
	}
13021
}
12948
}
13022
 
12949
 
13023
static void
12950
static void
13024
check_shared_dpll_state(struct drm_device *dev)
12951
check_shared_dpll_state(struct drm_device *dev)
13025
{
12952
{
13026
	struct drm_i915_private *dev_priv = dev->dev_private;
12953
	struct drm_i915_private *dev_priv = dev->dev_private;
13027
	struct intel_crtc *crtc;
12954
	struct intel_crtc *crtc;
13028
	struct intel_dpll_hw_state dpll_hw_state;
12955
	struct intel_dpll_hw_state dpll_hw_state;
13029
	int i;
12956
	int i;
13030
 
12957
 
13031
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12958
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13032
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12959
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13033
		int enabled_crtcs = 0, active_crtcs = 0;
12960
		int enabled_crtcs = 0, active_crtcs = 0;
13034
		bool active;
12961
		bool active;
13035
 
12962
 
13036
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12963
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13037
 
12964
 
13038
		DRM_DEBUG_KMS("%s\n", pll->name);
12965
		DRM_DEBUG_KMS("%s\n", pll->name);
13039
 
12966
 
13040
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
12967
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
13041
 
12968
 
13042
		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
12969
		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
13043
		     "more active pll users than references: %i vs %i\n",
12970
		     "more active pll users than references: %i vs %i\n",
13044
		     pll->active, hweight32(pll->config.crtc_mask));
12971
		     pll->active, hweight32(pll->config.crtc_mask));
13045
		I915_STATE_WARN(pll->active && !pll->on,
12972
		I915_STATE_WARN(pll->active && !pll->on,
13046
		     "pll in active use but not on in sw tracking\n");
12973
		     "pll in active use but not on in sw tracking\n");
13047
		I915_STATE_WARN(pll->on && !pll->active,
12974
		I915_STATE_WARN(pll->on && !pll->active,
13048
		     "pll in on but not on in use in sw tracking\n");
12975
		     "pll in on but not on in use in sw tracking\n");
13049
		I915_STATE_WARN(pll->on != active,
12976
		I915_STATE_WARN(pll->on != active,
13050
		     "pll on state mismatch (expected %i, found %i)\n",
12977
		     "pll on state mismatch (expected %i, found %i)\n",
13051
		     pll->on, active);
12978
		     pll->on, active);
13052
 
12979
 
13053
		for_each_intel_crtc(dev, crtc) {
12980
		for_each_intel_crtc(dev, crtc) {
13054
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
12981
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
13055
				enabled_crtcs++;
12982
				enabled_crtcs++;
13056
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
12983
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
13057
				active_crtcs++;
12984
				active_crtcs++;
13058
		}
12985
		}
13059
		I915_STATE_WARN(pll->active != active_crtcs,
12986
		I915_STATE_WARN(pll->active != active_crtcs,
13060
		     "pll active crtcs mismatch (expected %i, found %i)\n",
12987
		     "pll active crtcs mismatch (expected %i, found %i)\n",
13061
		     pll->active, active_crtcs);
12988
		     pll->active, active_crtcs);
13062
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
12989
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
13063
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
12990
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
13064
		     hweight32(pll->config.crtc_mask), enabled_crtcs);
12991
		     hweight32(pll->config.crtc_mask), enabled_crtcs);
13065
 
12992
 
13066
		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
12993
		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
13067
				       sizeof(dpll_hw_state)),
12994
				       sizeof(dpll_hw_state)),
13068
		     "pll hw state mismatch\n");
12995
		     "pll hw state mismatch\n");
13069
	}
12996
	}
13070
}
12997
}
13071
 
12998
 
13072
static void
12999
static void
13073
intel_modeset_check_state(struct drm_device *dev,
13000
intel_modeset_check_state(struct drm_device *dev,
13074
			  struct drm_atomic_state *old_state)
13001
			  struct drm_atomic_state *old_state)
13075
{
13002
{
13076
	check_wm_state(dev);
13003
	check_wm_state(dev);
13077
	check_connector_state(dev, old_state);
13004
	check_connector_state(dev, old_state);
13078
	check_encoder_state(dev);
13005
	check_encoder_state(dev);
13079
	check_crtc_state(dev, old_state);
13006
	check_crtc_state(dev, old_state);
13080
	check_shared_dpll_state(dev);
13007
	check_shared_dpll_state(dev);
13081
}
13008
}
13082
 
13009
 
13083
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
13010
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
13084
				     int dotclock)
13011
				     int dotclock)
13085
{
13012
{
13086
	/*
13013
	/*
13087
	 * FDI already provided one idea for the dotclock.
13014
	 * FDI already provided one idea for the dotclock.
13088
	 * Yell if the encoder disagrees.
13015
	 * Yell if the encoder disagrees.
13089
	 */
13016
	 */
13090
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
13017
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
13091
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13018
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13092
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
13019
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
13093
}
13020
}
13094
 
13021
 
13095
static void update_scanline_offset(struct intel_crtc *crtc)
13022
static void update_scanline_offset(struct intel_crtc *crtc)
13096
{
13023
{
13097
	struct drm_device *dev = crtc->base.dev;
13024
	struct drm_device *dev = crtc->base.dev;
13098
 
13025
 
13099
	/*
13026
	/*
13100
	 * The scanline counter increments at the leading edge of hsync.
13027
	 * The scanline counter increments at the leading edge of hsync.
13101
	 *
13028
	 *
13102
	 * On most platforms it starts counting from vtotal-1 on the
13029
	 * On most platforms it starts counting from vtotal-1 on the
13103
	 * first active line. That means the scanline counter value is
13030
	 * first active line. That means the scanline counter value is
13104
	 * always one less than what we would expect. Ie. just after
13031
	 * always one less than what we would expect. Ie. just after
13105
	 * start of vblank, which also occurs at start of hsync (on the
13032
	 * start of vblank, which also occurs at start of hsync (on the
13106
	 * last active line), the scanline counter will read vblank_start-1.
13033
	 * last active line), the scanline counter will read vblank_start-1.
13107
	 *
13034
	 *
13108
	 * On gen2 the scanline counter starts counting from 1 instead
13035
	 * On gen2 the scanline counter starts counting from 1 instead
13109
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13036
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13110
	 * to keep the value positive), instead of adding one.
13037
	 * to keep the value positive), instead of adding one.
13111
	 *
13038
	 *
13112
	 * On HSW+ the behaviour of the scanline counter depends on the output
13039
	 * On HSW+ the behaviour of the scanline counter depends on the output
13113
	 * type. For DP ports it behaves like most other platforms, but on HDMI
13040
	 * type. For DP ports it behaves like most other platforms, but on HDMI
13114
	 * there's an extra 1 line difference. So we need to add two instead of
13041
	 * there's an extra 1 line difference. So we need to add two instead of
13115
	 * one to the value.
13042
	 * one to the value.
13116
	 */
13043
	 */
13117
	if (IS_GEN2(dev)) {
13044
	if (IS_GEN2(dev)) {
13118
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13045
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13119
		int vtotal;
13046
		int vtotal;
13120
 
13047
 
13121
		vtotal = adjusted_mode->crtc_vtotal;
13048
		vtotal = adjusted_mode->crtc_vtotal;
13122
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13049
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13123
			vtotal /= 2;
13050
			vtotal /= 2;
13124
 
13051
 
13125
		crtc->scanline_offset = vtotal - 1;
13052
		crtc->scanline_offset = vtotal - 1;
13126
	} else if (HAS_DDI(dev) &&
13053
	} else if (HAS_DDI(dev) &&
13127
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
13054
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
13128
		crtc->scanline_offset = 2;
13055
		crtc->scanline_offset = 2;
13129
	} else
13056
	} else
13130
		crtc->scanline_offset = 1;
13057
		crtc->scanline_offset = 1;
13131
}
13058
}
13132
 
13059
 
13133
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13060
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13134
{
13061
{
13135
	struct drm_device *dev = state->dev;
13062
	struct drm_device *dev = state->dev;
13136
	struct drm_i915_private *dev_priv = to_i915(dev);
13063
	struct drm_i915_private *dev_priv = to_i915(dev);
13137
	struct intel_shared_dpll_config *shared_dpll = NULL;
13064
	struct intel_shared_dpll_config *shared_dpll = NULL;
13138
	struct intel_crtc *intel_crtc;
-
 
13139
	struct intel_crtc_state *intel_crtc_state;
-
 
13140
	struct drm_crtc *crtc;
13065
	struct drm_crtc *crtc;
13141
	struct drm_crtc_state *crtc_state;
13066
	struct drm_crtc_state *crtc_state;
13142
	int i;
13067
	int i;
13143
 
13068
 
13144
	if (!dev_priv->display.crtc_compute_clock)
13069
	if (!dev_priv->display.crtc_compute_clock)
13145
		return;
13070
		return;
13146
 
13071
 
13147
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13072
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13148
		int dpll;
-
 
13149
 
-
 
13150
		intel_crtc = to_intel_crtc(crtc);
13073
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13151
		intel_crtc_state = to_intel_crtc_state(crtc_state);
13074
		int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
13152
		dpll = intel_crtc_state->shared_dpll;
-
 
13153
 
13075
 
13154
		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
13076
		if (!needs_modeset(crtc_state))
13155
			continue;
13077
			continue;
-
 
13078
 
-
 
13079
		to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
-
 
13080
 
13156
 
13081
		if (old_dpll == DPLL_ID_PRIVATE)
13157
		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
13082
			continue;
13158
 
13083
 
13159
		if (!shared_dpll)
13084
		if (!shared_dpll)
13160
			shared_dpll = intel_atomic_get_shared_dpll_state(state);
13085
			shared_dpll = intel_atomic_get_shared_dpll_state(state);
13161
 
13086
 
13162
		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
13087
		shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
13163
	}
13088
	}
13164
}
13089
}
13165
 
13090
 
13166
/*
13091
/*
13167
 * This implements the workaround described in the "notes" section of the mode
13092
 * This implements the workaround described in the "notes" section of the mode
13168
 * set sequence documentation. When going from no pipes or single pipe to
13093
 * set sequence documentation. When going from no pipes or single pipe to
13169
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13094
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13170
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13095
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13171
 */
13096
 */
13172
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13097
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13173
{
13098
{
13174
	struct drm_crtc_state *crtc_state;
13099
	struct drm_crtc_state *crtc_state;
13175
	struct intel_crtc *intel_crtc;
13100
	struct intel_crtc *intel_crtc;
13176
	struct drm_crtc *crtc;
13101
	struct drm_crtc *crtc;
13177
	struct intel_crtc_state *first_crtc_state = NULL;
13102
	struct intel_crtc_state *first_crtc_state = NULL;
13178
	struct intel_crtc_state *other_crtc_state = NULL;
13103
	struct intel_crtc_state *other_crtc_state = NULL;
13179
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13104
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13180
	int i;
13105
	int i;
13181
 
13106
 
13182
	/* look at all crtc's that are going to be enabled in during modeset */
13107
	/* look at all crtc's that are going to be enabled in during modeset */
13183
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13108
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13184
		intel_crtc = to_intel_crtc(crtc);
13109
		intel_crtc = to_intel_crtc(crtc);
13185
 
13110
 
13186
		if (!crtc_state->active || !needs_modeset(crtc_state))
13111
		if (!crtc_state->active || !needs_modeset(crtc_state))
13187
			continue;
13112
			continue;
13188
 
13113
 
13189
		if (first_crtc_state) {
13114
		if (first_crtc_state) {
13190
			other_crtc_state = to_intel_crtc_state(crtc_state);
13115
			other_crtc_state = to_intel_crtc_state(crtc_state);
13191
			break;
13116
			break;
13192
		} else {
13117
		} else {
13193
			first_crtc_state = to_intel_crtc_state(crtc_state);
13118
			first_crtc_state = to_intel_crtc_state(crtc_state);
13194
			first_pipe = intel_crtc->pipe;
13119
			first_pipe = intel_crtc->pipe;
13195
		}
13120
		}
13196
	}
13121
	}
13197
 
13122
 
13198
	/* No workaround needed? */
13123
	/* No workaround needed? */
13199
	if (!first_crtc_state)
13124
	if (!first_crtc_state)
13200
		return 0;
13125
		return 0;
13201
 
13126
 
13202
	/* w/a possibly needed, check how many crtc's are already enabled. */
13127
	/* w/a possibly needed, check how many crtc's are already enabled. */
13203
	for_each_intel_crtc(state->dev, intel_crtc) {
13128
	for_each_intel_crtc(state->dev, intel_crtc) {
13204
		struct intel_crtc_state *pipe_config;
13129
		struct intel_crtc_state *pipe_config;
13205
 
13130
 
13206
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13131
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13207
		if (IS_ERR(pipe_config))
13132
		if (IS_ERR(pipe_config))
13208
			return PTR_ERR(pipe_config);
13133
			return PTR_ERR(pipe_config);
13209
 
13134
 
13210
		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13135
		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13211
 
13136
 
13212
		if (!pipe_config->base.active ||
13137
		if (!pipe_config->base.active ||
13213
		    needs_modeset(&pipe_config->base))
13138
		    needs_modeset(&pipe_config->base))
13214
			continue;
13139
			continue;
13215
 
13140
 
13216
		/* 2 or more enabled crtcs means no need for w/a */
13141
		/* 2 or more enabled crtcs means no need for w/a */
13217
		if (enabled_pipe != INVALID_PIPE)
13142
		if (enabled_pipe != INVALID_PIPE)
13218
			return 0;
13143
			return 0;
13219
 
13144
 
13220
		enabled_pipe = intel_crtc->pipe;
13145
		enabled_pipe = intel_crtc->pipe;
13221
	}
13146
	}
13222
 
13147
 
13223
	if (enabled_pipe != INVALID_PIPE)
13148
	if (enabled_pipe != INVALID_PIPE)
13224
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13149
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13225
	else if (other_crtc_state)
13150
	else if (other_crtc_state)
13226
		other_crtc_state->hsw_workaround_pipe = first_pipe;
13151
		other_crtc_state->hsw_workaround_pipe = first_pipe;
13227
 
13152
 
13228
	return 0;
13153
	return 0;
13229
}
13154
}
13230
 
13155
 
13231
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13156
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13232
{
13157
{
13233
	struct drm_crtc *crtc;
13158
	struct drm_crtc *crtc;
13234
	struct drm_crtc_state *crtc_state;
13159
	struct drm_crtc_state *crtc_state;
13235
	int ret = 0;
13160
	int ret = 0;
13236
 
13161
 
13237
	/* add all active pipes to the state */
13162
	/* add all active pipes to the state */
13238
	for_each_crtc(state->dev, crtc) {
13163
	for_each_crtc(state->dev, crtc) {
13239
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13164
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13240
		if (IS_ERR(crtc_state))
13165
		if (IS_ERR(crtc_state))
13241
			return PTR_ERR(crtc_state);
13166
			return PTR_ERR(crtc_state);
13242
 
13167
 
13243
		if (!crtc_state->active || needs_modeset(crtc_state))
13168
		if (!crtc_state->active || needs_modeset(crtc_state))
13244
			continue;
13169
			continue;
13245
 
13170
 
13246
		crtc_state->mode_changed = true;
13171
		crtc_state->mode_changed = true;
13247
 
13172
 
13248
		ret = drm_atomic_add_affected_connectors(state, crtc);
13173
		ret = drm_atomic_add_affected_connectors(state, crtc);
13249
		if (ret)
13174
		if (ret)
13250
			break;
13175
			break;
13251
 
13176
 
13252
		ret = drm_atomic_add_affected_planes(state, crtc);
13177
		ret = drm_atomic_add_affected_planes(state, crtc);
13253
		if (ret)
13178
		if (ret)
13254
			break;
13179
			break;
13255
	}
13180
	}
13256
 
13181
 
13257
	return ret;
13182
	return ret;
13258
}
13183
}
13259
 
13184
 
13260
static int intel_modeset_checks(struct drm_atomic_state *state)
13185
static int intel_modeset_checks(struct drm_atomic_state *state)
13261
{
13186
{
13262
	struct drm_device *dev = state->dev;
13187
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13263
	struct drm_i915_private *dev_priv = dev->dev_private;
13188
	struct drm_i915_private *dev_priv = state->dev->dev_private;
-
 
13189
	struct drm_crtc *crtc;
-
 
13190
	struct drm_crtc_state *crtc_state;
13264
	int ret;
13191
	int ret = 0, i;
13265
 
13192
 
13266
	if (!check_digital_port_conflicts(state)) {
13193
	if (!check_digital_port_conflicts(state)) {
13267
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13194
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13268
		return -EINVAL;
13195
		return -EINVAL;
13269
	}
13196
	}
-
 
13197
 
-
 
13198
	intel_state->modeset = true;
-
 
13199
	intel_state->active_crtcs = dev_priv->active_crtcs;
-
 
13200
 
-
 
13201
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-
 
13202
		if (crtc_state->active)
-
 
13203
			intel_state->active_crtcs |= 1 << i;
-
 
13204
		else
-
 
13205
			intel_state->active_crtcs &= ~(1 << i);
-
 
13206
	}
13270
 
13207
 
13271
	/*
13208
	/*
13272
	 * See if the config requires any additional preparation, e.g.
13209
	 * See if the config requires any additional preparation, e.g.
13273
	 * to adjust global state with pipes off.  We need to do this
13210
	 * to adjust global state with pipes off.  We need to do this
13274
	 * here so we can get the modeset_pipe updated config for the new
13211
	 * here so we can get the modeset_pipe updated config for the new
13275
	 * mode set on this crtc.  For other crtcs we need to use the
13212
	 * mode set on this crtc.  For other crtcs we need to use the
13276
	 * adjusted_mode bits in the crtc directly.
13213
	 * adjusted_mode bits in the crtc directly.
13277
	 */
13214
	 */
13278
	if (dev_priv->display.modeset_calc_cdclk) {
13215
	if (dev_priv->display.modeset_calc_cdclk) {
13279
		unsigned int cdclk;
-
 
13280
 
-
 
13281
		ret = dev_priv->display.modeset_calc_cdclk(state);
13216
		ret = dev_priv->display.modeset_calc_cdclk(state);
13282
 
-
 
13283
		cdclk = to_intel_atomic_state(state)->cdclk;
13217
 
13284
		if (!ret && cdclk != dev_priv->cdclk_freq)
13218
		if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
13285
			ret = intel_modeset_all_pipes(state);
13219
			ret = intel_modeset_all_pipes(state);
13286
 
13220
 
13287
		if (ret < 0)
13221
		if (ret < 0)
13288
			return ret;
13222
			return ret;
-
 
13223
 
-
 
13224
		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
-
 
13225
			      intel_state->cdclk, intel_state->dev_cdclk);
13289
	} else
13226
	} else
13290
		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
13227
		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
13291
 
13228
 
13292
	intel_modeset_clear_plls(state);
13229
	intel_modeset_clear_plls(state);
13293
 
13230
 
13294
	if (IS_HASWELL(dev))
13231
	if (IS_HASWELL(dev_priv))
13295
		return haswell_mode_set_planes_workaround(state);
13232
		return haswell_mode_set_planes_workaround(state);
13296
 
13233
 
13297
	return 0;
13234
	return 0;
13298
}
13235
}
13299
 
13236
 
13300
/*
13237
/*
13301
 * Handle calculation of various watermark data at the end of the atomic check
13238
 * Handle calculation of various watermark data at the end of the atomic check
13302
 * phase.  The code here should be run after the per-crtc and per-plane 'check'
13239
 * phase.  The code here should be run after the per-crtc and per-plane 'check'
13303
 * handlers to ensure that all derived state has been updated.
13240
 * handlers to ensure that all derived state has been updated.
13304
 */
13241
 */
13305
static void calc_watermark_data(struct drm_atomic_state *state)
13242
static void calc_watermark_data(struct drm_atomic_state *state)
13306
{
13243
{
13307
	struct drm_device *dev = state->dev;
13244
	struct drm_device *dev = state->dev;
13308
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13245
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13309
	struct drm_crtc *crtc;
13246
	struct drm_crtc *crtc;
13310
	struct drm_crtc_state *cstate;
13247
	struct drm_crtc_state *cstate;
13311
	struct drm_plane *plane;
13248
	struct drm_plane *plane;
13312
	struct drm_plane_state *pstate;
13249
	struct drm_plane_state *pstate;
13313
 
13250
 
13314
	/*
13251
	/*
13315
	 * Calculate watermark configuration details now that derived
13252
	 * Calculate watermark configuration details now that derived
13316
	 * plane/crtc state is all properly updated.
13253
	 * plane/crtc state is all properly updated.
13317
	 */
13254
	 */
13318
	drm_for_each_crtc(crtc, dev) {
13255
	drm_for_each_crtc(crtc, dev) {
13319
		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13256
		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13320
			crtc->state;
13257
			crtc->state;
13321
 
13258
 
13322
		if (cstate->active)
13259
		if (cstate->active)
13323
			intel_state->wm_config.num_pipes_active++;
13260
			intel_state->wm_config.num_pipes_active++;
13324
	}
13261
	}
13325
	drm_for_each_legacy_plane(plane, dev) {
13262
	drm_for_each_legacy_plane(plane, dev) {
13326
		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13263
		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13327
			plane->state;
13264
			plane->state;
13328
 
13265
 
13329
		if (!to_intel_plane_state(pstate)->visible)
13266
		if (!to_intel_plane_state(pstate)->visible)
13330
			continue;
13267
			continue;
13331
 
13268
 
13332
		intel_state->wm_config.sprites_enabled = true;
13269
		intel_state->wm_config.sprites_enabled = true;
13333
		if (pstate->crtc_w != pstate->src_w >> 16 ||
13270
		if (pstate->crtc_w != pstate->src_w >> 16 ||
13334
		    pstate->crtc_h != pstate->src_h >> 16)
13271
		    pstate->crtc_h != pstate->src_h >> 16)
13335
			intel_state->wm_config.sprites_scaled = true;
13272
			intel_state->wm_config.sprites_scaled = true;
13336
	}
13273
	}
13337
}
13274
}
13338
 
13275
 
13339
/**
13276
/**
13340
 * intel_atomic_check - validate state object
13277
 * intel_atomic_check - validate state object
13341
 * @dev: drm device
13278
 * @dev: drm device
13342
 * @state: state to validate
13279
 * @state: state to validate
13343
 */
13280
 */
13344
static int intel_atomic_check(struct drm_device *dev,
13281
static int intel_atomic_check(struct drm_device *dev,
13345
			      struct drm_atomic_state *state)
13282
			      struct drm_atomic_state *state)
13346
{
13283
{
-
 
13284
	struct drm_i915_private *dev_priv = to_i915(dev);
13347
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13285
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13348
	struct drm_crtc *crtc;
13286
	struct drm_crtc *crtc;
13349
	struct drm_crtc_state *crtc_state;
13287
	struct drm_crtc_state *crtc_state;
13350
	int ret, i;
13288
	int ret, i;
13351
	bool any_ms = false;
13289
	bool any_ms = false;
13352
 
13290
 
13353
	ret = drm_atomic_helper_check_modeset(dev, state);
13291
	ret = drm_atomic_helper_check_modeset(dev, state);
13354
	if (ret)
13292
	if (ret)
13355
		return ret;
13293
		return ret;
13356
 
13294
 
13357
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13295
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13358
		struct intel_crtc_state *pipe_config =
13296
		struct intel_crtc_state *pipe_config =
13359
			to_intel_crtc_state(crtc_state);
13297
			to_intel_crtc_state(crtc_state);
13360
 
13298
 
13361
		memset(&to_intel_crtc(crtc)->atomic, 0,
13299
		memset(&to_intel_crtc(crtc)->atomic, 0,
13362
		       sizeof(struct intel_crtc_atomic_commit));
13300
		       sizeof(struct intel_crtc_atomic_commit));
13363
 
13301
 
13364
		/* Catch I915_MODE_FLAG_INHERITED */
13302
		/* Catch I915_MODE_FLAG_INHERITED */
13365
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13303
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13366
			crtc_state->mode_changed = true;
13304
			crtc_state->mode_changed = true;
13367
 
13305
 
13368
		if (!crtc_state->enable) {
13306
		if (!crtc_state->enable) {
13369
			if (needs_modeset(crtc_state))
13307
			if (needs_modeset(crtc_state))
13370
				any_ms = true;
13308
				any_ms = true;
13371
			continue;
13309
			continue;
13372
		}
13310
		}
13373
 
13311
 
13374
		if (!needs_modeset(crtc_state))
13312
		if (!needs_modeset(crtc_state))
13375
			continue;
13313
			continue;
13376
 
13314
 
13377
		/* FIXME: For only active_changed we shouldn't need to do any
13315
		/* FIXME: For only active_changed we shouldn't need to do any
13378
		 * state recomputation at all. */
13316
		 * state recomputation at all. */
13379
 
13317
 
13380
		ret = drm_atomic_add_affected_connectors(state, crtc);
13318
		ret = drm_atomic_add_affected_connectors(state, crtc);
13381
		if (ret)
13319
		if (ret)
13382
			return ret;
13320
			return ret;
13383
 
13321
 
13384
		ret = intel_modeset_pipe_config(crtc, pipe_config);
13322
		ret = intel_modeset_pipe_config(crtc, pipe_config);
13385
		if (ret)
13323
		if (ret)
13386
			return ret;
13324
			return ret;
13387
 
13325
 
13388
		if (i915.fastboot &&
13326
		if (i915.fastboot &&
13389
		    intel_pipe_config_compare(state->dev,
13327
		    intel_pipe_config_compare(dev,
13390
					to_intel_crtc_state(crtc->state),
13328
					to_intel_crtc_state(crtc->state),
13391
					pipe_config, true)) {
13329
					pipe_config, true)) {
13392
			crtc_state->mode_changed = false;
13330
			crtc_state->mode_changed = false;
13393
			to_intel_crtc_state(crtc_state)->update_pipe = true;
13331
			to_intel_crtc_state(crtc_state)->update_pipe = true;
13394
		}
13332
		}
13395
 
13333
 
13396
		if (needs_modeset(crtc_state)) {
13334
		if (needs_modeset(crtc_state)) {
13397
			any_ms = true;
13335
			any_ms = true;
13398
 
13336
 
13399
			ret = drm_atomic_add_affected_planes(state, crtc);
13337
			ret = drm_atomic_add_affected_planes(state, crtc);
13400
			if (ret)
13338
			if (ret)
13401
				return ret;
13339
				return ret;
13402
		}
13340
		}
13403
 
13341
 
13404
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13342
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13405
				       needs_modeset(crtc_state) ?
13343
				       needs_modeset(crtc_state) ?
13406
				       "[modeset]" : "[fastset]");
13344
				       "[modeset]" : "[fastset]");
13407
	}
13345
	}
13408
 
13346
 
13409
	if (any_ms) {
13347
	if (any_ms) {
13410
		ret = intel_modeset_checks(state);
13348
		ret = intel_modeset_checks(state);
13411
 
13349
 
13412
		if (ret)
13350
		if (ret)
13413
			return ret;
13351
			return ret;
13414
	} else
13352
	} else
13415
		intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
13353
		intel_state->cdclk = dev_priv->cdclk_freq;
13416
 
13354
 
13417
	ret = drm_atomic_helper_check_planes(state->dev, state);
13355
	ret = drm_atomic_helper_check_planes(dev, state);
13418
	if (ret)
13356
	if (ret)
13419
		return ret;
13357
		return ret;
-
 
13358
 
13420
 
13359
	intel_fbc_choose_crtc(dev_priv, state);
13421
	calc_watermark_data(state);
13360
	calc_watermark_data(state);
13422
 
13361
 
13423
	return 0;
13362
	return 0;
13424
}
13363
}
13425
 
13364
 
13426
static int intel_atomic_prepare_commit(struct drm_device *dev,
13365
static int intel_atomic_prepare_commit(struct drm_device *dev,
13427
				       struct drm_atomic_state *state,
13366
				       struct drm_atomic_state *state,
13428
				       bool async)
13367
				       bool async)
13429
{
13368
{
13430
	struct drm_i915_private *dev_priv = dev->dev_private;
13369
	struct drm_i915_private *dev_priv = dev->dev_private;
13431
	struct drm_plane_state *plane_state;
13370
	struct drm_plane_state *plane_state;
13432
	struct drm_crtc_state *crtc_state;
13371
	struct drm_crtc_state *crtc_state;
13433
	struct drm_plane *plane;
13372
	struct drm_plane *plane;
13434
	struct drm_crtc *crtc;
13373
	struct drm_crtc *crtc;
13435
	int i, ret;
13374
	int i, ret;
13436
 
13375
 
13437
	if (async) {
13376
	if (async) {
13438
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
13377
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
13439
		return -EINVAL;
13378
		return -EINVAL;
13440
	}
13379
	}
13441
 
13380
 
13442
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13381
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13443
		if (state->legacy_cursor_update)
13382
		if (state->legacy_cursor_update)
13444
			continue;
13383
			continue;
13445
 
13384
 
13446
		ret = intel_crtc_wait_for_pending_flips(crtc);
13385
		ret = intel_crtc_wait_for_pending_flips(crtc);
13447
		if (ret)
13386
		if (ret)
13448
			return ret;
13387
			return ret;
13449
 
13388
 
13450
//		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13389
//		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13451
//			flush_workqueue(dev_priv->wq);
13390
//			flush_workqueue(dev_priv->wq);
13452
	}
13391
	}
13453
 
13392
 
13454
	ret = mutex_lock_interruptible(&dev->struct_mutex);
13393
	ret = mutex_lock_interruptible(&dev->struct_mutex);
13455
	if (ret)
13394
	if (ret)
13456
		return ret;
13395
		return ret;
13457
 
13396
 
13458
	ret = drm_atomic_helper_prepare_planes(dev, state);
13397
	ret = drm_atomic_helper_prepare_planes(dev, state);
13459
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
13398
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
13460
		u32 reset_counter;
13399
		u32 reset_counter;
13461
 
13400
 
13462
		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
13401
		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
13463
		mutex_unlock(&dev->struct_mutex);
13402
		mutex_unlock(&dev->struct_mutex);
13464
 
13403
 
13465
		for_each_plane_in_state(state, plane, plane_state, i) {
13404
		for_each_plane_in_state(state, plane, plane_state, i) {
13466
			struct intel_plane_state *intel_plane_state =
13405
			struct intel_plane_state *intel_plane_state =
13467
				to_intel_plane_state(plane_state);
13406
				to_intel_plane_state(plane_state);
13468
 
13407
 
13469
			if (!intel_plane_state->wait_req)
13408
			if (!intel_plane_state->wait_req)
13470
				continue;
13409
				continue;
13471
 
13410
 
13472
			ret = __i915_wait_request(intel_plane_state->wait_req,
13411
			ret = __i915_wait_request(intel_plane_state->wait_req,
13473
						  reset_counter, true,
13412
						  reset_counter, true,
13474
						  NULL, NULL);
13413
						  NULL, NULL);
13475
 
13414
 
13476
			/* Swallow -EIO errors to allow updates during hw lockup. */
13415
			/* Swallow -EIO errors to allow updates during hw lockup. */
13477
			if (ret == -EIO)
13416
			if (ret == -EIO)
13478
				ret = 0;
13417
				ret = 0;
13479
 
13418
 
13480
			if (ret)
13419
			if (ret)
13481
				break;
13420
				break;
13482
		}
13421
		}
13483
 
13422
 
13484
		if (!ret)
13423
		if (!ret)
13485
			return 0;
13424
			return 0;
13486
 
13425
 
13487
		mutex_lock(&dev->struct_mutex);
13426
		mutex_lock(&dev->struct_mutex);
13488
		drm_atomic_helper_cleanup_planes(dev, state);
13427
		drm_atomic_helper_cleanup_planes(dev, state);
13489
	}
13428
	}
13490
 
13429
 
13491
	mutex_unlock(&dev->struct_mutex);
13430
	mutex_unlock(&dev->struct_mutex);
13492
	return ret;
13431
	return ret;
13493
}
13432
}
-
 
13433
 
-
 
13434
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
-
 
13435
					  struct drm_i915_private *dev_priv,
-
 
13436
					  unsigned crtc_mask)
-
 
13437
{
-
 
13438
	unsigned last_vblank_count[I915_MAX_PIPES];
-
 
13439
	enum pipe pipe;
-
 
13440
	int ret;
-
 
13441
 
-
 
13442
	if (!crtc_mask)
-
 
13443
		return;
-
 
13444
 
-
 
13445
	for_each_pipe(dev_priv, pipe) {
-
 
13446
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
 
13447
 
-
 
13448
		if (!((1 << pipe) & crtc_mask))
-
 
13449
			continue;
-
 
13450
 
-
 
13451
		ret = drm_crtc_vblank_get(crtc);
-
 
13452
		if (WARN_ON(ret != 0)) {
-
 
13453
			crtc_mask &= ~(1 << pipe);
-
 
13454
			continue;
-
 
13455
		}
-
 
13456
 
-
 
13457
		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
-
 
13458
	}
-
 
13459
 
-
 
13460
	for_each_pipe(dev_priv, pipe) {
-
 
13461
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
 
13462
		long lret;
-
 
13463
 
-
 
13464
		if (!((1 << pipe) & crtc_mask))
-
 
13465
			continue;
-
 
13466
 
-
 
13467
		lret = wait_event_timeout(dev->vblank[pipe].queue,
-
 
13468
				last_vblank_count[pipe] !=
-
 
13469
					drm_crtc_vblank_count(crtc),
-
 
13470
				msecs_to_jiffies(50));
-
 
13471
 
-
 
13472
		WARN_ON(!lret);
-
 
13473
 
-
 
13474
		drm_crtc_vblank_put(crtc);
-
 
13475
	}
-
 
13476
}
-
 
13477
 
-
 
13478
static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
-
 
13479
{
-
 
13480
	/* fb updated, need to unpin old fb */
-
 
13481
	if (crtc_state->fb_changed)
-
 
13482
		return true;
-
 
13483
 
-
 
13484
	/* wm changes, need vblank before final wm's */
-
 
13485
	if (crtc_state->update_wm_post)
-
 
13486
		return true;
-
 
13487
 
-
 
13488
	/*
-
 
13489
	 * cxsr is re-enabled after vblank.
-
 
13490
	 * This is already handled by crtc_state->update_wm_post,
-
 
13491
	 * but added for clarity.
-
 
13492
	 */
-
 
13493
	if (crtc_state->disable_cxsr)
-
 
13494
		return true;
-
 
13495
 
-
 
13496
	return false;
-
 
13497
}
13494
 
13498
 
13495
/**
13499
/**
13496
 * intel_atomic_commit - commit validated state object
13500
 * intel_atomic_commit - commit validated state object
13497
 * @dev: DRM device
13501
 * @dev: DRM device
13498
 * @state: the top-level driver state object
13502
 * @state: the top-level driver state object
13499
 * @async: asynchronous commit
13503
 * @async: asynchronous commit
13500
 *
13504
 *
13501
 * This function commits a top-level state object that has been validated
13505
 * This function commits a top-level state object that has been validated
13502
 * with drm_atomic_helper_check().
13506
 * with drm_atomic_helper_check().
13503
 *
13507
 *
13504
 * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13508
 * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13505
 * we can only handle plane-related operations and do not yet support
13509
 * we can only handle plane-related operations and do not yet support
13506
 * asynchronous commit.
13510
 * asynchronous commit.
13507
 *
13511
 *
13508
 * RETURNS
13512
 * RETURNS
13509
 * Zero for success or -errno.
13513
 * Zero for success or -errno.
13510
 */
13514
 */
13511
static int intel_atomic_commit(struct drm_device *dev,
13515
static int intel_atomic_commit(struct drm_device *dev,
13512
			       struct drm_atomic_state *state,
13516
			       struct drm_atomic_state *state,
13513
			       bool async)
13517
			       bool async)
13514
{
13518
{
-
 
13519
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13515
	struct drm_i915_private *dev_priv = dev->dev_private;
13520
	struct drm_i915_private *dev_priv = dev->dev_private;
13516
	struct drm_crtc_state *crtc_state;
13521
	struct drm_crtc_state *crtc_state;
13517
	struct drm_crtc *crtc;
13522
	struct drm_crtc *crtc;
13518
	int ret = 0;
13523
	int ret = 0, i;
13519
	int i;
13524
	bool hw_check = intel_state->modeset;
-
 
13525
	unsigned long put_domains[I915_MAX_PIPES] = {};
13520
	bool any_ms = false;
13526
	unsigned crtc_vblank_mask = 0;
13521
 
13527
 
13522
	ret = intel_atomic_prepare_commit(dev, state, async);
13528
	ret = intel_atomic_prepare_commit(dev, state, async);
13523
	if (ret) {
13529
	if (ret) {
13524
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13530
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13525
		return ret;
13531
		return ret;
13526
	}
13532
	}
13527
 
13533
 
13528
	drm_atomic_helper_swap_state(dev, state);
13534
	drm_atomic_helper_swap_state(dev, state);
13529
	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
13535
	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
-
 
13536
 
-
 
13537
	if (intel_state->modeset) {
-
 
13538
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
-
 
13539
		       sizeof(intel_state->min_pixclk));
-
 
13540
		dev_priv->active_crtcs = intel_state->active_crtcs;
-
 
13541
		dev_priv->atomic_cdclk_freq = intel_state->cdclk;
-
 
13542
 
-
 
13543
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
-
 
13544
	}
13530
 
13545
 
13531
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13546
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13532
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13547
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
13548
 
-
 
13549
		if (needs_modeset(crtc->state) ||
-
 
13550
		    to_intel_crtc_state(crtc->state)->update_pipe) {
-
 
13551
			hw_check = true;
-
 
13552
 
-
 
13553
			put_domains[to_intel_crtc(crtc)->pipe] =
-
 
13554
				modeset_get_crtc_power_domains(crtc,
-
 
13555
					to_intel_crtc_state(crtc->state));
-
 
13556
		}
13533
 
13557
 
13534
		if (!needs_modeset(crtc->state))
13558
		if (!needs_modeset(crtc->state))
13535
			continue;
13559
			continue;
13536
 
-
 
13537
		any_ms = true;
13560
 
13538
		intel_pre_plane_update(intel_crtc);
13561
		intel_pre_plane_update(to_intel_crtc_state(crtc_state));
13539
 
13562
 
13540
		if (crtc_state->active) {
13563
		if (crtc_state->active) {
13541
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
13564
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
13542
			dev_priv->display.crtc_disable(crtc);
13565
			dev_priv->display.crtc_disable(crtc);
13543
			intel_crtc->active = false;
13566
			intel_crtc->active = false;
-
 
13567
			intel_fbc_disable(intel_crtc);
13544
			intel_disable_shared_dpll(intel_crtc);
13568
			intel_disable_shared_dpll(intel_crtc);
13545
 
13569
 
13546
			/*
13570
			/*
13547
			 * Underruns don't always raise
13571
			 * Underruns don't always raise
13548
			 * interrupts, so check manually.
13572
			 * interrupts, so check manually.
13549
			 */
13573
			 */
13550
			intel_check_cpu_fifo_underruns(dev_priv);
13574
			intel_check_cpu_fifo_underruns(dev_priv);
13551
			intel_check_pch_fifo_underruns(dev_priv);
13575
			intel_check_pch_fifo_underruns(dev_priv);
13552
 
13576
 
13553
			if (!crtc->state->active)
13577
			if (!crtc->state->active)
13554
				intel_update_watermarks(crtc);
13578
				intel_update_watermarks(crtc);
13555
		}
13579
		}
13556
	}
13580
	}
13557
 
13581
 
13558
	/* Only after disabling all output pipelines that will be changed can we
13582
	/* Only after disabling all output pipelines that will be changed can we
13559
	 * update the the output configuration. */
13583
	 * update the the output configuration. */
13560
	intel_modeset_update_crtc_state(state);
13584
	intel_modeset_update_crtc_state(state);
13561
 
13585
 
13562
	if (any_ms) {
13586
	if (intel_state->modeset) {
13563
		intel_shared_dpll_commit(state);
13587
		intel_shared_dpll_commit(state);
13564
 
13588
 
13565
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13589
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
-
 
13590
 
-
 
13591
		if (dev_priv->display.modeset_commit_cdclk &&
-
 
13592
		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
13566
		modeset_update_crtc_power_domains(state);
13593
			dev_priv->display.modeset_commit_cdclk(state);
13567
	}
13594
	}
13568
 
13595
 
13569
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13596
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13570
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13597
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13571
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13598
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13572
		bool modeset = needs_modeset(crtc->state);
13599
		bool modeset = needs_modeset(crtc->state);
13573
		bool update_pipe = !modeset &&
13600
		struct intel_crtc_state *pipe_config =
13574
			to_intel_crtc_state(crtc->state)->update_pipe;
13601
			to_intel_crtc_state(crtc->state);
13575
		unsigned long put_domains = 0;
-
 
13576
 
-
 
13577
		if (modeset)
-
 
13578
			intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13602
		bool update_pipe = !modeset && pipe_config->update_pipe;
13579
 
13603
 
13580
		if (modeset && crtc->state->active) {
13604
		if (modeset && crtc->state->active) {
13581
			update_scanline_offset(to_intel_crtc(crtc));
13605
			update_scanline_offset(to_intel_crtc(crtc));
13582
			dev_priv->display.crtc_enable(crtc);
13606
			dev_priv->display.crtc_enable(crtc);
13583
		}
13607
		}
13584
 
-
 
13585
		if (update_pipe) {
-
 
13586
			put_domains = modeset_get_crtc_power_domains(crtc);
-
 
13587
 
-
 
13588
			/* make sure intel_modeset_check_state runs */
-
 
13589
			any_ms = true;
-
 
13590
		}
-
 
13591
 
13608
 
-
 
13609
		if (!modeset)
-
 
13610
			intel_pre_plane_update(to_intel_crtc_state(crtc_state));
-
 
13611
 
13592
		if (!modeset)
13612
		if (crtc->state->active && intel_crtc->atomic.update_fbc)
13593
			intel_pre_plane_update(intel_crtc);
13613
			intel_fbc_enable(intel_crtc);
13594
 
13614
 
13595
		if (crtc->state->active &&
13615
		if (crtc->state->active &&
13596
		    (crtc->state->planes_changed || update_pipe))
13616
		    (crtc->state->planes_changed || update_pipe))
13597
		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
13617
			drm_atomic_helper_commit_planes_on_crtc(crtc_state);
13598
 
-
 
13599
		if (put_domains)
13618
 
13600
			modeset_put_power_domains(dev_priv, put_domains);
-
 
13601
 
13619
		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13602
		intel_post_plane_update(intel_crtc);
-
 
13603
 
-
 
13604
		if (modeset)
-
 
13605
			intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13620
			crtc_vblank_mask |= 1 << i;
13606
	}
13621
	}
13607
 
13622
 
13608
	/* FIXME: add subpixel order */
13623
	/* FIXME: add subpixel order */
-
 
13624
 
13609
 
13625
	if (!state->legacy_cursor_update)
-
 
13626
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
-
 
13627
 
-
 
13628
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-
 
13629
		intel_post_plane_update(to_intel_crtc(crtc));
-
 
13630
 
-
 
13631
		if (put_domains[i])
-
 
13632
			modeset_put_power_domains(dev_priv, put_domains[i]);
-
 
13633
	}
-
 
13634
 
-
 
13635
	if (intel_state->modeset)
13610
	drm_atomic_helper_wait_for_vblanks(dev, state);
13636
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13611
 
13637
 
13612
	mutex_lock(&dev->struct_mutex);
13638
	mutex_lock(&dev->struct_mutex);
13613
	drm_atomic_helper_cleanup_planes(dev, state);
13639
	drm_atomic_helper_cleanup_planes(dev, state);
13614
	mutex_unlock(&dev->struct_mutex);
13640
	mutex_unlock(&dev->struct_mutex);
13615
 
13641
 
13616
	if (any_ms)
13642
	if (hw_check)
13617
		intel_modeset_check_state(dev, state);
13643
		intel_modeset_check_state(dev, state);
13618
 
13644
 
13619
	drm_atomic_state_free(state);
13645
	drm_atomic_state_free(state);
-
 
13646
 
-
 
13647
	/* As one of the primary mmio accessors, KMS has a high likelihood
-
 
13648
	 * of triggering bugs in unclaimed access. After we finish
-
 
13649
	 * modesetting, see if an error has been flagged, and if so
-
 
13650
	 * enable debugging for the next modeset - and hope we catch
-
 
13651
	 * the culprit.
-
 
13652
	 *
-
 
13653
	 * XXX note that we assume display power is on at this point.
-
 
13654
	 * This might hold true now but we need to add pm helper to check
-
 
13655
	 * unclaimed only when the hardware is on, as atomic commits
-
 
13656
	 * can happen also when the device is completely off.
-
 
13657
	 */
-
 
13658
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13620
 
13659
 
13621
	return 0;
13660
	return 0;
13622
}
13661
}
13623
 
13662
 
13624
void intel_crtc_restore_mode(struct drm_crtc *crtc)
13663
void intel_crtc_restore_mode(struct drm_crtc *crtc)
13625
{
13664
{
13626
	struct drm_device *dev = crtc->dev;
13665
	struct drm_device *dev = crtc->dev;
13627
	struct drm_atomic_state *state;
13666
	struct drm_atomic_state *state;
13628
	struct drm_crtc_state *crtc_state;
13667
	struct drm_crtc_state *crtc_state;
13629
	int ret;
13668
	int ret;
13630
 
13669
 
13631
	state = drm_atomic_state_alloc(dev);
13670
	state = drm_atomic_state_alloc(dev);
13632
	if (!state) {
13671
	if (!state) {
13633
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
13672
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
13634
			      crtc->base.id);
13673
			      crtc->base.id);
13635
		return;
13674
		return;
13636
	}
13675
	}
13637
 
13676
 
13638
	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
13677
	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
13639
 
13678
 
13640
retry:
13679
retry:
13641
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
13680
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
13642
	ret = PTR_ERR_OR_ZERO(crtc_state);
13681
	ret = PTR_ERR_OR_ZERO(crtc_state);
13643
	if (!ret) {
13682
	if (!ret) {
13644
		if (!crtc_state->active)
13683
		if (!crtc_state->active)
13645
			goto out;
13684
			goto out;
13646
 
13685
 
13647
		crtc_state->mode_changed = true;
13686
		crtc_state->mode_changed = true;
13648
		ret = drm_atomic_commit(state);
13687
		ret = drm_atomic_commit(state);
13649
	}
13688
	}
13650
 
13689
 
13651
	if (ret == -EDEADLK) {
13690
	if (ret == -EDEADLK) {
13652
		drm_atomic_state_clear(state);
13691
		drm_atomic_state_clear(state);
13653
		drm_modeset_backoff(state->acquire_ctx);
13692
		drm_modeset_backoff(state->acquire_ctx);
13654
		goto retry;
13693
		goto retry;
13655
	}
13694
	}
13656
 
13695
 
13657
	if (ret)
13696
	if (ret)
13658
out:
13697
out:
13659
		drm_atomic_state_free(state);
13698
		drm_atomic_state_free(state);
13660
}
13699
}
13661
 
13700
 
13662
#undef for_each_intel_crtc_masked
13701
#undef for_each_intel_crtc_masked
13663
 
13702
 
13664
static const struct drm_crtc_funcs intel_crtc_funcs = {
13703
static const struct drm_crtc_funcs intel_crtc_funcs = {
13665
	.gamma_set = intel_crtc_gamma_set,
13704
	.gamma_set = intel_crtc_gamma_set,
13666
	.set_config = drm_atomic_helper_set_config,
13705
	.set_config = drm_atomic_helper_set_config,
13667
	.destroy = intel_crtc_destroy,
13706
	.destroy = intel_crtc_destroy,
13668
	.page_flip = intel_crtc_page_flip,
13707
	.page_flip = intel_crtc_page_flip,
13669
	.atomic_duplicate_state = intel_crtc_duplicate_state,
13708
	.atomic_duplicate_state = intel_crtc_duplicate_state,
13670
	.atomic_destroy_state = intel_crtc_destroy_state,
13709
	.atomic_destroy_state = intel_crtc_destroy_state,
13671
};
13710
};
13672
 
13711
 
13673
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13712
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13674
				      struct intel_shared_dpll *pll,
13713
				      struct intel_shared_dpll *pll,
13675
				      struct intel_dpll_hw_state *hw_state)
13714
				      struct intel_dpll_hw_state *hw_state)
13676
{
13715
{
13677
	uint32_t val;
13716
	uint32_t val;
13678
 
13717
 
13679
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
13718
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
13680
		return false;
13719
		return false;
13681
 
13720
 
13682
	val = I915_READ(PCH_DPLL(pll->id));
13721
	val = I915_READ(PCH_DPLL(pll->id));
13683
	hw_state->dpll = val;
13722
	hw_state->dpll = val;
13684
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13723
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13685
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13724
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13686
 
13725
 
13687
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
13726
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
13688
 
13727
 
13689
	return val & DPLL_VCO_ENABLE;
13728
	return val & DPLL_VCO_ENABLE;
13690
}
13729
}
13691
 
13730
 
13692
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
13731
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
13693
				  struct intel_shared_dpll *pll)
13732
				  struct intel_shared_dpll *pll)
13694
{
13733
{
13695
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
13734
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
13696
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
13735
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
13697
}
13736
}
13698
 
13737
 
13699
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
13738
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
13700
				struct intel_shared_dpll *pll)
13739
				struct intel_shared_dpll *pll)
13701
{
13740
{
13702
	/* PCH refclock must be enabled first */
13741
	/* PCH refclock must be enabled first */
13703
	ibx_assert_pch_refclk_enabled(dev_priv);
13742
	ibx_assert_pch_refclk_enabled(dev_priv);
13704
 
13743
 
13705
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
13744
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
13706
 
13745
 
13707
	/* Wait for the clocks to stabilize. */
13746
	/* Wait for the clocks to stabilize. */
13708
	POSTING_READ(PCH_DPLL(pll->id));
13747
	POSTING_READ(PCH_DPLL(pll->id));
13709
	udelay(150);
13748
	udelay(150);
13710
 
13749
 
13711
	/* The pixel multiplier can only be updated once the
13750
	/* The pixel multiplier can only be updated once the
13712
	 * DPLL is enabled and the clocks are stable.
13751
	 * DPLL is enabled and the clocks are stable.
13713
	 *
13752
	 *
13714
	 * So write it again.
13753
	 * So write it again.
13715
	 */
13754
	 */
13716
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
13755
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
13717
	POSTING_READ(PCH_DPLL(pll->id));
13756
	POSTING_READ(PCH_DPLL(pll->id));
13718
	udelay(200);
13757
	udelay(200);
13719
}
13758
}
13720
 
13759
 
13721
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
13760
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
13722
				 struct intel_shared_dpll *pll)
13761
				 struct intel_shared_dpll *pll)
13723
{
13762
{
13724
	struct drm_device *dev = dev_priv->dev;
13763
	struct drm_device *dev = dev_priv->dev;
13725
	struct intel_crtc *crtc;
13764
	struct intel_crtc *crtc;
13726
 
13765
 
13727
	/* Make sure no transcoder isn't still depending on us. */
13766
	/* Make sure no transcoder isn't still depending on us. */
13728
	for_each_intel_crtc(dev, crtc) {
13767
	for_each_intel_crtc(dev, crtc) {
13729
		if (intel_crtc_to_shared_dpll(crtc) == pll)
13768
		if (intel_crtc_to_shared_dpll(crtc) == pll)
13730
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
13769
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
13731
	}
13770
	}
13732
 
13771
 
13733
	I915_WRITE(PCH_DPLL(pll->id), 0);
13772
	I915_WRITE(PCH_DPLL(pll->id), 0);
13734
	POSTING_READ(PCH_DPLL(pll->id));
13773
	POSTING_READ(PCH_DPLL(pll->id));
13735
	udelay(200);
13774
	udelay(200);
13736
}
13775
}
13737
 
13776
 
13738
static char *ibx_pch_dpll_names[] = {
13777
static char *ibx_pch_dpll_names[] = {
13739
	"PCH DPLL A",
13778
	"PCH DPLL A",
13740
	"PCH DPLL B",
13779
	"PCH DPLL B",
13741
};
13780
};
13742
 
13781
 
13743
static void ibx_pch_dpll_init(struct drm_device *dev)
13782
static void ibx_pch_dpll_init(struct drm_device *dev)
13744
{
13783
{
13745
	struct drm_i915_private *dev_priv = dev->dev_private;
13784
	struct drm_i915_private *dev_priv = dev->dev_private;
13746
	int i;
13785
	int i;
13747
 
13786
 
13748
	dev_priv->num_shared_dpll = 2;
13787
	dev_priv->num_shared_dpll = 2;
13749
 
13788
 
13750
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13789
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13751
		dev_priv->shared_dplls[i].id = i;
13790
		dev_priv->shared_dplls[i].id = i;
13752
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
13791
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
13753
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
13792
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
13754
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13793
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13755
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
13794
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
13756
		dev_priv->shared_dplls[i].get_hw_state =
13795
		dev_priv->shared_dplls[i].get_hw_state =
13757
			ibx_pch_dpll_get_hw_state;
13796
			ibx_pch_dpll_get_hw_state;
13758
	}
13797
	}
13759
}
13798
}
13760
 
13799
 
13761
static void intel_shared_dpll_init(struct drm_device *dev)
13800
static void intel_shared_dpll_init(struct drm_device *dev)
13762
{
13801
{
13763
	struct drm_i915_private *dev_priv = dev->dev_private;
13802
	struct drm_i915_private *dev_priv = dev->dev_private;
13764
 
13803
 
13765
	if (HAS_DDI(dev))
13804
	if (HAS_DDI(dev))
13766
		intel_ddi_pll_init(dev);
13805
		intel_ddi_pll_init(dev);
13767
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
13806
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
13768
		ibx_pch_dpll_init(dev);
13807
		ibx_pch_dpll_init(dev);
13769
	else
13808
	else
13770
		dev_priv->num_shared_dpll = 0;
13809
		dev_priv->num_shared_dpll = 0;
13771
 
13810
 
13772
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
13811
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
13773
}
13812
}
13774
 
13813
 
13775
/**
13814
/**
13776
 * intel_prepare_plane_fb - Prepare fb for usage on plane
13815
 * intel_prepare_plane_fb - Prepare fb for usage on plane
13777
 * @plane: drm plane to prepare for
13816
 * @plane: drm plane to prepare for
13778
 * @fb: framebuffer to prepare for presentation
13817
 * @fb: framebuffer to prepare for presentation
13779
 *
13818
 *
13780
 * Prepares a framebuffer for usage on a display plane.  Generally this
13819
 * Prepares a framebuffer for usage on a display plane.  Generally this
13781
 * involves pinning the underlying object and updating the frontbuffer tracking
13820
 * involves pinning the underlying object and updating the frontbuffer tracking
13782
 * bits.  Some older platforms need special physical address handling for
13821
 * bits.  Some older platforms need special physical address handling for
13783
 * cursor planes.
13822
 * cursor planes.
13784
 *
13823
 *
13785
 * Must be called with struct_mutex held.
13824
 * Must be called with struct_mutex held.
13786
 *
13825
 *
13787
 * Returns 0 on success, negative error code on failure.
13826
 * Returns 0 on success, negative error code on failure.
13788
 */
13827
 */
13789
int
13828
int
13790
intel_prepare_plane_fb(struct drm_plane *plane,
13829
intel_prepare_plane_fb(struct drm_plane *plane,
13791
		       const struct drm_plane_state *new_state)
13830
		       const struct drm_plane_state *new_state)
13792
{
13831
{
13793
	struct drm_device *dev = plane->dev;
13832
	struct drm_device *dev = plane->dev;
13794
	struct drm_framebuffer *fb = new_state->fb;
13833
	struct drm_framebuffer *fb = new_state->fb;
13795
	struct intel_plane *intel_plane = to_intel_plane(plane);
13834
	struct intel_plane *intel_plane = to_intel_plane(plane);
13796
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13835
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13797
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13836
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13798
	int ret = 0;
13837
	int ret = 0;
13799
 
13838
 
13800
	if (!obj && !old_obj)
13839
	if (!obj && !old_obj)
13801
		return 0;
13840
		return 0;
13802
 
13841
 
13803
	if (old_obj) {
13842
	if (old_obj) {
13804
		struct drm_crtc_state *crtc_state =
13843
		struct drm_crtc_state *crtc_state =
13805
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13844
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13806
 
13845
 
13807
		/* Big Hammer, we also need to ensure that any pending
13846
		/* Big Hammer, we also need to ensure that any pending
13808
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13847
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13809
		 * current scanout is retired before unpinning the old
13848
		 * current scanout is retired before unpinning the old
13810
		 * framebuffer. Note that we rely on userspace rendering
13849
		 * framebuffer. Note that we rely on userspace rendering
13811
		 * into the buffer attached to the pipe they are waiting
13850
		 * into the buffer attached to the pipe they are waiting
13812
		 * on. If not, userspace generates a GPU hang with IPEHR
13851
		 * on. If not, userspace generates a GPU hang with IPEHR
13813
		 * point to the MI_WAIT_FOR_EVENT.
13852
		 * point to the MI_WAIT_FOR_EVENT.
13814
		 *
13853
		 *
13815
		 * This should only fail upon a hung GPU, in which case we
13854
		 * This should only fail upon a hung GPU, in which case we
13816
		 * can safely continue.
13855
		 * can safely continue.
13817
		 */
13856
		 */
13818
		if (needs_modeset(crtc_state))
13857
		if (needs_modeset(crtc_state))
13819
			ret = i915_gem_object_wait_rendering(old_obj, true);
13858
			ret = i915_gem_object_wait_rendering(old_obj, true);
13820
 
13859
 
13821
		/* Swallow -EIO errors to allow updates during hw lockup. */
13860
		/* Swallow -EIO errors to allow updates during hw lockup. */
13822
		if (ret && ret != -EIO)
13861
		if (ret && ret != -EIO)
13823
			return ret;
13862
			return ret;
13824
	}
13863
	}
13825
 
13864
 
13826
	/* For framebuffer backed by dmabuf, wait for fence */
13865
	/* For framebuffer backed by dmabuf, wait for fence */
13827
 
13866
 
13828
	if (!obj) {
13867
	if (!obj) {
13829
		ret = 0;
13868
		ret = 0;
13830
	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13869
	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13831
	    INTEL_INFO(dev)->cursor_needs_physical) {
13870
	    INTEL_INFO(dev)->cursor_needs_physical) {
13832
		int align = IS_I830(dev) ? 16 * 1024 : 256;
13871
		int align = IS_I830(dev) ? 16 * 1024 : 256;
13833
		ret = i915_gem_object_attach_phys(obj, align);
13872
		ret = i915_gem_object_attach_phys(obj, align);
13834
		if (ret)
13873
		if (ret)
13835
			DRM_DEBUG_KMS("failed to attach phys object\n");
13874
			DRM_DEBUG_KMS("failed to attach phys object\n");
13836
	} else {
13875
	} else {
13837
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
13876
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
13838
	}
13877
	}
13839
 
13878
 
13840
	if (ret == 0) {
13879
	if (ret == 0) {
13841
		if (obj) {
13880
		if (obj) {
13842
			struct intel_plane_state *plane_state =
13881
			struct intel_plane_state *plane_state =
13843
				to_intel_plane_state(new_state);
13882
				to_intel_plane_state(new_state);
13844
 
13883
 
13845
			i915_gem_request_assign(&plane_state->wait_req,
13884
			i915_gem_request_assign(&plane_state->wait_req,
13846
						obj->last_write_req);
13885
						obj->last_write_req);
13847
	}
13886
		}
13848
 
13887
 
13849
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13888
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13850
	}
13889
	}
13851
 
13890
 
13852
	return ret;
13891
	return ret;
13853
}
13892
}
13854
 
13893
 
13855
/**
13894
/**
13856
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13895
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13857
 * @plane: drm plane to clean up for
13896
 * @plane: drm plane to clean up for
13858
 * @fb: old framebuffer that was on plane
13897
 * @fb: old framebuffer that was on plane
13859
 *
13898
 *
13860
 * Cleans up a framebuffer that has just been removed from a plane.
13899
 * Cleans up a framebuffer that has just been removed from a plane.
13861
 *
13900
 *
13862
 * Must be called with struct_mutex held.
13901
 * Must be called with struct_mutex held.
13863
 */
13902
 */
13864
void
13903
void
13865
intel_cleanup_plane_fb(struct drm_plane *plane,
13904
intel_cleanup_plane_fb(struct drm_plane *plane,
13866
		       const struct drm_plane_state *old_state)
13905
		       const struct drm_plane_state *old_state)
13867
{
13906
{
13868
	struct drm_device *dev = plane->dev;
13907
	struct drm_device *dev = plane->dev;
13869
	struct intel_plane *intel_plane = to_intel_plane(plane);
13908
	struct intel_plane *intel_plane = to_intel_plane(plane);
13870
	struct intel_plane_state *old_intel_state;
13909
	struct intel_plane_state *old_intel_state;
13871
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13910
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13872
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
13911
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
13873
 
13912
 
13874
	old_intel_state = to_intel_plane_state(old_state);
13913
	old_intel_state = to_intel_plane_state(old_state);
13875
 
13914
 
13876
	if (!obj && !old_obj)
13915
	if (!obj && !old_obj)
13877
		return;
13916
		return;
13878
 
13917
 
13879
	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13918
	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13880
	    !INTEL_INFO(dev)->cursor_needs_physical))
13919
	    !INTEL_INFO(dev)->cursor_needs_physical))
13881
		intel_unpin_fb_obj(old_state->fb, old_state);
13920
		intel_unpin_fb_obj(old_state->fb, old_state);
13882
 
13921
 
13883
	/* prepare_fb aborted? */
13922
	/* prepare_fb aborted? */
13884
	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13923
	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13885
	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13924
	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13886
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13925
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13887
 
13926
 
13888
	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13927
	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13889
 
13928
 
13890
}
13929
}
13891
 
13930
 
13892
int
13931
int
13893
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13932
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13894
{
13933
{
13895
	int max_scale;
13934
	int max_scale;
13896
	struct drm_device *dev;
13935
	struct drm_device *dev;
13897
	struct drm_i915_private *dev_priv;
13936
	struct drm_i915_private *dev_priv;
13898
	int crtc_clock, cdclk;
13937
	int crtc_clock, cdclk;
13899
 
13938
 
13900
	if (!intel_crtc || !crtc_state)
13939
	if (!intel_crtc || !crtc_state->base.enable)
13901
		return DRM_PLANE_HELPER_NO_SCALING;
13940
		return DRM_PLANE_HELPER_NO_SCALING;
13902
 
13941
 
13903
	dev = intel_crtc->base.dev;
13942
	dev = intel_crtc->base.dev;
13904
	dev_priv = dev->dev_private;
13943
	dev_priv = dev->dev_private;
13905
	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13944
	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13906
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13945
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13907
 
13946
 
13908
	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13947
	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13909
		return DRM_PLANE_HELPER_NO_SCALING;
13948
		return DRM_PLANE_HELPER_NO_SCALING;
13910
 
13949
 
13911
	/*
13950
	/*
13912
	 * skl max scale is lower of:
13951
	 * skl max scale is lower of:
13913
	 *    close to 3 but not 3, -1 is for that purpose
13952
	 *    close to 3 but not 3, -1 is for that purpose
13914
	 *            or
13953
	 *            or
13915
	 *    cdclk/crtc_clock
13954
	 *    cdclk/crtc_clock
13916
	 */
13955
	 */
13917
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13956
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13918
 
13957
 
13919
	return max_scale;
13958
	return max_scale;
13920
}
13959
}
13921
 
13960
 
13922
static int
13961
static int
13923
intel_check_primary_plane(struct drm_plane *plane,
13962
intel_check_primary_plane(struct drm_plane *plane,
13924
			  struct intel_crtc_state *crtc_state,
13963
			  struct intel_crtc_state *crtc_state,
13925
			  struct intel_plane_state *state)
13964
			  struct intel_plane_state *state)
13926
{
13965
{
13927
	struct drm_crtc *crtc = state->base.crtc;
13966
	struct drm_crtc *crtc = state->base.crtc;
13928
	struct drm_framebuffer *fb = state->base.fb;
13967
	struct drm_framebuffer *fb = state->base.fb;
13929
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13968
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13930
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13969
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13931
	bool can_position = false;
13970
	bool can_position = false;
13932
 
13971
 
13933
	if (INTEL_INFO(plane->dev)->gen >= 9) {
13972
	if (INTEL_INFO(plane->dev)->gen >= 9) {
13934
	/* use scaler when colorkey is not required */
13973
		/* use scaler when colorkey is not required */
13935
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13974
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13936
		min_scale = 1;
13975
			min_scale = 1;
13937
		max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13976
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13938
		}
13977
		}
13939
		can_position = true;
13978
		can_position = true;
13940
	}
13979
	}
13941
 
13980
 
13942
	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13981
	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13943
					     &state->dst, &state->clip,
13982
					     &state->dst, &state->clip,
13944
					     min_scale, max_scale,
13983
					     min_scale, max_scale,
13945
					     can_position, true,
13984
					     can_position, true,
13946
					     &state->visible);
13985
					     &state->visible);
13947
}
13986
}
13948
 
-
 
13949
static void
-
 
13950
intel_commit_primary_plane(struct drm_plane *plane,
-
 
13951
			   struct intel_plane_state *state)
-
 
13952
{
-
 
13953
	struct drm_crtc *crtc = state->base.crtc;
-
 
13954
	struct drm_framebuffer *fb = state->base.fb;
-
 
13955
	struct drm_device *dev = plane->dev;
-
 
13956
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
13957
 
-
 
13958
	crtc = crtc ? crtc : plane->crtc;
-
 
13959
 
-
 
13960
	dev_priv->display.update_primary_plane(crtc, fb,
-
 
13961
					       state->src.x1 >> 16,
-
 
13962
					       state->src.y1 >> 16);
-
 
13963
}
-
 
13964
 
-
 
13965
static void
-
 
13966
intel_disable_primary_plane(struct drm_plane *plane,
-
 
13967
			    struct drm_crtc *crtc)
-
 
13968
{
-
 
13969
	struct drm_device *dev = plane->dev;
-
 
13970
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
13971
 
-
 
13972
	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
-
 
13973
}
-
 
13974
 
13987
 
13975
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13988
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13976
				    struct drm_crtc_state *old_crtc_state)
13989
				    struct drm_crtc_state *old_crtc_state)
13977
{
13990
{
13978
	struct drm_device *dev = crtc->dev;
13991
	struct drm_device *dev = crtc->dev;
13979
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13992
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13980
	struct intel_crtc_state *old_intel_state =
13993
	struct intel_crtc_state *old_intel_state =
13981
		to_intel_crtc_state(old_crtc_state);
13994
		to_intel_crtc_state(old_crtc_state);
13982
	bool modeset = needs_modeset(crtc->state);
13995
	bool modeset = needs_modeset(crtc->state);
13983
 
13996
 
13984
	/* Perform vblank evasion around commit operation */
13997
	/* Perform vblank evasion around commit operation */
13985
		intel_pipe_update_start(intel_crtc);
13998
	intel_pipe_update_start(intel_crtc);
13986
 
13999
 
13987
	if (modeset)
14000
	if (modeset)
13988
		return;
14001
		return;
13989
 
14002
 
13990
	if (to_intel_crtc_state(crtc->state)->update_pipe)
14003
	if (to_intel_crtc_state(crtc->state)->update_pipe)
13991
		intel_update_pipe_config(intel_crtc, old_intel_state);
14004
		intel_update_pipe_config(intel_crtc, old_intel_state);
13992
	else if (INTEL_INFO(dev)->gen >= 9)
14005
	else if (INTEL_INFO(dev)->gen >= 9)
13993
		skl_detach_scalers(intel_crtc);
14006
		skl_detach_scalers(intel_crtc);
13994
}
14007
}
13995
 
14008
 
13996
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14009
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13997
				     struct drm_crtc_state *old_crtc_state)
14010
				     struct drm_crtc_state *old_crtc_state)
13998
{
14011
{
13999
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14012
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14000
 
14013
 
14001
		intel_pipe_update_end(intel_crtc);
14014
	intel_pipe_update_end(intel_crtc);
14002
}
14015
}
14003
 
14016
 
14004
/**
14017
/**
14005
 * intel_plane_destroy - destroy a plane
14018
 * intel_plane_destroy - destroy a plane
14006
 * @plane: plane to destroy
14019
 * @plane: plane to destroy
14007
 *
14020
 *
14008
 * Common destruction function for all types of planes (primary, cursor,
14021
 * Common destruction function for all types of planes (primary, cursor,
14009
 * sprite).
14022
 * sprite).
14010
 */
14023
 */
14011
void intel_plane_destroy(struct drm_plane *plane)
14024
void intel_plane_destroy(struct drm_plane *plane)
14012
{
14025
{
14013
	struct intel_plane *intel_plane = to_intel_plane(plane);
14026
	struct intel_plane *intel_plane = to_intel_plane(plane);
14014
	drm_plane_cleanup(plane);
14027
	drm_plane_cleanup(plane);
14015
	kfree(intel_plane);
14028
	kfree(intel_plane);
14016
}
14029
}
14017
 
14030
 
14018
const struct drm_plane_funcs intel_plane_funcs = {
14031
const struct drm_plane_funcs intel_plane_funcs = {
14019
	.update_plane = drm_atomic_helper_update_plane,
14032
	.update_plane = drm_atomic_helper_update_plane,
14020
	.disable_plane = drm_atomic_helper_disable_plane,
14033
	.disable_plane = drm_atomic_helper_disable_plane,
14021
	.destroy = intel_plane_destroy,
14034
	.destroy = intel_plane_destroy,
14022
	.set_property = drm_atomic_helper_plane_set_property,
14035
	.set_property = drm_atomic_helper_plane_set_property,
14023
	.atomic_get_property = intel_plane_atomic_get_property,
14036
	.atomic_get_property = intel_plane_atomic_get_property,
14024
	.atomic_set_property = intel_plane_atomic_set_property,
14037
	.atomic_set_property = intel_plane_atomic_set_property,
14025
	.atomic_duplicate_state = intel_plane_duplicate_state,
14038
	.atomic_duplicate_state = intel_plane_duplicate_state,
14026
	.atomic_destroy_state = intel_plane_destroy_state,
14039
	.atomic_destroy_state = intel_plane_destroy_state,
14027
 
14040
 
14028
};
14041
};
14029
 
14042
 
14030
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14043
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14031
						    int pipe)
14044
						    int pipe)
14032
{
14045
{
14033
	struct intel_plane *primary;
14046
	struct intel_plane *primary;
14034
	struct intel_plane_state *state;
14047
	struct intel_plane_state *state;
14035
	const uint32_t *intel_primary_formats;
14048
	const uint32_t *intel_primary_formats;
14036
	unsigned int num_formats;
14049
	unsigned int num_formats;
14037
 
14050
 
14038
	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
14051
	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
14039
	if (primary == NULL)
14052
	if (primary == NULL)
14040
		return NULL;
14053
		return NULL;
14041
 
14054
 
14042
	state = intel_create_plane_state(&primary->base);
14055
	state = intel_create_plane_state(&primary->base);
14043
	if (!state) {
14056
	if (!state) {
14044
		kfree(primary);
14057
		kfree(primary);
14045
		return NULL;
14058
		return NULL;
14046
	}
14059
	}
14047
	primary->base.state = &state->base;
14060
	primary->base.state = &state->base;
14048
 
14061
 
14049
	primary->can_scale = false;
14062
	primary->can_scale = false;
14050
	primary->max_downscale = 1;
14063
	primary->max_downscale = 1;
14051
	if (INTEL_INFO(dev)->gen >= 9) {
14064
	if (INTEL_INFO(dev)->gen >= 9) {
14052
		primary->can_scale = true;
14065
		primary->can_scale = true;
14053
		state->scaler_id = -1;
14066
		state->scaler_id = -1;
14054
	}
14067
	}
14055
	primary->pipe = pipe;
14068
	primary->pipe = pipe;
14056
	primary->plane = pipe;
14069
	primary->plane = pipe;
14057
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
14070
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
14058
	primary->check_plane = intel_check_primary_plane;
14071
	primary->check_plane = intel_check_primary_plane;
14059
	primary->commit_plane = intel_commit_primary_plane;
-
 
14060
	primary->disable_plane = intel_disable_primary_plane;
-
 
14061
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14072
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14062
		primary->plane = !pipe;
14073
		primary->plane = !pipe;
14063
 
14074
 
14064
	if (INTEL_INFO(dev)->gen >= 9) {
14075
	if (INTEL_INFO(dev)->gen >= 9) {
14065
		intel_primary_formats = skl_primary_formats;
14076
		intel_primary_formats = skl_primary_formats;
14066
		num_formats = ARRAY_SIZE(skl_primary_formats);
14077
		num_formats = ARRAY_SIZE(skl_primary_formats);
-
 
14078
 
-
 
14079
		primary->update_plane = skylake_update_primary_plane;
-
 
14080
		primary->disable_plane = skylake_disable_primary_plane;
-
 
14081
	} else if (HAS_PCH_SPLIT(dev)) {
-
 
14082
		intel_primary_formats = i965_primary_formats;
-
 
14083
		num_formats = ARRAY_SIZE(i965_primary_formats);
-
 
14084
 
-
 
14085
		primary->update_plane = ironlake_update_primary_plane;
-
 
14086
		primary->disable_plane = i9xx_disable_primary_plane;
14067
	} else if (INTEL_INFO(dev)->gen >= 4) {
14087
	} else if (INTEL_INFO(dev)->gen >= 4) {
14068
		intel_primary_formats = i965_primary_formats;
14088
		intel_primary_formats = i965_primary_formats;
14069
		num_formats = ARRAY_SIZE(i965_primary_formats);
14089
		num_formats = ARRAY_SIZE(i965_primary_formats);
-
 
14090
 
-
 
14091
		primary->update_plane = i9xx_update_primary_plane;
-
 
14092
		primary->disable_plane = i9xx_disable_primary_plane;
14070
	} else {
14093
	} else {
14071
		intel_primary_formats = i8xx_primary_formats;
14094
		intel_primary_formats = i8xx_primary_formats;
14072
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
14095
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
-
 
14096
 
-
 
14097
		primary->update_plane = i9xx_update_primary_plane;
-
 
14098
		primary->disable_plane = i9xx_disable_primary_plane;
14073
	}
14099
	}
14074
 
14100
 
14075
	drm_universal_plane_init(dev, &primary->base, 0,
14101
	drm_universal_plane_init(dev, &primary->base, 0,
14076
				 &intel_plane_funcs,
14102
				 &intel_plane_funcs,
14077
				 intel_primary_formats, num_formats,
14103
				 intel_primary_formats, num_formats,
14078
				 DRM_PLANE_TYPE_PRIMARY, NULL);
14104
				 DRM_PLANE_TYPE_PRIMARY, NULL);
14079
 
14105
 
14080
	if (INTEL_INFO(dev)->gen >= 4)
14106
	if (INTEL_INFO(dev)->gen >= 4)
14081
		intel_create_rotation_property(dev, primary);
14107
		intel_create_rotation_property(dev, primary);
14082
 
14108
 
14083
	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14109
	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14084
 
14110
 
14085
	return &primary->base;
14111
	return &primary->base;
14086
}
14112
}
14087
 
14113
 
14088
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14114
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14089
{
14115
{
14090
	if (!dev->mode_config.rotation_property) {
14116
	if (!dev->mode_config.rotation_property) {
14091
		unsigned long flags = BIT(DRM_ROTATE_0) |
14117
		unsigned long flags = BIT(DRM_ROTATE_0) |
14092
			BIT(DRM_ROTATE_180);
14118
			BIT(DRM_ROTATE_180);
14093
 
14119
 
14094
		if (INTEL_INFO(dev)->gen >= 9)
14120
		if (INTEL_INFO(dev)->gen >= 9)
14095
			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14121
			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14096
 
14122
 
14097
		dev->mode_config.rotation_property =
14123
		dev->mode_config.rotation_property =
14098
			drm_mode_create_rotation_property(dev, flags);
14124
			drm_mode_create_rotation_property(dev, flags);
14099
	}
14125
	}
14100
	if (dev->mode_config.rotation_property)
14126
	if (dev->mode_config.rotation_property)
14101
		drm_object_attach_property(&plane->base.base,
14127
		drm_object_attach_property(&plane->base.base,
14102
				dev->mode_config.rotation_property,
14128
				dev->mode_config.rotation_property,
14103
				plane->base.state->rotation);
14129
				plane->base.state->rotation);
14104
}
14130
}
14105
 
14131
 
14106
static int
14132
static int
14107
intel_check_cursor_plane(struct drm_plane *plane,
14133
intel_check_cursor_plane(struct drm_plane *plane,
14108
			 struct intel_crtc_state *crtc_state,
14134
			 struct intel_crtc_state *crtc_state,
14109
			 struct intel_plane_state *state)
14135
			 struct intel_plane_state *state)
14110
{
14136
{
14111
	struct drm_crtc *crtc = crtc_state->base.crtc;
14137
	struct drm_crtc *crtc = crtc_state->base.crtc;
14112
	struct drm_framebuffer *fb = state->base.fb;
14138
	struct drm_framebuffer *fb = state->base.fb;
14113
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14139
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14114
	enum pipe pipe = to_intel_plane(plane)->pipe;
14140
	enum pipe pipe = to_intel_plane(plane)->pipe;
14115
	unsigned stride;
14141
	unsigned stride;
14116
	int ret;
14142
	int ret;
14117
 
14143
 
14118
	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14144
	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14119
					    &state->dst, &state->clip,
14145
					    &state->dst, &state->clip,
14120
					    DRM_PLANE_HELPER_NO_SCALING,
14146
					    DRM_PLANE_HELPER_NO_SCALING,
14121
					    DRM_PLANE_HELPER_NO_SCALING,
14147
					    DRM_PLANE_HELPER_NO_SCALING,
14122
					    true, true, &state->visible);
14148
					    true, true, &state->visible);
14123
	if (ret)
14149
	if (ret)
14124
		return ret;
14150
		return ret;
14125
 
14151
 
14126
	/* if we want to turn off the cursor ignore width and height */
14152
	/* if we want to turn off the cursor ignore width and height */
14127
	if (!obj)
14153
	if (!obj)
14128
		return 0;
14154
		return 0;
14129
 
14155
 
14130
	/* Check for which cursor types we support */
14156
	/* Check for which cursor types we support */
14131
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
14157
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
14132
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
14158
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
14133
			  state->base.crtc_w, state->base.crtc_h);
14159
			  state->base.crtc_w, state->base.crtc_h);
14134
		return -EINVAL;
14160
		return -EINVAL;
14135
	}
14161
	}
14136
 
14162
 
14137
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
14163
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
14138
	if (obj->base.size < stride * state->base.crtc_h) {
14164
	if (obj->base.size < stride * state->base.crtc_h) {
14139
		DRM_DEBUG_KMS("buffer is too small\n");
14165
		DRM_DEBUG_KMS("buffer is too small\n");
14140
		return -ENOMEM;
14166
		return -ENOMEM;
14141
	}
14167
	}
14142
 
14168
 
14143
	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
14169
	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
14144
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
14170
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
14145
		return -EINVAL;
14171
		return -EINVAL;
14146
	}
14172
	}
14147
 
14173
 
14148
	/*
14174
	/*
14149
	 * There's something wrong with the cursor on CHV pipe C.
14175
	 * There's something wrong with the cursor on CHV pipe C.
14150
	 * If it straddles the left edge of the screen then
14176
	 * If it straddles the left edge of the screen then
14151
	 * moving it away from the edge or disabling it often
14177
	 * moving it away from the edge or disabling it often
14152
	 * results in a pipe underrun, and often that can lead to
14178
	 * results in a pipe underrun, and often that can lead to
14153
	 * dead pipe (constant underrun reported, and it scans
14179
	 * dead pipe (constant underrun reported, and it scans
14154
	 * out just a solid color). To recover from that, the
14180
	 * out just a solid color). To recover from that, the
14155
	 * display power well must be turned off and on again.
14181
	 * display power well must be turned off and on again.
14156
	 * Refuse the put the cursor into that compromised position.
14182
	 * Refuse the put the cursor into that compromised position.
14157
	 */
14183
	 */
14158
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
14184
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
14159
	    state->visible && state->base.crtc_x < 0) {
14185
	    state->visible && state->base.crtc_x < 0) {
14160
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
14186
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
14161
		return -EINVAL;
14187
		return -EINVAL;
14162
	}
14188
	}
14163
 
14189
 
14164
	return 0;
14190
	return 0;
14165
}
14191
}
14166
 
14192
 
14167
static void
14193
static void
14168
intel_disable_cursor_plane(struct drm_plane *plane,
14194
intel_disable_cursor_plane(struct drm_plane *plane,
14169
			   struct drm_crtc *crtc)
14195
			   struct drm_crtc *crtc)
14170
{
14196
{
-
 
14197
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
14198
 
-
 
14199
	intel_crtc->cursor_addr = 0;
14171
	intel_crtc_update_cursor(crtc, false);
14200
	intel_crtc_update_cursor(crtc, NULL);
14172
}
14201
}
14173
 
14202
 
14174
static void
14203
static void
14175
intel_commit_cursor_plane(struct drm_plane *plane,
14204
intel_update_cursor_plane(struct drm_plane *plane,
-
 
14205
			  const struct intel_crtc_state *crtc_state,
14176
			  struct intel_plane_state *state)
14206
			  const struct intel_plane_state *state)
14177
{
14207
{
14178
	struct drm_crtc *crtc = state->base.crtc;
14208
	struct drm_crtc *crtc = crtc_state->base.crtc;
-
 
14209
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14179
	struct drm_device *dev = plane->dev;
14210
	struct drm_device *dev = plane->dev;
14180
	struct intel_crtc *intel_crtc;
-
 
14181
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14211
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14182
	uint32_t addr;
14212
	uint32_t addr;
14183
 
-
 
14184
	crtc = crtc ? crtc : plane->crtc;
-
 
14185
	intel_crtc = to_intel_crtc(crtc);
-
 
14186
 
14213
 
14187
	if (!obj)
14214
	if (!obj)
14188
		addr = 0;
14215
		addr = 0;
14189
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
14216
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
14190
		addr = i915_gem_obj_ggtt_offset(obj);
14217
		addr = i915_gem_obj_ggtt_offset(obj);
14191
	else
14218
	else
14192
		addr = obj->phys_handle->busaddr;
14219
		addr = obj->phys_handle->busaddr;
14193
 
14220
 
14194
	intel_crtc->cursor_addr = addr;
14221
	intel_crtc->cursor_addr = addr;
14195
 
-
 
14196
	if (crtc->state->active)
-
 
14197
		intel_crtc_update_cursor(crtc, state->visible);
14222
	intel_crtc_update_cursor(crtc, state);
14198
}
14223
}
14199
 
14224
 
14200
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14225
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14201
						   int pipe)
14226
						   int pipe)
14202
{
14227
{
14203
	struct intel_plane *cursor;
14228
	struct intel_plane *cursor;
14204
	struct intel_plane_state *state;
14229
	struct intel_plane_state *state;
14205
 
14230
 
14206
	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
14231
	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
14207
	if (cursor == NULL)
14232
	if (cursor == NULL)
14208
		return NULL;
14233
		return NULL;
14209
 
14234
 
14210
	state = intel_create_plane_state(&cursor->base);
14235
	state = intel_create_plane_state(&cursor->base);
14211
	if (!state) {
14236
	if (!state) {
14212
		kfree(cursor);
14237
		kfree(cursor);
14213
		return NULL;
14238
		return NULL;
14214
	}
14239
	}
14215
	cursor->base.state = &state->base;
14240
	cursor->base.state = &state->base;
14216
 
14241
 
14217
	cursor->can_scale = false;
14242
	cursor->can_scale = false;
14218
	cursor->max_downscale = 1;
14243
	cursor->max_downscale = 1;
14219
	cursor->pipe = pipe;
14244
	cursor->pipe = pipe;
14220
	cursor->plane = pipe;
14245
	cursor->plane = pipe;
14221
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
14246
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
14222
	cursor->check_plane = intel_check_cursor_plane;
14247
	cursor->check_plane = intel_check_cursor_plane;
14223
	cursor->commit_plane = intel_commit_cursor_plane;
14248
	cursor->update_plane = intel_update_cursor_plane;
14224
	cursor->disable_plane = intel_disable_cursor_plane;
14249
	cursor->disable_plane = intel_disable_cursor_plane;
14225
 
14250
 
14226
	drm_universal_plane_init(dev, &cursor->base, 0,
14251
	drm_universal_plane_init(dev, &cursor->base, 0,
14227
				 &intel_plane_funcs,
14252
				 &intel_plane_funcs,
14228
				 intel_cursor_formats,
14253
				 intel_cursor_formats,
14229
				 ARRAY_SIZE(intel_cursor_formats),
14254
				 ARRAY_SIZE(intel_cursor_formats),
14230
				 DRM_PLANE_TYPE_CURSOR, NULL);
14255
				 DRM_PLANE_TYPE_CURSOR, NULL);
14231
 
14256
 
14232
	if (INTEL_INFO(dev)->gen >= 4) {
14257
	if (INTEL_INFO(dev)->gen >= 4) {
14233
		if (!dev->mode_config.rotation_property)
14258
		if (!dev->mode_config.rotation_property)
14234
			dev->mode_config.rotation_property =
14259
			dev->mode_config.rotation_property =
14235
				drm_mode_create_rotation_property(dev,
14260
				drm_mode_create_rotation_property(dev,
14236
							BIT(DRM_ROTATE_0) |
14261
							BIT(DRM_ROTATE_0) |
14237
							BIT(DRM_ROTATE_180));
14262
							BIT(DRM_ROTATE_180));
14238
		if (dev->mode_config.rotation_property)
14263
		if (dev->mode_config.rotation_property)
14239
			drm_object_attach_property(&cursor->base.base,
14264
			drm_object_attach_property(&cursor->base.base,
14240
				dev->mode_config.rotation_property,
14265
				dev->mode_config.rotation_property,
14241
				state->base.rotation);
14266
				state->base.rotation);
14242
	}
14267
	}
14243
 
14268
 
14244
	if (INTEL_INFO(dev)->gen >=9)
14269
	if (INTEL_INFO(dev)->gen >=9)
14245
		state->scaler_id = -1;
14270
		state->scaler_id = -1;
14246
 
14271
 
14247
	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14272
	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14248
 
14273
 
14249
	return &cursor->base;
14274
	return &cursor->base;
14250
}
14275
}
14251
 
14276
 
14252
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14277
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14253
	struct intel_crtc_state *crtc_state)
14278
	struct intel_crtc_state *crtc_state)
14254
{
14279
{
14255
	int i;
14280
	int i;
14256
	struct intel_scaler *intel_scaler;
14281
	struct intel_scaler *intel_scaler;
14257
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14282
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14258
 
14283
 
14259
	for (i = 0; i < intel_crtc->num_scalers; i++) {
14284
	for (i = 0; i < intel_crtc->num_scalers; i++) {
14260
		intel_scaler = &scaler_state->scalers[i];
14285
		intel_scaler = &scaler_state->scalers[i];
14261
		intel_scaler->in_use = 0;
14286
		intel_scaler->in_use = 0;
14262
		intel_scaler->mode = PS_SCALER_MODE_DYN;
14287
		intel_scaler->mode = PS_SCALER_MODE_DYN;
14263
	}
14288
	}
14264
 
14289
 
14265
	scaler_state->scaler_id = -1;
14290
	scaler_state->scaler_id = -1;
14266
}
14291
}
14267
 
14292
 
14268
static void intel_crtc_init(struct drm_device *dev, int pipe)
14293
static void intel_crtc_init(struct drm_device *dev, int pipe)
14269
{
14294
{
14270
	struct drm_i915_private *dev_priv = dev->dev_private;
14295
	struct drm_i915_private *dev_priv = dev->dev_private;
14271
	struct intel_crtc *intel_crtc;
14296
	struct intel_crtc *intel_crtc;
14272
	struct intel_crtc_state *crtc_state = NULL;
14297
	struct intel_crtc_state *crtc_state = NULL;
14273
	struct drm_plane *primary = NULL;
14298
	struct drm_plane *primary = NULL;
14274
	struct drm_plane *cursor = NULL;
14299
	struct drm_plane *cursor = NULL;
14275
	int i, ret;
14300
	int i, ret;
14276
 
14301
 
14277
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14302
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14278
	if (intel_crtc == NULL)
14303
	if (intel_crtc == NULL)
14279
		return;
14304
		return;
14280
 
14305
 
14281
	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14306
	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14282
	if (!crtc_state)
14307
	if (!crtc_state)
14283
		goto fail;
14308
		goto fail;
14284
	intel_crtc->config = crtc_state;
14309
	intel_crtc->config = crtc_state;
14285
	intel_crtc->base.state = &crtc_state->base;
14310
	intel_crtc->base.state = &crtc_state->base;
14286
	crtc_state->base.crtc = &intel_crtc->base;
14311
	crtc_state->base.crtc = &intel_crtc->base;
14287
 
14312
 
14288
	/* initialize shared scalers */
14313
	/* initialize shared scalers */
14289
	if (INTEL_INFO(dev)->gen >= 9) {
14314
	if (INTEL_INFO(dev)->gen >= 9) {
14290
		if (pipe == PIPE_C)
14315
		if (pipe == PIPE_C)
14291
			intel_crtc->num_scalers = 1;
14316
			intel_crtc->num_scalers = 1;
14292
		else
14317
		else
14293
			intel_crtc->num_scalers = SKL_NUM_SCALERS;
14318
			intel_crtc->num_scalers = SKL_NUM_SCALERS;
14294
 
14319
 
14295
		skl_init_scalers(dev, intel_crtc, crtc_state);
14320
		skl_init_scalers(dev, intel_crtc, crtc_state);
14296
	}
14321
	}
14297
 
14322
 
14298
	primary = intel_primary_plane_create(dev, pipe);
14323
	primary = intel_primary_plane_create(dev, pipe);
14299
	if (!primary)
14324
	if (!primary)
14300
		goto fail;
14325
		goto fail;
14301
 
14326
 
14302
	cursor = intel_cursor_plane_create(dev, pipe);
14327
	cursor = intel_cursor_plane_create(dev, pipe);
14303
	if (!cursor)
14328
	if (!cursor)
14304
		goto fail;
14329
		goto fail;
14305
 
14330
 
14306
	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14331
	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14307
					cursor, &intel_crtc_funcs, NULL);
14332
					cursor, &intel_crtc_funcs, NULL);
14308
	if (ret)
14333
	if (ret)
14309
		goto fail;
14334
		goto fail;
14310
 
14335
 
14311
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
14336
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
14312
	for (i = 0; i < 256; i++) {
14337
	for (i = 0; i < 256; i++) {
14313
		intel_crtc->lut_r[i] = i;
14338
		intel_crtc->lut_r[i] = i;
14314
		intel_crtc->lut_g[i] = i;
14339
		intel_crtc->lut_g[i] = i;
14315
		intel_crtc->lut_b[i] = i;
14340
		intel_crtc->lut_b[i] = i;
14316
	}
14341
	}
14317
 
14342
 
14318
	/*
14343
	/*
14319
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
14344
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
14320
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
14345
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
14321
	 */
14346
	 */
14322
	intel_crtc->pipe = pipe;
14347
	intel_crtc->pipe = pipe;
14323
	intel_crtc->plane = pipe;
14348
	intel_crtc->plane = pipe;
14324
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
14349
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
14325
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
14350
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
14326
		intel_crtc->plane = !pipe;
14351
		intel_crtc->plane = !pipe;
14327
	}
14352
	}
14328
 
14353
 
14329
	intel_crtc->cursor_base = ~0;
14354
	intel_crtc->cursor_base = ~0;
14330
	intel_crtc->cursor_cntl = ~0;
14355
	intel_crtc->cursor_cntl = ~0;
14331
	intel_crtc->cursor_size = ~0;
14356
	intel_crtc->cursor_size = ~0;
14332
 
14357
 
14333
	intel_crtc->wm.cxsr_allowed = true;
14358
	intel_crtc->wm.cxsr_allowed = true;
14334
 
14359
 
14335
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14360
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14336
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14361
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14337
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14362
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14338
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
14363
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
14339
 
14364
 
14340
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14365
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14341
 
14366
 
14342
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14367
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14343
	return;
14368
	return;
14344
 
14369
 
14345
fail:
14370
fail:
14346
	if (primary)
14371
	if (primary)
14347
		drm_plane_cleanup(primary);
14372
		drm_plane_cleanup(primary);
14348
	if (cursor)
14373
	if (cursor)
14349
		drm_plane_cleanup(cursor);
14374
		drm_plane_cleanup(cursor);
14350
	kfree(crtc_state);
14375
	kfree(crtc_state);
14351
	kfree(intel_crtc);
14376
	kfree(intel_crtc);
14352
}
14377
}
14353
 
14378
 
14354
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14379
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14355
{
14380
{
14356
	struct drm_encoder *encoder = connector->base.encoder;
14381
	struct drm_encoder *encoder = connector->base.encoder;
14357
	struct drm_device *dev = connector->base.dev;
14382
	struct drm_device *dev = connector->base.dev;
14358
 
14383
 
14359
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14384
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14360
 
14385
 
14361
	if (!encoder || WARN_ON(!encoder->crtc))
14386
	if (!encoder || WARN_ON(!encoder->crtc))
14362
		return INVALID_PIPE;
14387
		return INVALID_PIPE;
14363
 
14388
 
14364
	return to_intel_crtc(encoder->crtc)->pipe;
14389
	return to_intel_crtc(encoder->crtc)->pipe;
14365
}
14390
}
14366
 
14391
 
14367
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14392
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14368
				struct drm_file *file)
14393
				struct drm_file *file)
14369
{
14394
{
14370
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14395
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14371
	struct drm_crtc *drmmode_crtc;
14396
	struct drm_crtc *drmmode_crtc;
14372
	struct intel_crtc *crtc;
14397
	struct intel_crtc *crtc;
14373
 
14398
 
14374
	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14399
	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14375
 
14400
 
14376
	if (!drmmode_crtc) {
14401
	if (!drmmode_crtc) {
14377
		DRM_ERROR("no such CRTC id\n");
14402
		DRM_ERROR("no such CRTC id\n");
14378
		return -ENOENT;
14403
		return -ENOENT;
14379
	}
14404
	}
14380
 
14405
 
14381
	crtc = to_intel_crtc(drmmode_crtc);
14406
	crtc = to_intel_crtc(drmmode_crtc);
14382
	pipe_from_crtc_id->pipe = crtc->pipe;
14407
	pipe_from_crtc_id->pipe = crtc->pipe;
14383
 
14408
 
14384
	return 0;
14409
	return 0;
14385
}
14410
}
14386
 
14411
 
14387
static int intel_encoder_clones(struct intel_encoder *encoder)
14412
static int intel_encoder_clones(struct intel_encoder *encoder)
14388
{
14413
{
14389
	struct drm_device *dev = encoder->base.dev;
14414
	struct drm_device *dev = encoder->base.dev;
14390
	struct intel_encoder *source_encoder;
14415
	struct intel_encoder *source_encoder;
14391
	int index_mask = 0;
14416
	int index_mask = 0;
14392
	int entry = 0;
14417
	int entry = 0;
14393
 
14418
 
14394
	for_each_intel_encoder(dev, source_encoder) {
14419
	for_each_intel_encoder(dev, source_encoder) {
14395
		if (encoders_cloneable(encoder, source_encoder))
14420
		if (encoders_cloneable(encoder, source_encoder))
14396
			index_mask |= (1 << entry);
14421
			index_mask |= (1 << entry);
14397
 
14422
 
14398
		entry++;
14423
		entry++;
14399
	}
14424
	}
14400
 
14425
 
14401
	return index_mask;
14426
	return index_mask;
14402
}
14427
}
14403
 
14428
 
14404
static bool has_edp_a(struct drm_device *dev)
14429
static bool has_edp_a(struct drm_device *dev)
14405
{
14430
{
14406
	struct drm_i915_private *dev_priv = dev->dev_private;
14431
	struct drm_i915_private *dev_priv = dev->dev_private;
14407
 
14432
 
14408
	if (!IS_MOBILE(dev))
14433
	if (!IS_MOBILE(dev))
14409
		return false;
14434
		return false;
14410
 
14435
 
14411
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14436
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14412
		return false;
14437
		return false;
14413
 
14438
 
14414
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14439
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14415
		return false;
14440
		return false;
14416
 
14441
 
14417
	return true;
14442
	return true;
14418
}
14443
}
14419
 
14444
 
14420
static bool intel_crt_present(struct drm_device *dev)
14445
static bool intel_crt_present(struct drm_device *dev)
14421
{
14446
{
14422
	struct drm_i915_private *dev_priv = dev->dev_private;
14447
	struct drm_i915_private *dev_priv = dev->dev_private;
14423
 
14448
 
14424
	if (INTEL_INFO(dev)->gen >= 9)
14449
	if (INTEL_INFO(dev)->gen >= 9)
14425
		return false;
14450
		return false;
14426
 
14451
 
14427
	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14452
	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14428
		return false;
14453
		return false;
14429
 
14454
 
14430
	if (IS_CHERRYVIEW(dev))
14455
	if (IS_CHERRYVIEW(dev))
14431
		return false;
14456
		return false;
14432
 
14457
 
14433
	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14458
	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14434
		return false;
14459
		return false;
14435
 
14460
 
14436
	/* DDI E can't be used if DDI A requires 4 lanes */
14461
	/* DDI E can't be used if DDI A requires 4 lanes */
14437
	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14462
	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14438
		return false;
14463
		return false;
14439
 
14464
 
14440
	if (!dev_priv->vbt.int_crt_support)
14465
	if (!dev_priv->vbt.int_crt_support)
14441
		return false;
14466
		return false;
14442
 
14467
 
14443
	return true;
14468
	return true;
14444
}
14469
}
14445
 
14470
 
14446
static void intel_setup_outputs(struct drm_device *dev)
14471
static void intel_setup_outputs(struct drm_device *dev)
14447
{
14472
{
14448
	struct drm_i915_private *dev_priv = dev->dev_private;
14473
	struct drm_i915_private *dev_priv = dev->dev_private;
14449
	struct intel_encoder *encoder;
14474
	struct intel_encoder *encoder;
14450
	bool dpd_is_edp = false;
14475
	bool dpd_is_edp = false;
14451
 
14476
 
14452
	intel_lvds_init(dev);
14477
	intel_lvds_init(dev);
14453
 
14478
 
14454
	if (intel_crt_present(dev))
14479
	if (intel_crt_present(dev))
14455
		intel_crt_init(dev);
14480
		intel_crt_init(dev);
14456
 
14481
 
14457
	if (IS_BROXTON(dev)) {
14482
	if (IS_BROXTON(dev)) {
14458
		/*
14483
		/*
14459
		 * FIXME: Broxton doesn't support port detection via the
14484
		 * FIXME: Broxton doesn't support port detection via the
14460
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14485
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14461
		 * detect the ports.
14486
		 * detect the ports.
14462
		 */
14487
		 */
14463
		intel_ddi_init(dev, PORT_A);
14488
		intel_ddi_init(dev, PORT_A);
14464
		intel_ddi_init(dev, PORT_B);
14489
		intel_ddi_init(dev, PORT_B);
14465
		intel_ddi_init(dev, PORT_C);
14490
		intel_ddi_init(dev, PORT_C);
14466
	} else if (HAS_DDI(dev)) {
14491
	} else if (HAS_DDI(dev)) {
14467
		int found;
14492
		int found;
14468
 
14493
 
14469
		/*
14494
		/*
14470
		 * Haswell uses DDI functions to detect digital outputs.
14495
		 * Haswell uses DDI functions to detect digital outputs.
14471
		 * On SKL pre-D0 the strap isn't connected, so we assume
14496
		 * On SKL pre-D0 the strap isn't connected, so we assume
14472
		 * it's there.
14497
		 * it's there.
14473
		 */
14498
		 */
14474
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14499
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14475
		/* WaIgnoreDDIAStrap: skl */
14500
		/* WaIgnoreDDIAStrap: skl */
14476
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14501
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14477
			intel_ddi_init(dev, PORT_A);
14502
			intel_ddi_init(dev, PORT_A);
14478
 
14503
 
14479
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
14504
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
14480
		 * register */
14505
		 * register */
14481
		found = I915_READ(SFUSE_STRAP);
14506
		found = I915_READ(SFUSE_STRAP);
14482
 
14507
 
14483
		if (found & SFUSE_STRAP_DDIB_DETECTED)
14508
		if (found & SFUSE_STRAP_DDIB_DETECTED)
14484
			intel_ddi_init(dev, PORT_B);
14509
			intel_ddi_init(dev, PORT_B);
14485
		if (found & SFUSE_STRAP_DDIC_DETECTED)
14510
		if (found & SFUSE_STRAP_DDIC_DETECTED)
14486
			intel_ddi_init(dev, PORT_C);
14511
			intel_ddi_init(dev, PORT_C);
14487
		if (found & SFUSE_STRAP_DDID_DETECTED)
14512
		if (found & SFUSE_STRAP_DDID_DETECTED)
14488
			intel_ddi_init(dev, PORT_D);
14513
			intel_ddi_init(dev, PORT_D);
14489
		/*
14514
		/*
14490
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14515
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14491
		 */
14516
		 */
14492
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14517
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14493
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14518
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14494
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14519
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14495
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14520
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14496
			intel_ddi_init(dev, PORT_E);
14521
			intel_ddi_init(dev, PORT_E);
14497
 
14522
 
14498
	} else if (HAS_PCH_SPLIT(dev)) {
14523
	} else if (HAS_PCH_SPLIT(dev)) {
14499
		int found;
14524
		int found;
14500
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14525
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14501
 
14526
 
14502
		if (has_edp_a(dev))
14527
		if (has_edp_a(dev))
14503
			intel_dp_init(dev, DP_A, PORT_A);
14528
			intel_dp_init(dev, DP_A, PORT_A);
14504
 
14529
 
14505
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14530
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14506
			/* PCH SDVOB multiplex with HDMIB */
14531
			/* PCH SDVOB multiplex with HDMIB */
14507
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14532
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14508
			if (!found)
14533
			if (!found)
14509
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14534
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14510
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14535
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14511
				intel_dp_init(dev, PCH_DP_B, PORT_B);
14536
				intel_dp_init(dev, PCH_DP_B, PORT_B);
14512
		}
14537
		}
14513
 
14538
 
14514
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14539
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14515
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14540
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14516
 
14541
 
14517
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14542
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14518
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14543
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14519
 
14544
 
14520
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
14545
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
14521
			intel_dp_init(dev, PCH_DP_C, PORT_C);
14546
			intel_dp_init(dev, PCH_DP_C, PORT_C);
14522
 
14547
 
14523
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
14548
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
14524
			intel_dp_init(dev, PCH_DP_D, PORT_D);
14549
			intel_dp_init(dev, PCH_DP_D, PORT_D);
14525
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14550
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14526
		/*
14551
		/*
14527
		 * The DP_DETECTED bit is the latched state of the DDC
14552
		 * The DP_DETECTED bit is the latched state of the DDC
14528
		 * SDA pin at boot. However since eDP doesn't require DDC
14553
		 * SDA pin at boot. However since eDP doesn't require DDC
14529
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14554
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14530
		 * eDP ports may have been muxed to an alternate function.
14555
		 * eDP ports may have been muxed to an alternate function.
14531
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
14556
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
14532
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
14557
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
14533
		 * detect eDP ports.
14558
		 * detect eDP ports.
14534
		 */
14559
		 */
14535
		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
14560
		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
14536
		    !intel_dp_is_edp(dev, PORT_B))
14561
		    !intel_dp_is_edp(dev, PORT_B))
14537
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14562
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14538
		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
14563
		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
14539
		    intel_dp_is_edp(dev, PORT_B))
14564
		    intel_dp_is_edp(dev, PORT_B))
14540
			intel_dp_init(dev, VLV_DP_B, PORT_B);
14565
			intel_dp_init(dev, VLV_DP_B, PORT_B);
14541
 
14566
 
14542
		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
14567
		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
14543
		    !intel_dp_is_edp(dev, PORT_C))
14568
		    !intel_dp_is_edp(dev, PORT_C))
14544
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14569
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14545
		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
14570
		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
14546
		    intel_dp_is_edp(dev, PORT_C))
14571
		    intel_dp_is_edp(dev, PORT_C))
14547
			intel_dp_init(dev, VLV_DP_C, PORT_C);
14572
			intel_dp_init(dev, VLV_DP_C, PORT_C);
14548
 
14573
 
14549
		if (IS_CHERRYVIEW(dev)) {
14574
		if (IS_CHERRYVIEW(dev)) {
14550
			/* eDP not supported on port D, so don't check VBT */
14575
			/* eDP not supported on port D, so don't check VBT */
14551
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
14576
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
14552
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14577
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14553
			if (I915_READ(CHV_DP_D) & DP_DETECTED)
14578
			if (I915_READ(CHV_DP_D) & DP_DETECTED)
14554
				intel_dp_init(dev, CHV_DP_D, PORT_D);
14579
				intel_dp_init(dev, CHV_DP_D, PORT_D);
14555
		}
14580
		}
14556
 
14581
 
14557
		intel_dsi_init(dev);
14582
		intel_dsi_init(dev);
14558
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14583
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14559
		bool found = false;
14584
		bool found = false;
14560
 
14585
 
14561
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14586
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14562
			DRM_DEBUG_KMS("probing SDVOB\n");
14587
			DRM_DEBUG_KMS("probing SDVOB\n");
14563
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14588
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14564
			if (!found && IS_G4X(dev)) {
14589
			if (!found && IS_G4X(dev)) {
14565
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14590
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14566
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14591
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14567
			}
14592
			}
14568
 
14593
 
14569
			if (!found && IS_G4X(dev))
14594
			if (!found && IS_G4X(dev))
14570
				intel_dp_init(dev, DP_B, PORT_B);
14595
				intel_dp_init(dev, DP_B, PORT_B);
14571
		}
14596
		}
14572
 
14597
 
14573
		/* Before G4X SDVOC doesn't have its own detect register */
14598
		/* Before G4X SDVOC doesn't have its own detect register */
14574
 
14599
 
14575
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14600
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14576
			DRM_DEBUG_KMS("probing SDVOC\n");
14601
			DRM_DEBUG_KMS("probing SDVOC\n");
14577
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14602
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14578
		}
14603
		}
14579
 
14604
 
14580
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14605
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14581
 
14606
 
14582
			if (IS_G4X(dev)) {
14607
			if (IS_G4X(dev)) {
14583
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14608
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14584
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14609
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14585
			}
14610
			}
14586
			if (IS_G4X(dev))
14611
			if (IS_G4X(dev))
14587
				intel_dp_init(dev, DP_C, PORT_C);
14612
				intel_dp_init(dev, DP_C, PORT_C);
14588
		}
14613
		}
14589
 
14614
 
14590
		if (IS_G4X(dev) &&
14615
		if (IS_G4X(dev) &&
14591
		    (I915_READ(DP_D) & DP_DETECTED))
14616
		    (I915_READ(DP_D) & DP_DETECTED))
14592
			intel_dp_init(dev, DP_D, PORT_D);
14617
			intel_dp_init(dev, DP_D, PORT_D);
14593
	} else if (IS_GEN2(dev))
14618
	} else if (IS_GEN2(dev))
14594
		intel_dvo_init(dev);
14619
		intel_dvo_init(dev);
14595
 
14620
 
14596
//   if (SUPPORTS_TV(dev))
14621
//   if (SUPPORTS_TV(dev))
14597
//       intel_tv_init(dev);
14622
//       intel_tv_init(dev);
14598
 
14623
 
14599
	intel_psr_init(dev);
14624
	intel_psr_init(dev);
14600
 
14625
 
14601
	for_each_intel_encoder(dev, encoder) {
14626
	for_each_intel_encoder(dev, encoder) {
14602
		encoder->base.possible_crtcs = encoder->crtc_mask;
14627
		encoder->base.possible_crtcs = encoder->crtc_mask;
14603
		encoder->base.possible_clones =
14628
		encoder->base.possible_clones =
14604
			intel_encoder_clones(encoder);
14629
			intel_encoder_clones(encoder);
14605
	}
14630
	}
14606
 
14631
 
14607
	intel_init_pch_refclk(dev);
14632
	intel_init_pch_refclk(dev);
14608
 
14633
 
14609
	drm_helper_move_panel_connectors_to_head(dev);
14634
	drm_helper_move_panel_connectors_to_head(dev);
14610
}
14635
}
14611
 
14636
 
14612
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14637
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14613
{
14638
{
14614
	struct drm_device *dev = fb->dev;
14639
	struct drm_device *dev = fb->dev;
14615
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14640
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14616
 
14641
 
14617
	drm_framebuffer_cleanup(fb);
14642
	drm_framebuffer_cleanup(fb);
14618
	mutex_lock(&dev->struct_mutex);
14643
	mutex_lock(&dev->struct_mutex);
14619
	WARN_ON(!intel_fb->obj->framebuffer_references--);
14644
	WARN_ON(!intel_fb->obj->framebuffer_references--);
14620
	drm_gem_object_unreference(&intel_fb->obj->base);
14645
	drm_gem_object_unreference(&intel_fb->obj->base);
14621
	mutex_unlock(&dev->struct_mutex);
14646
	mutex_unlock(&dev->struct_mutex);
14622
	kfree(intel_fb);
14647
	kfree(intel_fb);
14623
}
14648
}
14624
 
14649
 
14625
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14650
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14626
						struct drm_file *file,
14651
						struct drm_file *file,
14627
						unsigned int *handle)
14652
						unsigned int *handle)
14628
{
14653
{
14629
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14654
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14630
	struct drm_i915_gem_object *obj = intel_fb->obj;
14655
	struct drm_i915_gem_object *obj = intel_fb->obj;
14631
 
14656
 
14632
	if (obj->userptr.mm) {
14657
	if (obj->userptr.mm) {
14633
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14658
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14634
		return -EINVAL;
14659
		return -EINVAL;
14635
	}
14660
	}
14636
 
14661
 
14637
	return drm_gem_handle_create(file, &obj->base, handle);
14662
	return drm_gem_handle_create(file, &obj->base, handle);
14638
}
14663
}
14639
 
14664
 
14640
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14665
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14641
					struct drm_file *file,
14666
					struct drm_file *file,
14642
					unsigned flags, unsigned color,
14667
					unsigned flags, unsigned color,
14643
					struct drm_clip_rect *clips,
14668
					struct drm_clip_rect *clips,
14644
					unsigned num_clips)
14669
					unsigned num_clips)
14645
{
14670
{
14646
	struct drm_device *dev = fb->dev;
14671
	struct drm_device *dev = fb->dev;
14647
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14672
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14648
	struct drm_i915_gem_object *obj = intel_fb->obj;
14673
	struct drm_i915_gem_object *obj = intel_fb->obj;
14649
 
14674
 
14650
	mutex_lock(&dev->struct_mutex);
14675
	mutex_lock(&dev->struct_mutex);
14651
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14676
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14652
	mutex_unlock(&dev->struct_mutex);
14677
	mutex_unlock(&dev->struct_mutex);
14653
 
14678
 
14654
	return 0;
14679
	return 0;
14655
}
14680
}
14656
 
14681
 
14657
static const struct drm_framebuffer_funcs intel_fb_funcs = {
14682
static const struct drm_framebuffer_funcs intel_fb_funcs = {
14658
	.destroy = intel_user_framebuffer_destroy,
14683
	.destroy = intel_user_framebuffer_destroy,
14659
	.create_handle = intel_user_framebuffer_create_handle,
14684
	.create_handle = intel_user_framebuffer_create_handle,
14660
	.dirty = intel_user_framebuffer_dirty,
14685
	.dirty = intel_user_framebuffer_dirty,
14661
};
14686
};
14662
 
14687
 
14663
static
14688
static
14664
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14689
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14665
			 uint32_t pixel_format)
14690
			 uint32_t pixel_format)
14666
{
14691
{
14667
	u32 gen = INTEL_INFO(dev)->gen;
14692
	u32 gen = INTEL_INFO(dev)->gen;
14668
 
14693
 
14669
	if (gen >= 9) {
14694
	if (gen >= 9) {
-
 
14695
		int cpp = drm_format_plane_cpp(pixel_format, 0);
-
 
14696
 
14670
		/* "The stride in bytes must not exceed the of the size of 8K
14697
		/* "The stride in bytes must not exceed the of the size of 8K
14671
		 *  pixels and 32K bytes."
14698
		 *  pixels and 32K bytes."
14672
		 */
14699
		 */
14673
		 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
14700
		return min(8192 * cpp, 32768);
14674
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14701
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14675
		return 32*1024;
14702
		return 32*1024;
14676
	} else if (gen >= 4) {
14703
	} else if (gen >= 4) {
14677
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14704
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14678
			return 16*1024;
14705
			return 16*1024;
14679
		else
14706
		else
14680
			return 32*1024;
14707
			return 32*1024;
14681
	} else if (gen >= 3) {
14708
	} else if (gen >= 3) {
14682
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14709
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14683
			return 8*1024;
14710
			return 8*1024;
14684
		else
14711
		else
14685
			return 16*1024;
14712
			return 16*1024;
14686
	} else {
14713
	} else {
14687
		/* XXX DSPC is limited to 4k tiled */
14714
		/* XXX DSPC is limited to 4k tiled */
14688
		return 8*1024;
14715
		return 8*1024;
14689
	}
14716
	}
14690
}
14717
}
14691
 
14718
 
14692
static int intel_framebuffer_init(struct drm_device *dev,
14719
static int intel_framebuffer_init(struct drm_device *dev,
14693
				  struct intel_framebuffer *intel_fb,
14720
				  struct intel_framebuffer *intel_fb,
14694
				  struct drm_mode_fb_cmd2 *mode_cmd,
14721
				  struct drm_mode_fb_cmd2 *mode_cmd,
14695
				  struct drm_i915_gem_object *obj)
14722
				  struct drm_i915_gem_object *obj)
14696
{
14723
{
-
 
14724
	struct drm_i915_private *dev_priv = to_i915(dev);
14697
	unsigned int aligned_height;
14725
	unsigned int aligned_height;
14698
	int ret;
14726
	int ret;
14699
	u32 pitch_limit, stride_alignment;
14727
	u32 pitch_limit, stride_alignment;
14700
 
14728
 
14701
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14729
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14702
 
14730
 
14703
	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14731
	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14704
		/* Enforce that fb modifier and tiling mode match, but only for
14732
		/* Enforce that fb modifier and tiling mode match, but only for
14705
		 * X-tiled. This is needed for FBC. */
14733
		 * X-tiled. This is needed for FBC. */
14706
		if (!!(obj->tiling_mode == I915_TILING_X) !=
14734
		if (!!(obj->tiling_mode == I915_TILING_X) !=
14707
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14735
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14708
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14736
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14709
			return -EINVAL;
14737
			return -EINVAL;
14710
		}
14738
		}
14711
	} else {
14739
	} else {
14712
		if (obj->tiling_mode == I915_TILING_X)
14740
		if (obj->tiling_mode == I915_TILING_X)
14713
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14741
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14714
		else if (obj->tiling_mode == I915_TILING_Y) {
14742
		else if (obj->tiling_mode == I915_TILING_Y) {
14715
			DRM_DEBUG("No Y tiling for legacy addfb\n");
14743
			DRM_DEBUG("No Y tiling for legacy addfb\n");
14716
			return -EINVAL;
14744
			return -EINVAL;
14717
		}
14745
		}
14718
	}
14746
	}
14719
 
14747
 
14720
	/* Passed in modifier sanity checking. */
14748
	/* Passed in modifier sanity checking. */
14721
	switch (mode_cmd->modifier[0]) {
14749
	switch (mode_cmd->modifier[0]) {
14722
	case I915_FORMAT_MOD_Y_TILED:
14750
	case I915_FORMAT_MOD_Y_TILED:
14723
	case I915_FORMAT_MOD_Yf_TILED:
14751
	case I915_FORMAT_MOD_Yf_TILED:
14724
		if (INTEL_INFO(dev)->gen < 9) {
14752
		if (INTEL_INFO(dev)->gen < 9) {
14725
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14753
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14726
				  mode_cmd->modifier[0]);
14754
				  mode_cmd->modifier[0]);
14727
			return -EINVAL;
14755
			return -EINVAL;
14728
		}
14756
		}
14729
	case DRM_FORMAT_MOD_NONE:
14757
	case DRM_FORMAT_MOD_NONE:
14730
	case I915_FORMAT_MOD_X_TILED:
14758
	case I915_FORMAT_MOD_X_TILED:
14731
		break;
14759
		break;
14732
	default:
14760
	default:
14733
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14761
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14734
			  mode_cmd->modifier[0]);
14762
			  mode_cmd->modifier[0]);
14735
		return -EINVAL;
14763
		return -EINVAL;
14736
	}
14764
	}
14737
 
14765
 
-
 
14766
	stride_alignment = intel_fb_stride_alignment(dev_priv,
14738
	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
14767
						     mode_cmd->modifier[0],
14739
						     mode_cmd->pixel_format);
14768
						     mode_cmd->pixel_format);
14740
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14769
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14741
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14770
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14742
			  mode_cmd->pitches[0], stride_alignment);
14771
			  mode_cmd->pitches[0], stride_alignment);
14743
		return -EINVAL;
14772
		return -EINVAL;
14744
	}
14773
	}
14745
 
14774
 
14746
	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14775
	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14747
					   mode_cmd->pixel_format);
14776
					   mode_cmd->pixel_format);
14748
	if (mode_cmd->pitches[0] > pitch_limit) {
14777
	if (mode_cmd->pitches[0] > pitch_limit) {
14749
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
14778
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
14750
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14779
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14751
			  "tiled" : "linear",
14780
			  "tiled" : "linear",
14752
			  mode_cmd->pitches[0], pitch_limit);
14781
			  mode_cmd->pitches[0], pitch_limit);
14753
		return -EINVAL;
14782
		return -EINVAL;
14754
	}
14783
	}
14755
 
14784
 
14756
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14785
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14757
	    mode_cmd->pitches[0] != obj->stride) {
14786
	    mode_cmd->pitches[0] != obj->stride) {
14758
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14787
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14759
			  mode_cmd->pitches[0], obj->stride);
14788
			  mode_cmd->pitches[0], obj->stride);
14760
		return -EINVAL;
14789
		return -EINVAL;
14761
	}
14790
	}
14762
 
14791
 
14763
	/* Reject formats not supported by any plane early. */
14792
	/* Reject formats not supported by any plane early. */
14764
	switch (mode_cmd->pixel_format) {
14793
	switch (mode_cmd->pixel_format) {
14765
	case DRM_FORMAT_C8:
14794
	case DRM_FORMAT_C8:
14766
	case DRM_FORMAT_RGB565:
14795
	case DRM_FORMAT_RGB565:
14767
	case DRM_FORMAT_XRGB8888:
14796
	case DRM_FORMAT_XRGB8888:
14768
	case DRM_FORMAT_ARGB8888:
14797
	case DRM_FORMAT_ARGB8888:
14769
		break;
14798
		break;
14770
	case DRM_FORMAT_XRGB1555:
14799
	case DRM_FORMAT_XRGB1555:
14771
		if (INTEL_INFO(dev)->gen > 3) {
14800
		if (INTEL_INFO(dev)->gen > 3) {
14772
			DRM_DEBUG("unsupported pixel format: %s\n",
14801
			DRM_DEBUG("unsupported pixel format: %s\n",
14773
				  drm_get_format_name(mode_cmd->pixel_format));
14802
				  drm_get_format_name(mode_cmd->pixel_format));
14774
			return -EINVAL;
14803
			return -EINVAL;
14775
		}
14804
		}
14776
		break;
14805
		break;
14777
	case DRM_FORMAT_ABGR8888:
14806
	case DRM_FORMAT_ABGR8888:
14778
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
14807
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
14779
		    INTEL_INFO(dev)->gen < 9) {
14808
		    INTEL_INFO(dev)->gen < 9) {
14780
			DRM_DEBUG("unsupported pixel format: %s\n",
14809
			DRM_DEBUG("unsupported pixel format: %s\n",
14781
				  drm_get_format_name(mode_cmd->pixel_format));
14810
				  drm_get_format_name(mode_cmd->pixel_format));
14782
			return -EINVAL;
14811
			return -EINVAL;
14783
		}
14812
		}
14784
		break;
14813
		break;
14785
	case DRM_FORMAT_XBGR8888:
14814
	case DRM_FORMAT_XBGR8888:
14786
	case DRM_FORMAT_XRGB2101010:
14815
	case DRM_FORMAT_XRGB2101010:
14787
	case DRM_FORMAT_XBGR2101010:
14816
	case DRM_FORMAT_XBGR2101010:
14788
		if (INTEL_INFO(dev)->gen < 4) {
14817
		if (INTEL_INFO(dev)->gen < 4) {
14789
			DRM_DEBUG("unsupported pixel format: %s\n",
14818
			DRM_DEBUG("unsupported pixel format: %s\n",
14790
				  drm_get_format_name(mode_cmd->pixel_format));
14819
				  drm_get_format_name(mode_cmd->pixel_format));
14791
			return -EINVAL;
14820
			return -EINVAL;
14792
		}
14821
		}
14793
		break;
14822
		break;
14794
	case DRM_FORMAT_ABGR2101010:
14823
	case DRM_FORMAT_ABGR2101010:
14795
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14824
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14796
			DRM_DEBUG("unsupported pixel format: %s\n",
14825
			DRM_DEBUG("unsupported pixel format: %s\n",
14797
				  drm_get_format_name(mode_cmd->pixel_format));
14826
				  drm_get_format_name(mode_cmd->pixel_format));
14798
			return -EINVAL;
14827
			return -EINVAL;
14799
		}
14828
		}
14800
		break;
14829
		break;
14801
	case DRM_FORMAT_YUYV:
14830
	case DRM_FORMAT_YUYV:
14802
	case DRM_FORMAT_UYVY:
14831
	case DRM_FORMAT_UYVY:
14803
	case DRM_FORMAT_YVYU:
14832
	case DRM_FORMAT_YVYU:
14804
	case DRM_FORMAT_VYUY:
14833
	case DRM_FORMAT_VYUY:
14805
		if (INTEL_INFO(dev)->gen < 5) {
14834
		if (INTEL_INFO(dev)->gen < 5) {
14806
			DRM_DEBUG("unsupported pixel format: %s\n",
14835
			DRM_DEBUG("unsupported pixel format: %s\n",
14807
				  drm_get_format_name(mode_cmd->pixel_format));
14836
				  drm_get_format_name(mode_cmd->pixel_format));
14808
			return -EINVAL;
14837
			return -EINVAL;
14809
		}
14838
		}
14810
		break;
14839
		break;
14811
	default:
14840
	default:
14812
		DRM_DEBUG("unsupported pixel format: %s\n",
14841
		DRM_DEBUG("unsupported pixel format: %s\n",
14813
			  drm_get_format_name(mode_cmd->pixel_format));
14842
			  drm_get_format_name(mode_cmd->pixel_format));
14814
		return -EINVAL;
14843
		return -EINVAL;
14815
	}
14844
	}
14816
 
14845
 
14817
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14846
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14818
	if (mode_cmd->offsets[0] != 0)
14847
	if (mode_cmd->offsets[0] != 0)
14819
		return -EINVAL;
14848
		return -EINVAL;
14820
 
14849
 
14821
	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
14850
	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
14822
					       mode_cmd->pixel_format,
14851
					       mode_cmd->pixel_format,
14823
					       mode_cmd->modifier[0]);
14852
					       mode_cmd->modifier[0]);
14824
	/* FIXME drm helper for size checks (especially planar formats)? */
14853
	/* FIXME drm helper for size checks (especially planar formats)? */
14825
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14854
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14826
		return -EINVAL;
14855
		return -EINVAL;
14827
 
14856
 
14828
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14857
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14829
	intel_fb->obj = obj;
14858
	intel_fb->obj = obj;
14830
	intel_fb->obj->framebuffer_references++;
-
 
14831
 
14859
 
14832
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14860
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14833
	if (ret) {
14861
	if (ret) {
14834
		DRM_ERROR("framebuffer init failed %d\n", ret);
14862
		DRM_ERROR("framebuffer init failed %d\n", ret);
14835
		return ret;
14863
		return ret;
14836
	}
14864
	}
-
 
14865
 
-
 
14866
	intel_fb->obj->framebuffer_references++;
-
 
14867
 
14837
	kolibri_framebuffer_init(intel_fb);
14868
	kolibri_framebuffer_init(intel_fb);
-
 
14869
 
14838
	return 0;
14870
	return 0;
14839
}
14871
}
14840
 
14872
 
14841
static struct drm_framebuffer *
14873
static struct drm_framebuffer *
14842
intel_user_framebuffer_create(struct drm_device *dev,
14874
intel_user_framebuffer_create(struct drm_device *dev,
14843
			      struct drm_file *filp,
14875
			      struct drm_file *filp,
14844
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
14876
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
14845
{
14877
{
14846
	struct drm_framebuffer *fb;
14878
	struct drm_framebuffer *fb;
14847
	struct drm_i915_gem_object *obj;
14879
	struct drm_i915_gem_object *obj;
14848
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14880
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14849
 
14881
 
14850
	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
14882
	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
14851
						mode_cmd.handles[0]));
14883
						mode_cmd.handles[0]));
14852
	if (&obj->base == NULL)
14884
	if (&obj->base == NULL)
14853
		return ERR_PTR(-ENOENT);
14885
		return ERR_PTR(-ENOENT);
14854
 
14886
 
14855
	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14887
	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14856
	if (IS_ERR(fb))
14888
	if (IS_ERR(fb))
14857
		drm_gem_object_unreference_unlocked(&obj->base);
14889
		drm_gem_object_unreference_unlocked(&obj->base);
14858
 
14890
 
14859
	return fb;
14891
	return fb;
14860
}
14892
}
14861
 
14893
 
14862
#ifndef CONFIG_DRM_FBDEV_EMULATION
14894
#ifndef CONFIG_DRM_FBDEV_EMULATION
14863
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
14895
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
14864
{
14896
{
14865
}
14897
}
14866
#endif
14898
#endif
14867
 
14899
 
14868
static const struct drm_mode_config_funcs intel_mode_funcs = {
14900
static const struct drm_mode_config_funcs intel_mode_funcs = {
14869
	.fb_create = intel_user_framebuffer_create,
14901
	.fb_create = intel_user_framebuffer_create,
14870
	.output_poll_changed = intel_fbdev_output_poll_changed,
14902
	.output_poll_changed = intel_fbdev_output_poll_changed,
14871
	.atomic_check = intel_atomic_check,
14903
	.atomic_check = intel_atomic_check,
14872
	.atomic_commit = intel_atomic_commit,
14904
	.atomic_commit = intel_atomic_commit,
14873
	.atomic_state_alloc = intel_atomic_state_alloc,
14905
	.atomic_state_alloc = intel_atomic_state_alloc,
14874
	.atomic_state_clear = intel_atomic_state_clear,
14906
	.atomic_state_clear = intel_atomic_state_clear,
14875
};
14907
};
14876
 
14908
 
14877
/* Set up chip specific display functions */
14909
/* Set up chip specific display functions */
14878
static void intel_init_display(struct drm_device *dev)
14910
static void intel_init_display(struct drm_device *dev)
14879
{
14911
{
14880
	struct drm_i915_private *dev_priv = dev->dev_private;
14912
	struct drm_i915_private *dev_priv = dev->dev_private;
14881
 
14913
 
14882
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
14914
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
14883
		dev_priv->display.find_dpll = g4x_find_best_dpll;
14915
		dev_priv->display.find_dpll = g4x_find_best_dpll;
14884
	else if (IS_CHERRYVIEW(dev))
14916
	else if (IS_CHERRYVIEW(dev))
14885
		dev_priv->display.find_dpll = chv_find_best_dpll;
14917
		dev_priv->display.find_dpll = chv_find_best_dpll;
14886
	else if (IS_VALLEYVIEW(dev))
14918
	else if (IS_VALLEYVIEW(dev))
14887
		dev_priv->display.find_dpll = vlv_find_best_dpll;
14919
		dev_priv->display.find_dpll = vlv_find_best_dpll;
14888
	else if (IS_PINEVIEW(dev))
14920
	else if (IS_PINEVIEW(dev))
14889
		dev_priv->display.find_dpll = pnv_find_best_dpll;
14921
		dev_priv->display.find_dpll = pnv_find_best_dpll;
14890
	else
14922
	else
14891
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
14923
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
14892
 
14924
 
14893
	if (INTEL_INFO(dev)->gen >= 9) {
14925
	if (INTEL_INFO(dev)->gen >= 9) {
14894
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14926
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14895
		dev_priv->display.get_initial_plane_config =
14927
		dev_priv->display.get_initial_plane_config =
14896
			skylake_get_initial_plane_config;
14928
			skylake_get_initial_plane_config;
14897
		dev_priv->display.crtc_compute_clock =
14929
		dev_priv->display.crtc_compute_clock =
14898
			haswell_crtc_compute_clock;
14930
			haswell_crtc_compute_clock;
14899
		dev_priv->display.crtc_enable = haswell_crtc_enable;
14931
		dev_priv->display.crtc_enable = haswell_crtc_enable;
14900
		dev_priv->display.crtc_disable = haswell_crtc_disable;
14932
		dev_priv->display.crtc_disable = haswell_crtc_disable;
14901
		dev_priv->display.update_primary_plane =
-
 
14902
			skylake_update_primary_plane;
-
 
14903
	} else if (HAS_DDI(dev)) {
14933
	} else if (HAS_DDI(dev)) {
14904
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14934
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14905
		dev_priv->display.get_initial_plane_config =
14935
		dev_priv->display.get_initial_plane_config =
14906
			ironlake_get_initial_plane_config;
14936
			ironlake_get_initial_plane_config;
14907
		dev_priv->display.crtc_compute_clock =
14937
		dev_priv->display.crtc_compute_clock =
14908
			haswell_crtc_compute_clock;
14938
			haswell_crtc_compute_clock;
14909
		dev_priv->display.crtc_enable = haswell_crtc_enable;
14939
		dev_priv->display.crtc_enable = haswell_crtc_enable;
14910
		dev_priv->display.crtc_disable = haswell_crtc_disable;
14940
		dev_priv->display.crtc_disable = haswell_crtc_disable;
14911
		dev_priv->display.update_primary_plane =
-
 
14912
			ironlake_update_primary_plane;
-
 
14913
	} else if (HAS_PCH_SPLIT(dev)) {
14941
	} else if (HAS_PCH_SPLIT(dev)) {
14914
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14942
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14915
		dev_priv->display.get_initial_plane_config =
14943
		dev_priv->display.get_initial_plane_config =
14916
			ironlake_get_initial_plane_config;
14944
			ironlake_get_initial_plane_config;
14917
		dev_priv->display.crtc_compute_clock =
14945
		dev_priv->display.crtc_compute_clock =
14918
			ironlake_crtc_compute_clock;
14946
			ironlake_crtc_compute_clock;
14919
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
14947
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
14920
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
14948
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
14921
		dev_priv->display.update_primary_plane =
-
 
14922
			ironlake_update_primary_plane;
-
 
14923
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14949
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14924
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14950
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14925
		dev_priv->display.get_initial_plane_config =
14951
		dev_priv->display.get_initial_plane_config =
14926
			i9xx_get_initial_plane_config;
14952
			i9xx_get_initial_plane_config;
14927
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14953
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14928
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14954
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14929
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14955
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14930
		dev_priv->display.update_primary_plane =
-
 
14931
			i9xx_update_primary_plane;
-
 
14932
	} else {
14956
	} else {
14933
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14957
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14934
		dev_priv->display.get_initial_plane_config =
14958
		dev_priv->display.get_initial_plane_config =
14935
			i9xx_get_initial_plane_config;
14959
			i9xx_get_initial_plane_config;
14936
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14960
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14937
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
14961
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
14938
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14962
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14939
		dev_priv->display.update_primary_plane =
-
 
14940
			i9xx_update_primary_plane;
-
 
14941
	}
14963
	}
14942
 
14964
 
14943
	/* Returns the core display clock speed */
14965
	/* Returns the core display clock speed */
14944
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14966
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14945
		dev_priv->display.get_display_clock_speed =
14967
		dev_priv->display.get_display_clock_speed =
14946
			skylake_get_display_clock_speed;
14968
			skylake_get_display_clock_speed;
14947
	else if (IS_BROXTON(dev))
14969
	else if (IS_BROXTON(dev))
14948
		dev_priv->display.get_display_clock_speed =
14970
		dev_priv->display.get_display_clock_speed =
14949
			broxton_get_display_clock_speed;
14971
			broxton_get_display_clock_speed;
14950
	else if (IS_BROADWELL(dev))
14972
	else if (IS_BROADWELL(dev))
14951
		dev_priv->display.get_display_clock_speed =
14973
		dev_priv->display.get_display_clock_speed =
14952
			broadwell_get_display_clock_speed;
14974
			broadwell_get_display_clock_speed;
14953
	else if (IS_HASWELL(dev))
14975
	else if (IS_HASWELL(dev))
14954
		dev_priv->display.get_display_clock_speed =
14976
		dev_priv->display.get_display_clock_speed =
14955
			haswell_get_display_clock_speed;
14977
			haswell_get_display_clock_speed;
14956
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
14978
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
14957
		dev_priv->display.get_display_clock_speed =
14979
		dev_priv->display.get_display_clock_speed =
14958
			valleyview_get_display_clock_speed;
14980
			valleyview_get_display_clock_speed;
14959
	else if (IS_GEN5(dev))
14981
	else if (IS_GEN5(dev))
14960
		dev_priv->display.get_display_clock_speed =
14982
		dev_priv->display.get_display_clock_speed =
14961
			ilk_get_display_clock_speed;
14983
			ilk_get_display_clock_speed;
14962
	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
14984
	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
14963
		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
14985
		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
14964
		dev_priv->display.get_display_clock_speed =
14986
		dev_priv->display.get_display_clock_speed =
14965
			i945_get_display_clock_speed;
14987
			i945_get_display_clock_speed;
14966
	else if (IS_GM45(dev))
14988
	else if (IS_GM45(dev))
14967
		dev_priv->display.get_display_clock_speed =
14989
		dev_priv->display.get_display_clock_speed =
14968
			gm45_get_display_clock_speed;
14990
			gm45_get_display_clock_speed;
14969
	else if (IS_CRESTLINE(dev))
14991
	else if (IS_CRESTLINE(dev))
14970
		dev_priv->display.get_display_clock_speed =
14992
		dev_priv->display.get_display_clock_speed =
14971
			i965gm_get_display_clock_speed;
14993
			i965gm_get_display_clock_speed;
14972
	else if (IS_PINEVIEW(dev))
14994
	else if (IS_PINEVIEW(dev))
14973
		dev_priv->display.get_display_clock_speed =
14995
		dev_priv->display.get_display_clock_speed =
14974
			pnv_get_display_clock_speed;
14996
			pnv_get_display_clock_speed;
14975
	else if (IS_G33(dev) || IS_G4X(dev))
14997
	else if (IS_G33(dev) || IS_G4X(dev))
14976
		dev_priv->display.get_display_clock_speed =
14998
		dev_priv->display.get_display_clock_speed =
14977
			g33_get_display_clock_speed;
14999
			g33_get_display_clock_speed;
14978
	else if (IS_I915G(dev))
15000
	else if (IS_I915G(dev))
14979
		dev_priv->display.get_display_clock_speed =
15001
		dev_priv->display.get_display_clock_speed =
14980
			i915_get_display_clock_speed;
15002
			i915_get_display_clock_speed;
14981
	else if (IS_I945GM(dev) || IS_845G(dev))
15003
	else if (IS_I945GM(dev) || IS_845G(dev))
14982
		dev_priv->display.get_display_clock_speed =
15004
		dev_priv->display.get_display_clock_speed =
14983
			i9xx_misc_get_display_clock_speed;
15005
			i9xx_misc_get_display_clock_speed;
14984
	else if (IS_I915GM(dev))
15006
	else if (IS_I915GM(dev))
14985
		dev_priv->display.get_display_clock_speed =
15007
		dev_priv->display.get_display_clock_speed =
14986
			i915gm_get_display_clock_speed;
15008
			i915gm_get_display_clock_speed;
14987
	else if (IS_I865G(dev))
15009
	else if (IS_I865G(dev))
14988
		dev_priv->display.get_display_clock_speed =
15010
		dev_priv->display.get_display_clock_speed =
14989
			i865_get_display_clock_speed;
15011
			i865_get_display_clock_speed;
14990
	else if (IS_I85X(dev))
15012
	else if (IS_I85X(dev))
14991
		dev_priv->display.get_display_clock_speed =
15013
		dev_priv->display.get_display_clock_speed =
14992
			i85x_get_display_clock_speed;
15014
			i85x_get_display_clock_speed;
14993
	else { /* 830 */
15015
	else { /* 830 */
14994
		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
15016
		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
14995
		dev_priv->display.get_display_clock_speed =
15017
		dev_priv->display.get_display_clock_speed =
14996
			i830_get_display_clock_speed;
15018
			i830_get_display_clock_speed;
14997
	}
15019
	}
14998
 
15020
 
14999
	if (IS_GEN5(dev)) {
15021
	if (IS_GEN5(dev)) {
15000
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15022
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15001
	} else if (IS_GEN6(dev)) {
15023
	} else if (IS_GEN6(dev)) {
15002
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15024
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15003
	} else if (IS_IVYBRIDGE(dev)) {
15025
	} else if (IS_IVYBRIDGE(dev)) {
15004
		/* FIXME: detect B0+ stepping and use auto training */
15026
		/* FIXME: detect B0+ stepping and use auto training */
15005
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15027
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15006
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
15028
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
15007
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15029
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15008
		if (IS_BROADWELL(dev)) {
15030
		if (IS_BROADWELL(dev)) {
15009
			dev_priv->display.modeset_commit_cdclk =
15031
			dev_priv->display.modeset_commit_cdclk =
15010
				broadwell_modeset_commit_cdclk;
15032
				broadwell_modeset_commit_cdclk;
15011
			dev_priv->display.modeset_calc_cdclk =
15033
			dev_priv->display.modeset_calc_cdclk =
15012
				broadwell_modeset_calc_cdclk;
15034
				broadwell_modeset_calc_cdclk;
15013
		}
15035
		}
15014
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
15036
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
15015
		dev_priv->display.modeset_commit_cdclk =
15037
		dev_priv->display.modeset_commit_cdclk =
15016
			valleyview_modeset_commit_cdclk;
15038
			valleyview_modeset_commit_cdclk;
15017
		dev_priv->display.modeset_calc_cdclk =
15039
		dev_priv->display.modeset_calc_cdclk =
15018
			valleyview_modeset_calc_cdclk;
15040
			valleyview_modeset_calc_cdclk;
15019
	} else if (IS_BROXTON(dev)) {
15041
	} else if (IS_BROXTON(dev)) {
15020
		dev_priv->display.modeset_commit_cdclk =
15042
		dev_priv->display.modeset_commit_cdclk =
15021
			broxton_modeset_commit_cdclk;
15043
			broxton_modeset_commit_cdclk;
15022
		dev_priv->display.modeset_calc_cdclk =
15044
		dev_priv->display.modeset_calc_cdclk =
15023
			broxton_modeset_calc_cdclk;
15045
			broxton_modeset_calc_cdclk;
15024
	}
15046
	}
15025
 
15047
 
15026
	switch (INTEL_INFO(dev)->gen) {
15048
	switch (INTEL_INFO(dev)->gen) {
15027
	case 2:
15049
	case 2:
15028
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
15050
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
15029
		break;
15051
		break;
15030
 
15052
 
15031
	case 3:
15053
	case 3:
15032
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
15054
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
15033
		break;
15055
		break;
15034
 
15056
 
15035
	case 4:
15057
	case 4:
15036
	case 5:
15058
	case 5:
15037
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
15059
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
15038
		break;
15060
		break;
15039
 
15061
 
15040
	case 6:
15062
	case 6:
15041
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
15063
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
15042
		break;
15064
		break;
15043
	case 7:
15065
	case 7:
15044
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
15066
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
15045
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
15067
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
15046
		break;
15068
		break;
15047
	case 9:
15069
	case 9:
15048
		/* Drop through - unsupported since execlist only. */
15070
		/* Drop through - unsupported since execlist only. */
15049
	default:
15071
	default:
15050
		/* Default just returns -ENODEV to indicate unsupported */
15072
		/* Default just returns -ENODEV to indicate unsupported */
15051
		dev_priv->display.queue_flip = intel_default_queue_flip;
15073
		dev_priv->display.queue_flip = intel_default_queue_flip;
15052
	}
15074
	}
15053
 
15075
 
15054
	mutex_init(&dev_priv->pps_mutex);
15076
	mutex_init(&dev_priv->pps_mutex);
15055
}
15077
}
15056
 
15078
 
15057
/*
15079
/*
15058
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15080
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15059
 * resume, or other times.  This quirk makes sure that's the case for
15081
 * resume, or other times.  This quirk makes sure that's the case for
15060
 * affected systems.
15082
 * affected systems.
15061
 */
15083
 */
15062
static void quirk_pipea_force(struct drm_device *dev)
15084
static void quirk_pipea_force(struct drm_device *dev)
15063
{
15085
{
15064
	struct drm_i915_private *dev_priv = dev->dev_private;
15086
	struct drm_i915_private *dev_priv = dev->dev_private;
15065
 
15087
 
15066
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15088
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15067
	DRM_INFO("applying pipe a force quirk\n");
15089
	DRM_INFO("applying pipe a force quirk\n");
15068
}
15090
}
15069
 
15091
 
15070
static void quirk_pipeb_force(struct drm_device *dev)
15092
static void quirk_pipeb_force(struct drm_device *dev)
15071
{
15093
{
15072
	struct drm_i915_private *dev_priv = dev->dev_private;
15094
	struct drm_i915_private *dev_priv = dev->dev_private;
15073
 
15095
 
15074
	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15096
	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15075
	DRM_INFO("applying pipe b force quirk\n");
15097
	DRM_INFO("applying pipe b force quirk\n");
15076
}
15098
}
15077
 
15099
 
15078
/*
15100
/*
15079
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15101
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15080
 */
15102
 */
15081
static void quirk_ssc_force_disable(struct drm_device *dev)
15103
static void quirk_ssc_force_disable(struct drm_device *dev)
15082
{
15104
{
15083
	struct drm_i915_private *dev_priv = dev->dev_private;
15105
	struct drm_i915_private *dev_priv = dev->dev_private;
15084
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15106
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15085
	DRM_INFO("applying lvds SSC disable quirk\n");
15107
	DRM_INFO("applying lvds SSC disable quirk\n");
15086
}
15108
}
15087
 
15109
 
15088
/*
15110
/*
15089
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15111
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15090
 * brightness value
15112
 * brightness value
15091
 */
15113
 */
15092
static void quirk_invert_brightness(struct drm_device *dev)
15114
static void quirk_invert_brightness(struct drm_device *dev)
15093
{
15115
{
15094
	struct drm_i915_private *dev_priv = dev->dev_private;
15116
	struct drm_i915_private *dev_priv = dev->dev_private;
15095
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15117
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15096
	DRM_INFO("applying inverted panel brightness quirk\n");
15118
	DRM_INFO("applying inverted panel brightness quirk\n");
15097
}
15119
}
15098
 
15120
 
15099
/* Some VBT's incorrectly indicate no backlight is present */
15121
/* Some VBT's incorrectly indicate no backlight is present */
15100
static void quirk_backlight_present(struct drm_device *dev)
15122
static void quirk_backlight_present(struct drm_device *dev)
15101
{
15123
{
15102
	struct drm_i915_private *dev_priv = dev->dev_private;
15124
	struct drm_i915_private *dev_priv = dev->dev_private;
15103
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15125
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15104
	DRM_INFO("applying backlight present quirk\n");
15126
	DRM_INFO("applying backlight present quirk\n");
15105
}
15127
}
15106
 
15128
 
15107
struct intel_quirk {
15129
struct intel_quirk {
15108
	int device;
15130
	int device;
15109
	int subsystem_vendor;
15131
	int subsystem_vendor;
15110
	int subsystem_device;
15132
	int subsystem_device;
15111
	void (*hook)(struct drm_device *dev);
15133
	void (*hook)(struct drm_device *dev);
15112
};
15134
};
15113
 
15135
 
15114
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15136
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15115
struct intel_dmi_quirk {
15137
struct intel_dmi_quirk {
15116
	void (*hook)(struct drm_device *dev);
15138
	void (*hook)(struct drm_device *dev);
15117
	const struct dmi_system_id (*dmi_id_list)[];
15139
	const struct dmi_system_id (*dmi_id_list)[];
15118
};
15140
};
15119
 
15141
 
15120
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15142
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15121
{
15143
{
15122
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15144
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15123
	return 1;
15145
	return 1;
15124
}
15146
}
15125
 
15147
 
15126
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
15148
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
15127
	{
15149
	{
15128
		.dmi_id_list = &(const struct dmi_system_id[]) {
15150
		.dmi_id_list = &(const struct dmi_system_id[]) {
15129
			{
15151
			{
15130
				.callback = intel_dmi_reverse_brightness,
15152
				.callback = intel_dmi_reverse_brightness,
15131
				.ident = "NCR Corporation",
15153
				.ident = "NCR Corporation",
15132
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
15154
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
15133
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
15155
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
15134
				},
15156
				},
15135
			},
15157
			},
15136
			{ }  /* terminating entry */
15158
			{ }  /* terminating entry */
15137
		},
15159
		},
15138
		.hook = quirk_invert_brightness,
15160
		.hook = quirk_invert_brightness,
15139
	},
15161
	},
15140
};
15162
};
15141
 
15163
 
15142
static struct intel_quirk intel_quirks[] = {
15164
static struct intel_quirk intel_quirks[] = {
15143
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
15165
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
15144
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
15166
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
15145
 
15167
 
15146
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
15168
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
15147
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
15169
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
15148
 
15170
 
15149
	/* 830 needs to leave pipe A & dpll A up */
15171
	/* 830 needs to leave pipe A & dpll A up */
15150
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
15172
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
15151
 
15173
 
15152
	/* 830 needs to leave pipe B & dpll B up */
15174
	/* 830 needs to leave pipe B & dpll B up */
15153
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
15175
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
15154
 
15176
 
15155
	/* Lenovo U160 cannot use SSC on LVDS */
15177
	/* Lenovo U160 cannot use SSC on LVDS */
15156
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
15178
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
15157
 
15179
 
15158
	/* Sony Vaio Y cannot use SSC on LVDS */
15180
	/* Sony Vaio Y cannot use SSC on LVDS */
15159
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15181
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15160
 
15182
 
15161
	/* Acer Aspire 5734Z must invert backlight brightness */
15183
	/* Acer Aspire 5734Z must invert backlight brightness */
15162
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15184
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15163
 
15185
 
15164
	/* Acer/eMachines G725 */
15186
	/* Acer/eMachines G725 */
15165
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15187
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15166
 
15188
 
15167
	/* Acer/eMachines e725 */
15189
	/* Acer/eMachines e725 */
15168
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15190
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15169
 
15191
 
15170
	/* Acer/Packard Bell NCL20 */
15192
	/* Acer/Packard Bell NCL20 */
15171
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15193
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15172
 
15194
 
15173
	/* Acer Aspire 4736Z */
15195
	/* Acer Aspire 4736Z */
15174
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15196
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15175
 
15197
 
15176
	/* Acer Aspire 5336 */
15198
	/* Acer Aspire 5336 */
15177
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15199
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15178
 
15200
 
15179
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15201
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15180
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15202
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15181
 
15203
 
15182
	/* Acer C720 Chromebook (Core i3 4005U) */
15204
	/* Acer C720 Chromebook (Core i3 4005U) */
15183
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15205
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15184
 
15206
 
15185
	/* Apple Macbook 2,1 (Core 2 T7400) */
15207
	/* Apple Macbook 2,1 (Core 2 T7400) */
15186
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15208
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15187
 
15209
 
15188
	/* Apple Macbook 4,1 */
15210
	/* Apple Macbook 4,1 */
15189
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15211
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15190
 
15212
 
15191
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
15213
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
15192
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15214
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15193
 
15215
 
15194
	/* HP Chromebook 14 (Celeron 2955U) */
15216
	/* HP Chromebook 14 (Celeron 2955U) */
15195
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15217
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15196
 
15218
 
15197
	/* Dell Chromebook 11 */
15219
	/* Dell Chromebook 11 */
15198
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15220
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15199
 
15221
 
15200
	/* Dell Chromebook 11 (2015 version) */
15222
	/* Dell Chromebook 11 (2015 version) */
15201
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15223
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15202
};
15224
};
15203
 
15225
 
15204
static void intel_init_quirks(struct drm_device *dev)
15226
static void intel_init_quirks(struct drm_device *dev)
15205
{
15227
{
15206
	struct pci_dev *d = dev->pdev;
15228
	struct pci_dev *d = dev->pdev;
15207
	int i;
15229
	int i;
15208
 
15230
 
15209
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15231
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15210
		struct intel_quirk *q = &intel_quirks[i];
15232
		struct intel_quirk *q = &intel_quirks[i];
15211
 
15233
 
15212
		if (d->device == q->device &&
15234
		if (d->device == q->device &&
15213
		    (d->subsystem_vendor == q->subsystem_vendor ||
15235
		    (d->subsystem_vendor == q->subsystem_vendor ||
15214
		     q->subsystem_vendor == PCI_ANY_ID) &&
15236
		     q->subsystem_vendor == PCI_ANY_ID) &&
15215
		    (d->subsystem_device == q->subsystem_device ||
15237
		    (d->subsystem_device == q->subsystem_device ||
15216
		     q->subsystem_device == PCI_ANY_ID))
15238
		     q->subsystem_device == PCI_ANY_ID))
15217
			q->hook(dev);
15239
			q->hook(dev);
15218
	}
15240
	}
15219
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15241
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15220
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15242
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15221
			intel_dmi_quirks[i].hook(dev);
15243
			intel_dmi_quirks[i].hook(dev);
15222
	}
15244
	}
15223
}
15245
}
15224
 
15246
 
15225
/* Disable the VGA plane that we never use */
15247
/* Disable the VGA plane that we never use */
15226
static void i915_disable_vga(struct drm_device *dev)
15248
static void i915_disable_vga(struct drm_device *dev)
15227
{
15249
{
15228
	struct drm_i915_private *dev_priv = dev->dev_private;
15250
	struct drm_i915_private *dev_priv = dev->dev_private;
15229
	u8 sr1;
15251
	u8 sr1;
15230
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15252
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15231
 
15253
 
15232
	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15254
	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15233
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
15255
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
15234
	outb(SR01, VGA_SR_INDEX);
15256
	outb(SR01, VGA_SR_INDEX);
15235
	sr1 = inb(VGA_SR_DATA);
15257
	sr1 = inb(VGA_SR_DATA);
15236
	outb(sr1 | 1<<5, VGA_SR_DATA);
15258
	outb(sr1 | 1<<5, VGA_SR_DATA);
15237
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
15259
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
15238
	udelay(300);
15260
	udelay(300);
15239
 
15261
 
15240
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15262
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15241
	POSTING_READ(vga_reg);
15263
	POSTING_READ(vga_reg);
15242
}
15264
}
15243
 
15265
 
15244
void intel_modeset_init_hw(struct drm_device *dev)
15266
void intel_modeset_init_hw(struct drm_device *dev)
15245
{
15267
{
-
 
15268
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
15269
 
15246
	intel_update_cdclk(dev);
15270
	intel_update_cdclk(dev);
-
 
15271
 
15247
	intel_prepare_ddi(dev);
15272
	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
-
 
15273
 
15248
	intel_init_clock_gating(dev);
15274
	intel_init_clock_gating(dev);
15249
	intel_enable_gt_powersave(dev);
15275
	intel_enable_gt_powersave(dev);
15250
}
15276
}
-
 
15277
 
-
 
15278
/*
-
 
15279
 * Calculate what we think the watermarks should be for the state we've read
-
 
15280
 * out of the hardware and then immediately program those watermarks so that
-
 
15281
 * we ensure the hardware settings match our internal state.
-
 
15282
 *
-
 
15283
 * We can calculate what we think WM's should be by creating a duplicate of the
-
 
15284
 * current state (which was constructed during hardware readout) and running it
-
 
15285
 * through the atomic check code to calculate new watermark values in the
-
 
15286
 * state object.
-
 
15287
 */
-
 
15288
static void sanitize_watermarks(struct drm_device *dev)
-
 
15289
{
-
 
15290
	struct drm_i915_private *dev_priv = to_i915(dev);
-
 
15291
	struct drm_atomic_state *state;
-
 
15292
	struct drm_crtc *crtc;
-
 
15293
	struct drm_crtc_state *cstate;
-
 
15294
	struct drm_modeset_acquire_ctx ctx;
-
 
15295
	int ret;
-
 
15296
	int i;
-
 
15297
 
-
 
15298
	/* Only supported on platforms that use atomic watermark design */
-
 
15299
	if (!dev_priv->display.program_watermarks)
-
 
15300
		return;
-
 
15301
 
-
 
15302
	/*
-
 
15303
	 * We need to hold connection_mutex before calling duplicate_state so
-
 
15304
	 * that the connector loop is protected.
-
 
15305
	 */
-
 
15306
	drm_modeset_acquire_init(&ctx, 0);
-
 
15307
retry:
-
 
15308
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-
 
15309
	if (ret == -EDEADLK) {
-
 
15310
		drm_modeset_backoff(&ctx);
-
 
15311
		goto retry;
-
 
15312
	} else if (WARN_ON(ret)) {
-
 
15313
		goto fail;
-
 
15314
	}
-
 
15315
 
-
 
15316
	state = drm_atomic_helper_duplicate_state(dev, &ctx);
-
 
15317
	if (WARN_ON(IS_ERR(state)))
-
 
15318
		goto fail;
-
 
15319
 
-
 
15320
	ret = intel_atomic_check(dev, state);
-
 
15321
	if (ret) {
-
 
15322
		/*
-
 
15323
		 * If we fail here, it means that the hardware appears to be
-
 
15324
		 * programmed in a way that shouldn't be possible, given our
-
 
15325
		 * understanding of watermark requirements.  This might mean a
-
 
15326
		 * mistake in the hardware readout code or a mistake in the
-
 
15327
		 * watermark calculations for a given platform.  Raise a WARN
-
 
15328
		 * so that this is noticeable.
-
 
15329
		 *
-
 
15330
		 * If this actually happens, we'll have to just leave the
-
 
15331
		 * BIOS-programmed watermarks untouched and hope for the best.
-
 
15332
		 */
-
 
15333
		WARN(true, "Could not determine valid watermarks for inherited state\n");
-
 
15334
		goto fail;
-
 
15335
	}
-
 
15336
 
-
 
15337
	/* Write calculated watermark values back */
-
 
15338
	to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
-
 
15339
	for_each_crtc_in_state(state, crtc, cstate, i) {
-
 
15340
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
-
 
15341
 
-
 
15342
		dev_priv->display.program_watermarks(cs);
-
 
15343
	}
-
 
15344
 
-
 
15345
	drm_atomic_state_free(state);
-
 
15346
fail:
-
 
15347
	drm_modeset_drop_locks(&ctx);
-
 
15348
	drm_modeset_acquire_fini(&ctx);
-
 
15349
}
15251
 
15350
 
15252
void intel_modeset_init(struct drm_device *dev)
15351
void intel_modeset_init(struct drm_device *dev)
15253
{
15352
{
15254
	struct drm_i915_private *dev_priv = dev->dev_private;
15353
	struct drm_i915_private *dev_priv = dev->dev_private;
15255
	int sprite, ret;
15354
	int sprite, ret;
15256
	enum pipe pipe;
15355
	enum pipe pipe;
15257
	struct intel_crtc *crtc;
15356
	struct intel_crtc *crtc;
15258
 
15357
 
15259
	drm_mode_config_init(dev);
15358
	drm_mode_config_init(dev);
15260
 
15359
 
15261
	dev->mode_config.min_width = 0;
15360
	dev->mode_config.min_width = 0;
15262
	dev->mode_config.min_height = 0;
15361
	dev->mode_config.min_height = 0;
15263
 
15362
 
15264
	dev->mode_config.preferred_depth = 24;
15363
	dev->mode_config.preferred_depth = 24;
15265
	dev->mode_config.prefer_shadow = 1;
15364
	dev->mode_config.prefer_shadow = 1;
15266
 
15365
 
15267
	dev->mode_config.allow_fb_modifiers = true;
15366
	dev->mode_config.allow_fb_modifiers = true;
15268
 
15367
 
15269
	dev->mode_config.funcs = &intel_mode_funcs;
15368
	dev->mode_config.funcs = &intel_mode_funcs;
15270
 
15369
 
15271
	intel_init_quirks(dev);
15370
	intel_init_quirks(dev);
15272
 
15371
 
15273
	intel_init_pm(dev);
15372
	intel_init_pm(dev);
15274
 
15373
 
15275
	if (INTEL_INFO(dev)->num_pipes == 0)
15374
	if (INTEL_INFO(dev)->num_pipes == 0)
15276
		return;
15375
		return;
15277
 
15376
 
15278
	/*
15377
	/*
15279
	 * There may be no VBT; and if the BIOS enabled SSC we can
15378
	 * There may be no VBT; and if the BIOS enabled SSC we can
15280
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
15379
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
15281
	 * BIOS isn't using it, don't assume it will work even if the VBT
15380
	 * BIOS isn't using it, don't assume it will work even if the VBT
15282
	 * indicates as much.
15381
	 * indicates as much.
15283
	 */
15382
	 */
15284
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
15383
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
15285
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15384
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15286
					    DREF_SSC1_ENABLE);
15385
					    DREF_SSC1_ENABLE);
15287
 
15386
 
15288
		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15387
		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15289
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15388
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15290
				     bios_lvds_use_ssc ? "en" : "dis",
15389
				     bios_lvds_use_ssc ? "en" : "dis",
15291
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15390
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15292
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15391
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15293
		}
15392
		}
15294
	}
15393
	}
15295
 
15394
 
15296
	intel_init_display(dev);
15395
	intel_init_display(dev);
15297
 
15396
 
15298
	if (IS_GEN2(dev)) {
15397
	if (IS_GEN2(dev)) {
15299
		dev->mode_config.max_width = 2048;
15398
		dev->mode_config.max_width = 2048;
15300
		dev->mode_config.max_height = 2048;
15399
		dev->mode_config.max_height = 2048;
15301
	} else if (IS_GEN3(dev)) {
15400
	} else if (IS_GEN3(dev)) {
15302
		dev->mode_config.max_width = 4096;
15401
		dev->mode_config.max_width = 4096;
15303
		dev->mode_config.max_height = 4096;
15402
		dev->mode_config.max_height = 4096;
15304
	} else {
15403
	} else {
15305
		dev->mode_config.max_width = 8192;
15404
		dev->mode_config.max_width = 8192;
15306
		dev->mode_config.max_height = 8192;
15405
		dev->mode_config.max_height = 8192;
15307
	}
15406
	}
15308
 
15407
 
15309
	if (IS_GEN2(dev)) {
15408
	if (IS_GEN2(dev)) {
15310
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
15409
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
15311
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
15410
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
15312
	} else {
15411
	} else {
15313
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
15412
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
15314
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
15413
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
15315
	}
15414
	}
15316
 
15415
 
15317
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
15416
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
15318
 
15417
 
15319
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
15418
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
15320
		      INTEL_INFO(dev)->num_pipes,
15419
		      INTEL_INFO(dev)->num_pipes,
15321
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
15420
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
15322
 
15421
 
15323
	for_each_pipe(dev_priv, pipe) {
15422
	for_each_pipe(dev_priv, pipe) {
15324
		intel_crtc_init(dev, pipe);
15423
		intel_crtc_init(dev, pipe);
15325
		for_each_sprite(dev_priv, pipe, sprite) {
15424
		for_each_sprite(dev_priv, pipe, sprite) {
15326
			ret = intel_plane_init(dev, pipe, sprite);
15425
			ret = intel_plane_init(dev, pipe, sprite);
15327
			if (ret)
15426
			if (ret)
15328
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
15427
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
15329
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
15428
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
15330
		}
15429
		}
15331
	}
15430
	}
15332
 
15431
 
15333
	intel_update_czclk(dev_priv);
15432
	intel_update_czclk(dev_priv);
15334
	intel_update_cdclk(dev);
15433
	intel_update_cdclk(dev);
15335
 
15434
 
15336
	intel_shared_dpll_init(dev);
15435
	intel_shared_dpll_init(dev);
15337
 
15436
 
15338
	/* Just disable it once at startup */
15437
	/* Just disable it once at startup */
15339
	i915_disable_vga(dev);
15438
	i915_disable_vga(dev);
15340
	intel_setup_outputs(dev);
15439
	intel_setup_outputs(dev);
15341
 
15440
 
15342
	drm_modeset_lock_all(dev);
15441
	drm_modeset_lock_all(dev);
15343
	intel_modeset_setup_hw_state(dev);
15442
	intel_modeset_setup_hw_state(dev);
15344
	drm_modeset_unlock_all(dev);
15443
	drm_modeset_unlock_all(dev);
15345
 
15444
 
15346
	for_each_intel_crtc(dev, crtc) {
15445
	for_each_intel_crtc(dev, crtc) {
15347
		struct intel_initial_plane_config plane_config = {};
15446
		struct intel_initial_plane_config plane_config = {};
15348
 
15447
 
15349
		if (!crtc->active)
15448
		if (!crtc->active)
15350
			continue;
15449
			continue;
15351
 
15450
 
15352
		/*
15451
		/*
15353
		 * Note that reserving the BIOS fb up front prevents us
15452
		 * Note that reserving the BIOS fb up front prevents us
15354
		 * from stuffing other stolen allocations like the ring
15453
		 * from stuffing other stolen allocations like the ring
15355
		 * on top.  This prevents some ugliness at boot time, and
15454
		 * on top.  This prevents some ugliness at boot time, and
15356
		 * can even allow for smooth boot transitions if the BIOS
15455
		 * can even allow for smooth boot transitions if the BIOS
15357
		 * fb is large enough for the active pipe configuration.
15456
		 * fb is large enough for the active pipe configuration.
15358
		 */
15457
		 */
15359
		dev_priv->display.get_initial_plane_config(crtc,
15458
		dev_priv->display.get_initial_plane_config(crtc,
15360
							   &plane_config);
15459
							   &plane_config);
15361
 
15460
 
15362
		/*
15461
		/*
15363
		 * If the fb is shared between multiple heads, we'll
15462
		 * If the fb is shared between multiple heads, we'll
15364
		 * just get the first one.
15463
		 * just get the first one.
15365
		 */
15464
		 */
15366
		intel_find_initial_plane_obj(crtc, &plane_config);
15465
		intel_find_initial_plane_obj(crtc, &plane_config);
15367
	}
15466
	}
-
 
15467
 
-
 
15468
	/*
-
 
15469
	 * Make sure hardware watermarks really match the state we read out.
-
 
15470
	 * Note that we need to do this after reconstructing the BIOS fb's
-
 
15471
	 * since the watermark calculation done here will use pstate->fb.
-
 
15472
	 */
-
 
15473
	sanitize_watermarks(dev);
15368
}
15474
}
15369
 
15475
 
15370
static void intel_enable_pipe_a(struct drm_device *dev)
15476
static void intel_enable_pipe_a(struct drm_device *dev)
15371
{
15477
{
15372
	struct intel_connector *connector;
15478
	struct intel_connector *connector;
15373
	struct drm_connector *crt = NULL;
15479
	struct drm_connector *crt = NULL;
15374
	struct intel_load_detect_pipe load_detect_temp;
15480
	struct intel_load_detect_pipe load_detect_temp;
15375
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15481
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15376
 
15482
 
15377
	/* We can't just switch on the pipe A, we need to set things up with a
15483
	/* We can't just switch on the pipe A, we need to set things up with a
15378
	 * proper mode and output configuration. As a gross hack, enable pipe A
15484
	 * proper mode and output configuration. As a gross hack, enable pipe A
15379
	 * by enabling the load detect pipe once. */
15485
	 * by enabling the load detect pipe once. */
15380
	for_each_intel_connector(dev, connector) {
15486
	for_each_intel_connector(dev, connector) {
15381
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15487
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15382
			crt = &connector->base;
15488
			crt = &connector->base;
15383
			break;
15489
			break;
15384
		}
15490
		}
15385
	}
15491
	}
15386
 
15492
 
15387
	if (!crt)
15493
	if (!crt)
15388
		return;
15494
		return;
15389
 
15495
 
15390
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15496
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15391
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15497
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15392
}
15498
}
15393
 
15499
 
15394
static bool
15500
static bool
15395
intel_check_plane_mapping(struct intel_crtc *crtc)
15501
intel_check_plane_mapping(struct intel_crtc *crtc)
15396
{
15502
{
15397
	struct drm_device *dev = crtc->base.dev;
15503
	struct drm_device *dev = crtc->base.dev;
15398
	struct drm_i915_private *dev_priv = dev->dev_private;
15504
	struct drm_i915_private *dev_priv = dev->dev_private;
15399
	u32 val;
15505
	u32 val;
15400
 
15506
 
15401
	if (INTEL_INFO(dev)->num_pipes == 1)
15507
	if (INTEL_INFO(dev)->num_pipes == 1)
15402
		return true;
15508
		return true;
15403
 
15509
 
15404
	val = I915_READ(DSPCNTR(!crtc->plane));
15510
	val = I915_READ(DSPCNTR(!crtc->plane));
15405
 
15511
 
15406
	if ((val & DISPLAY_PLANE_ENABLE) &&
15512
	if ((val & DISPLAY_PLANE_ENABLE) &&
15407
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15513
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15408
		return false;
15514
		return false;
15409
 
15515
 
15410
	return true;
15516
	return true;
15411
}
15517
}
15412
 
15518
 
15413
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15519
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15414
{
15520
{
15415
	struct drm_device *dev = crtc->base.dev;
15521
	struct drm_device *dev = crtc->base.dev;
15416
	struct intel_encoder *encoder;
15522
	struct intel_encoder *encoder;
15417
 
15523
 
15418
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15524
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15419
		return true;
15525
		return true;
15420
 
15526
 
15421
	return false;
15527
	return false;
15422
}
15528
}
-
 
15529
 
-
 
15530
static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
-
 
15531
{
-
 
15532
	struct drm_device *dev = encoder->base.dev;
-
 
15533
	struct intel_connector *connector;
-
 
15534
 
-
 
15535
	for_each_connector_on_encoder(dev, &encoder->base, connector)
-
 
15536
		return true;
-
 
15537
 
-
 
15538
	return false;
-
 
15539
}
15423
 
15540
 
15424
static void intel_sanitize_crtc(struct intel_crtc *crtc)
15541
static void intel_sanitize_crtc(struct intel_crtc *crtc)
15425
{
15542
{
15426
	struct drm_device *dev = crtc->base.dev;
15543
	struct drm_device *dev = crtc->base.dev;
15427
	struct drm_i915_private *dev_priv = dev->dev_private;
15544
	struct drm_i915_private *dev_priv = dev->dev_private;
15428
	i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
15545
	i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
15429
 
15546
 
15430
	/* Clear any frame start delays used for debugging left by the BIOS */
15547
	/* Clear any frame start delays used for debugging left by the BIOS */
15431
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15548
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15432
 
15549
 
15433
	/* restore vblank interrupts to correct state */
15550
	/* restore vblank interrupts to correct state */
15434
	drm_crtc_vblank_reset(&crtc->base);
15551
	drm_crtc_vblank_reset(&crtc->base);
15435
	if (crtc->active) {
15552
	if (crtc->active) {
15436
		struct intel_plane *plane;
15553
		struct intel_plane *plane;
15437
 
15554
 
15438
		drm_crtc_vblank_on(&crtc->base);
15555
		drm_crtc_vblank_on(&crtc->base);
15439
 
15556
 
15440
		/* Disable everything but the primary plane */
15557
		/* Disable everything but the primary plane */
15441
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
15558
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
15442
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15559
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15443
				continue;
15560
				continue;
15444
 
15561
 
15445
			plane->disable_plane(&plane->base, &crtc->base);
15562
			plane->disable_plane(&plane->base, &crtc->base);
15446
		}
15563
		}
15447
	}
15564
	}
15448
 
15565
 
15449
	/* We need to sanitize the plane -> pipe mapping first because this will
15566
	/* We need to sanitize the plane -> pipe mapping first because this will
15450
	 * disable the crtc (and hence change the state) if it is wrong. Note
15567
	 * disable the crtc (and hence change the state) if it is wrong. Note
15451
	 * that gen4+ has a fixed plane -> pipe mapping.  */
15568
	 * that gen4+ has a fixed plane -> pipe mapping.  */
15452
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15569
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15453
		bool plane;
15570
		bool plane;
15454
 
15571
 
15455
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
15572
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
15456
			      crtc->base.base.id);
15573
			      crtc->base.base.id);
15457
 
15574
 
15458
		/* Pipe has the wrong plane attached and the plane is active.
15575
		/* Pipe has the wrong plane attached and the plane is active.
15459
		 * Temporarily change the plane mapping and disable everything
15576
		 * Temporarily change the plane mapping and disable everything
15460
		 * ...  */
15577
		 * ...  */
15461
		plane = crtc->plane;
15578
		plane = crtc->plane;
15462
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
15579
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
15463
		crtc->plane = !plane;
15580
		crtc->plane = !plane;
15464
		intel_crtc_disable_noatomic(&crtc->base);
15581
		intel_crtc_disable_noatomic(&crtc->base);
15465
		crtc->plane = plane;
15582
		crtc->plane = plane;
15466
	}
15583
	}
15467
 
15584
 
15468
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15585
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15469
	    crtc->pipe == PIPE_A && !crtc->active) {
15586
	    crtc->pipe == PIPE_A && !crtc->active) {
15470
		/* BIOS forgot to enable pipe A, this mostly happens after
15587
		/* BIOS forgot to enable pipe A, this mostly happens after
15471
		 * resume. Force-enable the pipe to fix this, the update_dpms
15588
		 * resume. Force-enable the pipe to fix this, the update_dpms
15472
		 * call below we restore the pipe to the right state, but leave
15589
		 * call below we restore the pipe to the right state, but leave
15473
		 * the required bits on. */
15590
		 * the required bits on. */
15474
		intel_enable_pipe_a(dev);
15591
		intel_enable_pipe_a(dev);
15475
	}
15592
	}
15476
 
15593
 
15477
	/* Adjust the state of the output pipe according to whether we
15594
	/* Adjust the state of the output pipe according to whether we
15478
	 * have active connectors/encoders. */
15595
	 * have active connectors/encoders. */
15479
	if (!intel_crtc_has_encoders(crtc))
15596
	if (!intel_crtc_has_encoders(crtc))
15480
		intel_crtc_disable_noatomic(&crtc->base);
15597
		intel_crtc_disable_noatomic(&crtc->base);
15481
 
15598
 
15482
	if (crtc->active != crtc->base.state->active) {
15599
	if (crtc->active != crtc->base.state->active) {
15483
		struct intel_encoder *encoder;
15600
		struct intel_encoder *encoder;
15484
 
15601
 
15485
		/* This can happen either due to bugs in the get_hw_state
15602
		/* This can happen either due to bugs in the get_hw_state
15486
		 * functions or because of calls to intel_crtc_disable_noatomic,
15603
		 * functions or because of calls to intel_crtc_disable_noatomic,
15487
		 * or because the pipe is force-enabled due to the
15604
		 * or because the pipe is force-enabled due to the
15488
		 * pipe A quirk. */
15605
		 * pipe A quirk. */
15489
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
15606
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
15490
			      crtc->base.base.id,
15607
			      crtc->base.base.id,
15491
			      crtc->base.state->enable ? "enabled" : "disabled",
15608
			      crtc->base.state->enable ? "enabled" : "disabled",
15492
			      crtc->active ? "enabled" : "disabled");
15609
			      crtc->active ? "enabled" : "disabled");
15493
 
15610
 
15494
		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
15611
		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
15495
		crtc->base.state->active = crtc->active;
15612
		crtc->base.state->active = crtc->active;
15496
		crtc->base.enabled = crtc->active;
15613
		crtc->base.enabled = crtc->active;
15497
		crtc->base.state->connector_mask = 0;
15614
		crtc->base.state->connector_mask = 0;
-
 
15615
		crtc->base.state->encoder_mask = 0;
15498
 
15616
 
15499
		/* Because we only establish the connector -> encoder ->
15617
		/* Because we only establish the connector -> encoder ->
15500
		 * crtc links if something is active, this means the
15618
		 * crtc links if something is active, this means the
15501
		 * crtc is now deactivated. Break the links. connector
15619
		 * crtc is now deactivated. Break the links. connector
15502
		 * -> encoder links are only establish when things are
15620
		 * -> encoder links are only establish when things are
15503
		 *  actually up, hence no need to break them. */
15621
		 *  actually up, hence no need to break them. */
15504
		WARN_ON(crtc->active);
15622
		WARN_ON(crtc->active);
15505
 
15623
 
15506
		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15624
		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15507
			encoder->base.crtc = NULL;
15625
			encoder->base.crtc = NULL;
15508
	}
15626
	}
15509
 
15627
 
15510
	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15628
	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15511
		/*
15629
		/*
15512
		 * We start out with underrun reporting disabled to avoid races.
15630
		 * We start out with underrun reporting disabled to avoid races.
15513
		 * For correct bookkeeping mark this on active crtcs.
15631
		 * For correct bookkeeping mark this on active crtcs.
15514
		 *
15632
		 *
15515
		 * Also on gmch platforms we dont have any hardware bits to
15633
		 * Also on gmch platforms we dont have any hardware bits to
15516
		 * disable the underrun reporting. Which means we need to start
15634
		 * disable the underrun reporting. Which means we need to start
15517
		 * out with underrun reporting disabled also on inactive pipes,
15635
		 * out with underrun reporting disabled also on inactive pipes,
15518
		 * since otherwise we'll complain about the garbage we read when
15636
		 * since otherwise we'll complain about the garbage we read when
15519
		 * e.g. coming up after runtime pm.
15637
		 * e.g. coming up after runtime pm.
15520
		 *
15638
		 *
15521
		 * No protection against concurrent access is required - at
15639
		 * No protection against concurrent access is required - at
15522
		 * worst a fifo underrun happens which also sets this to false.
15640
		 * worst a fifo underrun happens which also sets this to false.
15523
		 */
15641
		 */
15524
		crtc->cpu_fifo_underrun_disabled = true;
15642
		crtc->cpu_fifo_underrun_disabled = true;
15525
		crtc->pch_fifo_underrun_disabled = true;
15643
		crtc->pch_fifo_underrun_disabled = true;
15526
	}
15644
	}
15527
}
15645
}
15528
 
15646
 
15529
static void intel_sanitize_encoder(struct intel_encoder *encoder)
15647
static void intel_sanitize_encoder(struct intel_encoder *encoder)
15530
{
15648
{
15531
	struct intel_connector *connector;
15649
	struct intel_connector *connector;
15532
	struct drm_device *dev = encoder->base.dev;
15650
	struct drm_device *dev = encoder->base.dev;
15533
	bool active = false;
-
 
15534
 
15651
 
15535
	/* We need to check both for a crtc link (meaning that the
15652
	/* We need to check both for a crtc link (meaning that the
15536
	 * encoder is active and trying to read from a pipe) and the
15653
	 * encoder is active and trying to read from a pipe) and the
15537
	 * pipe itself being active. */
15654
	 * pipe itself being active. */
15538
	bool has_active_crtc = encoder->base.crtc &&
15655
	bool has_active_crtc = encoder->base.crtc &&
15539
		to_intel_crtc(encoder->base.crtc)->active;
15656
		to_intel_crtc(encoder->base.crtc)->active;
15540
 
-
 
15541
	for_each_intel_connector(dev, connector) {
-
 
15542
		if (connector->base.encoder != &encoder->base)
-
 
15543
			continue;
-
 
15544
 
-
 
15545
		active = true;
-
 
15546
		break;
-
 
15547
	}
-
 
15548
 
15657
 
15549
	if (active && !has_active_crtc) {
15658
	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
15550
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15659
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15551
			      encoder->base.base.id,
15660
			      encoder->base.base.id,
15552
			      encoder->base.name);
15661
			      encoder->base.name);
15553
 
15662
 
15554
		/* Connector is active, but has no active pipe. This is
15663
		/* Connector is active, but has no active pipe. This is
15555
		 * fallout from our resume register restoring. Disable
15664
		 * fallout from our resume register restoring. Disable
15556
		 * the encoder manually again. */
15665
		 * the encoder manually again. */
15557
		if (encoder->base.crtc) {
15666
		if (encoder->base.crtc) {
15558
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15667
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15559
				      encoder->base.base.id,
15668
				      encoder->base.base.id,
15560
				      encoder->base.name);
15669
				      encoder->base.name);
15561
			encoder->disable(encoder);
15670
			encoder->disable(encoder);
15562
			if (encoder->post_disable)
15671
			if (encoder->post_disable)
15563
				encoder->post_disable(encoder);
15672
				encoder->post_disable(encoder);
15564
		}
15673
		}
15565
		encoder->base.crtc = NULL;
15674
		encoder->base.crtc = NULL;
15566
 
15675
 
15567
		/* Inconsistent output/port/pipe state happens presumably due to
15676
		/* Inconsistent output/port/pipe state happens presumably due to
15568
		 * a bug in one of the get_hw_state functions. Or someplace else
15677
		 * a bug in one of the get_hw_state functions. Or someplace else
15569
		 * in our code, like the register restore mess on resume. Clamp
15678
		 * in our code, like the register restore mess on resume. Clamp
15570
		 * things to off as a safer default. */
15679
		 * things to off as a safer default. */
15571
		for_each_intel_connector(dev, connector) {
15680
		for_each_intel_connector(dev, connector) {
15572
			if (connector->encoder != encoder)
15681
			if (connector->encoder != encoder)
15573
				continue;
15682
				continue;
15574
			connector->base.dpms = DRM_MODE_DPMS_OFF;
15683
			connector->base.dpms = DRM_MODE_DPMS_OFF;
15575
			connector->base.encoder = NULL;
15684
			connector->base.encoder = NULL;
15576
		}
15685
		}
15577
	}
15686
	}
15578
	/* Enabled encoders without active connectors will be fixed in
15687
	/* Enabled encoders without active connectors will be fixed in
15579
	 * the crtc fixup. */
15688
	 * the crtc fixup. */
15580
}
15689
}
15581
 
15690
 
15582
void i915_redisable_vga_power_on(struct drm_device *dev)
15691
void i915_redisable_vga_power_on(struct drm_device *dev)
15583
{
15692
{
15584
	struct drm_i915_private *dev_priv = dev->dev_private;
15693
	struct drm_i915_private *dev_priv = dev->dev_private;
15585
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15694
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15586
 
15695
 
15587
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15696
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15588
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15697
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15589
		i915_disable_vga(dev);
15698
		i915_disable_vga(dev);
15590
	}
15699
	}
15591
}
15700
}
15592
 
15701
 
15593
void i915_redisable_vga(struct drm_device *dev)
15702
void i915_redisable_vga(struct drm_device *dev)
15594
{
15703
{
15595
	struct drm_i915_private *dev_priv = dev->dev_private;
15704
	struct drm_i915_private *dev_priv = dev->dev_private;
15596
 
15705
 
15597
	/* This function can be called both from intel_modeset_setup_hw_state or
15706
	/* This function can be called both from intel_modeset_setup_hw_state or
15598
	 * at a very early point in our resume sequence, where the power well
15707
	 * at a very early point in our resume sequence, where the power well
15599
	 * structures are not yet restored. Since this function is at a very
15708
	 * structures are not yet restored. Since this function is at a very
15600
	 * paranoid "someone might have enabled VGA while we were not looking"
15709
	 * paranoid "someone might have enabled VGA while we were not looking"
15601
	 * level, just check if the power well is enabled instead of trying to
15710
	 * level, just check if the power well is enabled instead of trying to
15602
	 * follow the "don't touch the power well if we don't need it" policy
15711
	 * follow the "don't touch the power well if we don't need it" policy
15603
	 * the rest of the driver uses. */
15712
	 * the rest of the driver uses. */
15604
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15713
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15605
		return;
15714
		return;
15606
 
15715
 
15607
	i915_redisable_vga_power_on(dev);
15716
	i915_redisable_vga_power_on(dev);
15608
 
15717
 
15609
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15718
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15610
}
15719
}
15611
 
15720
 
15612
static bool primary_get_hw_state(struct intel_plane *plane)
15721
static bool primary_get_hw_state(struct intel_plane *plane)
15613
{
15722
{
15614
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15723
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15615
 
15724
 
15616
	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15725
	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15617
}
15726
}
15618
 
15727
 
15619
/* FIXME read out full plane state for all planes */
15728
/* FIXME read out full plane state for all planes */
15620
static void readout_plane_state(struct intel_crtc *crtc)
15729
static void readout_plane_state(struct intel_crtc *crtc)
15621
{
15730
{
15622
	struct drm_plane *primary = crtc->base.primary;
15731
	struct drm_plane *primary = crtc->base.primary;
15623
	struct intel_plane_state *plane_state =
15732
	struct intel_plane_state *plane_state =
15624
		to_intel_plane_state(primary->state);
15733
		to_intel_plane_state(primary->state);
15625
 
15734
 
15626
	plane_state->visible = crtc->active &&
15735
	plane_state->visible = crtc->active &&
15627
		primary_get_hw_state(to_intel_plane(primary));
15736
		primary_get_hw_state(to_intel_plane(primary));
15628
 
15737
 
15629
	if (plane_state->visible)
15738
	if (plane_state->visible)
15630
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15739
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15631
}
15740
}
15632
 
15741
 
15633
static void intel_modeset_readout_hw_state(struct drm_device *dev)
15742
static void intel_modeset_readout_hw_state(struct drm_device *dev)
15634
{
15743
{
15635
	struct drm_i915_private *dev_priv = dev->dev_private;
15744
	struct drm_i915_private *dev_priv = dev->dev_private;
15636
	enum pipe pipe;
15745
	enum pipe pipe;
15637
	struct intel_crtc *crtc;
15746
	struct intel_crtc *crtc;
15638
	struct intel_encoder *encoder;
15747
	struct intel_encoder *encoder;
15639
	struct intel_connector *connector;
15748
	struct intel_connector *connector;
15640
	int i;
15749
	int i;
-
 
15750
 
-
 
15751
	dev_priv->active_crtcs = 0;
15641
 
15752
 
15642
	for_each_intel_crtc(dev, crtc) {
15753
	for_each_intel_crtc(dev, crtc) {
15643
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
-
 
15644
		memset(crtc->config, 0, sizeof(*crtc->config));
15754
		struct intel_crtc_state *crtc_state = crtc->config;
15645
		crtc->config->base.crtc = &crtc->base;
15755
		int pixclk = 0;
-
 
15756
 
15646
 
15757
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
15647
		crtc->active = dev_priv->display.get_pipe_config(crtc,
15758
		memset(crtc_state, 0, sizeof(*crtc_state));
-
 
15759
		crtc_state->base.crtc = &crtc->base;
-
 
15760
 
15648
								 crtc->config);
15761
		crtc_state->base.active = crtc_state->base.enable =
-
 
15762
			dev_priv->display.get_pipe_config(crtc, crtc_state);
-
 
15763
 
-
 
15764
		crtc->base.enabled = crtc_state->base.enable;
-
 
15765
		crtc->active = crtc_state->base.active;
-
 
15766
 
-
 
15767
		if (crtc_state->base.active) {
-
 
15768
			dev_priv->active_crtcs |= 1 << crtc->pipe;
-
 
15769
 
-
 
15770
			if (IS_BROADWELL(dev_priv)) {
-
 
15771
				pixclk = ilk_pipe_pixel_rate(crtc_state);
-
 
15772
 
-
 
15773
				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-
 
15774
				if (crtc_state->ips_enabled)
-
 
15775
					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
-
 
15776
			} else if (IS_VALLEYVIEW(dev_priv) ||
-
 
15777
				   IS_CHERRYVIEW(dev_priv) ||
-
 
15778
				   IS_BROXTON(dev_priv))
-
 
15779
				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
-
 
15780
			else
-
 
15781
				WARN_ON(dev_priv->display.modeset_calc_cdclk);
15649
 
15782
		}
15650
		crtc->base.state->active = crtc->active;
15783
 
15651
		crtc->base.enabled = crtc->active;
15784
		dev_priv->min_pixclk[crtc->pipe] = pixclk;
15652
 
15785
 
15653
		readout_plane_state(crtc);
15786
		readout_plane_state(crtc);
15654
 
15787
 
15655
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15788
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15656
			      crtc->base.base.id,
15789
			      crtc->base.base.id,
15657
			      crtc->active ? "enabled" : "disabled");
15790
			      crtc->active ? "enabled" : "disabled");
15658
	}
15791
	}
15659
 
15792
 
15660
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15793
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15661
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15794
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15662
 
15795
 
15663
		pll->on = pll->get_hw_state(dev_priv, pll,
15796
		pll->on = pll->get_hw_state(dev_priv, pll,
15664
					    &pll->config.hw_state);
15797
					    &pll->config.hw_state);
15665
		pll->active = 0;
15798
		pll->active = 0;
15666
		pll->config.crtc_mask = 0;
15799
		pll->config.crtc_mask = 0;
15667
		for_each_intel_crtc(dev, crtc) {
15800
		for_each_intel_crtc(dev, crtc) {
15668
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
15801
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
15669
				pll->active++;
15802
				pll->active++;
15670
				pll->config.crtc_mask |= 1 << crtc->pipe;
15803
				pll->config.crtc_mask |= 1 << crtc->pipe;
15671
			}
15804
			}
15672
		}
15805
		}
15673
 
15806
 
15674
		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15807
		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15675
			      pll->name, pll->config.crtc_mask, pll->on);
15808
			      pll->name, pll->config.crtc_mask, pll->on);
15676
 
15809
 
15677
		if (pll->config.crtc_mask)
15810
		if (pll->config.crtc_mask)
15678
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
15811
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
15679
	}
15812
	}
15680
 
15813
 
15681
	for_each_intel_encoder(dev, encoder) {
15814
	for_each_intel_encoder(dev, encoder) {
15682
		pipe = 0;
15815
		pipe = 0;
15683
 
15816
 
15684
		if (encoder->get_hw_state(encoder, &pipe)) {
15817
		if (encoder->get_hw_state(encoder, &pipe)) {
15685
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15818
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15686
			encoder->base.crtc = &crtc->base;
15819
			encoder->base.crtc = &crtc->base;
15687
			encoder->get_config(encoder, crtc->config);
15820
			encoder->get_config(encoder, crtc->config);
15688
		} else {
15821
		} else {
15689
			encoder->base.crtc = NULL;
15822
			encoder->base.crtc = NULL;
15690
		}
15823
		}
15691
 
15824
 
15692
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15825
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15693
			      encoder->base.base.id,
15826
			      encoder->base.base.id,
15694
			      encoder->base.name,
15827
			      encoder->base.name,
15695
			      encoder->base.crtc ? "enabled" : "disabled",
15828
			      encoder->base.crtc ? "enabled" : "disabled",
15696
			      pipe_name(pipe));
15829
			      pipe_name(pipe));
15697
	}
15830
	}
15698
 
15831
 
15699
	for_each_intel_connector(dev, connector) {
15832
	for_each_intel_connector(dev, connector) {
15700
		if (connector->get_hw_state(connector)) {
15833
		if (connector->get_hw_state(connector)) {
15701
			connector->base.dpms = DRM_MODE_DPMS_ON;
15834
			connector->base.dpms = DRM_MODE_DPMS_ON;
15702
 
15835
 
15703
			encoder = connector->encoder;
15836
			encoder = connector->encoder;
15704
			connector->base.encoder = &encoder->base;
15837
			connector->base.encoder = &encoder->base;
15705
 
15838
 
15706
			if (encoder->base.crtc &&
15839
			if (encoder->base.crtc &&
15707
			    encoder->base.crtc->state->active) {
15840
			    encoder->base.crtc->state->active) {
15708
				/*
15841
				/*
15709
				 * This has to be done during hardware readout
15842
				 * This has to be done during hardware readout
15710
				 * because anything calling .crtc_disable may
15843
				 * because anything calling .crtc_disable may
15711
				 * rely on the connector_mask being accurate.
15844
				 * rely on the connector_mask being accurate.
15712
				 */
15845
				 */
15713
				encoder->base.crtc->state->connector_mask |=
15846
				encoder->base.crtc->state->connector_mask |=
15714
					1 << drm_connector_index(&connector->base);
15847
					1 << drm_connector_index(&connector->base);
-
 
15848
				encoder->base.crtc->state->encoder_mask |=
-
 
15849
					1 << drm_encoder_index(&encoder->base);
15715
			}
15850
			}
15716
 
15851
 
15717
		} else {
15852
		} else {
15718
			connector->base.dpms = DRM_MODE_DPMS_OFF;
15853
			connector->base.dpms = DRM_MODE_DPMS_OFF;
15719
			connector->base.encoder = NULL;
15854
			connector->base.encoder = NULL;
15720
		}
15855
		}
15721
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15856
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15722
			      connector->base.base.id,
15857
			      connector->base.base.id,
15723
			      connector->base.name,
15858
			      connector->base.name,
15724
			      connector->base.encoder ? "enabled" : "disabled");
15859
			      connector->base.encoder ? "enabled" : "disabled");
15725
	}
15860
	}
15726
 
15861
 
15727
	for_each_intel_crtc(dev, crtc) {
15862
	for_each_intel_crtc(dev, crtc) {
15728
		crtc->base.hwmode = crtc->config->base.adjusted_mode;
15863
		crtc->base.hwmode = crtc->config->base.adjusted_mode;
15729
 
15864
 
15730
		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15865
		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15731
		if (crtc->base.state->active) {
15866
		if (crtc->base.state->active) {
15732
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15867
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15733
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15868
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15734
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15869
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15735
 
15870
 
15736
			/*
15871
			/*
15737
			 * The initial mode needs to be set in order to keep
15872
			 * The initial mode needs to be set in order to keep
15738
			 * the atomic core happy. It wants a valid mode if the
15873
			 * the atomic core happy. It wants a valid mode if the
15739
			 * crtc's enabled, so we do the above call.
15874
			 * crtc's enabled, so we do the above call.
15740
			 *
15875
			 *
15741
			 * At this point some state updated by the connectors
15876
			 * At this point some state updated by the connectors
15742
			 * in their ->detect() callback has not run yet, so
15877
			 * in their ->detect() callback has not run yet, so
15743
			 * no recalculation can be done yet.
15878
			 * no recalculation can be done yet.
15744
			 *
15879
			 *
15745
			 * Even if we could do a recalculation and modeset
15880
			 * Even if we could do a recalculation and modeset
15746
			 * right now it would cause a double modeset if
15881
			 * right now it would cause a double modeset if
15747
			 * fbdev or userspace chooses a different initial mode.
15882
			 * fbdev or userspace chooses a different initial mode.
15748
			 *
15883
			 *
15749
			 * If that happens, someone indicated they wanted a
15884
			 * If that happens, someone indicated they wanted a
15750
			 * mode change, which means it's safe to do a full
15885
			 * mode change, which means it's safe to do a full
15751
			 * recalculation.
15886
			 * recalculation.
15752
			 */
15887
			 */
15753
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15888
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15754
 
15889
 
15755
			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
15890
			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
15756
			update_scanline_offset(crtc);
15891
			update_scanline_offset(crtc);
15757
		}
15892
		}
15758
	}
15893
	}
15759
}
15894
}
15760
 
15895
 
15761
/* Scan out the current hw modeset state,
15896
/* Scan out the current hw modeset state,
15762
 * and sanitizes it to the current state
15897
 * and sanitizes it to the current state
15763
 */
15898
 */
15764
static void
15899
static void
15765
intel_modeset_setup_hw_state(struct drm_device *dev)
15900
intel_modeset_setup_hw_state(struct drm_device *dev)
15766
{
15901
{
15767
	struct drm_i915_private *dev_priv = dev->dev_private;
15902
	struct drm_i915_private *dev_priv = dev->dev_private;
15768
	enum pipe pipe;
15903
	enum pipe pipe;
15769
	struct intel_crtc *crtc;
15904
	struct intel_crtc *crtc;
15770
	struct intel_encoder *encoder;
15905
	struct intel_encoder *encoder;
15771
	int i;
15906
	int i;
15772
 
15907
 
15773
	intel_modeset_readout_hw_state(dev);
15908
	intel_modeset_readout_hw_state(dev);
15774
 
15909
 
15775
	/* HW state is read out, now we need to sanitize this mess. */
15910
	/* HW state is read out, now we need to sanitize this mess. */
15776
	for_each_intel_encoder(dev, encoder) {
15911
	for_each_intel_encoder(dev, encoder) {
15777
		intel_sanitize_encoder(encoder);
15912
		intel_sanitize_encoder(encoder);
15778
	}
15913
	}
15779
 
15914
 
15780
	for_each_pipe(dev_priv, pipe) {
15915
	for_each_pipe(dev_priv, pipe) {
15781
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15916
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15782
		intel_sanitize_crtc(crtc);
15917
		intel_sanitize_crtc(crtc);
15783
		intel_dump_pipe_config(crtc, crtc->config,
15918
		intel_dump_pipe_config(crtc, crtc->config,
15784
				       "[setup_hw_state]");
15919
				       "[setup_hw_state]");
15785
	}
15920
	}
15786
 
15921
 
15787
	intel_modeset_update_connector_atomic_state(dev);
15922
	intel_modeset_update_connector_atomic_state(dev);
15788
 
15923
 
15789
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15924
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15790
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15925
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15791
 
15926
 
15792
		if (!pll->on || pll->active)
15927
		if (!pll->on || pll->active)
15793
			continue;
15928
			continue;
15794
 
15929
 
15795
		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
15930
		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
15796
 
15931
 
15797
		pll->disable(dev_priv, pll);
15932
		pll->disable(dev_priv, pll);
15798
		pll->on = false;
15933
		pll->on = false;
15799
	}
15934
	}
15800
 
15935
 
15801
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
15936
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
15802
		vlv_wm_get_hw_state(dev);
15937
		vlv_wm_get_hw_state(dev);
15803
	else if (IS_GEN9(dev))
15938
	else if (IS_GEN9(dev))
15804
		skl_wm_get_hw_state(dev);
15939
		skl_wm_get_hw_state(dev);
15805
	else if (HAS_PCH_SPLIT(dev))
15940
	else if (HAS_PCH_SPLIT(dev))
15806
		ilk_wm_get_hw_state(dev);
15941
		ilk_wm_get_hw_state(dev);
15807
 
15942
 
15808
	for_each_intel_crtc(dev, crtc) {
15943
	for_each_intel_crtc(dev, crtc) {
15809
		unsigned long put_domains;
15944
		unsigned long put_domains;
15810
 
15945
 
15811
		put_domains = modeset_get_crtc_power_domains(&crtc->base);
15946
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
15812
		if (WARN_ON(put_domains))
15947
		if (WARN_ON(put_domains))
15813
			modeset_put_power_domains(dev_priv, put_domains);
15948
			modeset_put_power_domains(dev_priv, put_domains);
15814
	}
15949
	}
15815
	intel_display_set_init_power(dev_priv, false);
15950
	intel_display_set_init_power(dev_priv, false);
-
 
15951
 
-
 
15952
	intel_fbc_init_pipe_state(dev_priv);
15816
}
15953
}
15817
 
15954
 
15818
void intel_display_resume(struct drm_device *dev)
15955
void intel_display_resume(struct drm_device *dev)
15819
{
15956
{
15820
	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
15957
	struct drm_i915_private *dev_priv = to_i915(dev);
15821
	struct intel_connector *conn;
15958
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
15822
	struct intel_plane *plane;
-
 
15823
	struct drm_crtc *crtc;
15959
	struct drm_modeset_acquire_ctx ctx;
15824
	int ret;
15960
	int ret;
-
 
15961
	bool setup = false;
15825
 
15962
 
15826
	if (!state)
-
 
-
 
15963
	dev_priv->modeset_restore_state = NULL;
-
 
15964
 
-
 
15965
	/*
-
 
15966
	 * This is a cludge because with real atomic modeset mode_config.mutex
-
 
15967
	 * won't be taken. Unfortunately some probed state like
-
 
15968
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
15827
		return;
15969
	 * it here for now.
-
 
15970
	 */
15828
 
15971
	mutex_lock(&dev->mode_config.mutex);
15829
	state->acquire_ctx = dev->mode_config.acquire_ctx;
15972
	drm_modeset_acquire_init(&ctx, 0);
-
 
15973
 
-
 
15974
retry:
-
 
15975
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-
 
15976
 
15830
 
15977
	/*
-
 
15978
	 * With MST, the number of connectors can change between suspend and
15831
	/* preserve complete old state, including dpll */
15979
	 * resume, which means that the state we want to restore might now be
-
 
15980
	 * impossible to use since it'll be pointing to non-existant
15832
	intel_atomic_get_shared_dpll_state(state);
15981
	 * connectors.
-
 
15982
	 */
-
 
15983
	if (ret == 0 && state &&
15833
 
-
 
15834
	for_each_crtc(dev, crtc) {
15984
	    state->num_connector != dev->mode_config.num_connector) {
15835
		struct drm_crtc_state *crtc_state =
15985
		drm_atomic_state_free(state);
15836
			drm_atomic_get_crtc_state(state, crtc);
15986
		state = NULL;
15837
 
15987
	}
15838
		ret = PTR_ERR_OR_ZERO(crtc_state);
15988
 
15839
		if (ret)
15989
	if (ret == 0 && !setup) {
15840
			goto err;
15990
		setup = true;
-
 
15991
 
15841
 
15992
		intel_modeset_setup_hw_state(dev);
-
 
15993
		i915_redisable_vga(dev);
-
 
15994
	}
-
 
15995
 
-
 
15996
	if (ret == 0 && state) {
-
 
15997
		struct drm_crtc_state *crtc_state;
-
 
15998
		struct drm_crtc *crtc;
-
 
15999
		int i;
-
 
16000
 
15842
		/* force a restore */
16001
		state->acquire_ctx = &ctx;
-
 
16002
 
15843
		crtc_state->mode_changed = true;
16003
		for_each_crtc_in_state(state, crtc, crtc_state, i) {
15844
	}
-
 
15845
 
16004
			/*
15846
	for_each_intel_plane(dev, plane) {
-
 
15847
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
-
 
15848
		if (ret)
16005
			 * Force recalculation even if we restore
-
 
16006
			 * current state. With fast modeset this may not result
15849
			goto err;
16007
			 * in a modeset when the state is compatible.
-
 
16008
			 */
-
 
16009
			crtc_state->mode_changed = true;
15850
	}
16010
		}
15851
 
16011
 
15852
	for_each_intel_connector(dev, conn) {
16012
		ret = drm_atomic_commit(state);
15853
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
-
 
15854
		if (ret)
16013
	}
15855
			goto err;
16014
 
15856
	}
16015
	if (ret == -EDEADLK) {
15857
 
16016
		drm_modeset_backoff(&ctx);
15858
	intel_modeset_setup_hw_state(dev);
16017
		goto retry;
15859
 
16018
	}
15860
	i915_redisable_vga(dev);
16019
 
15861
	ret = drm_atomic_commit(state);
16020
	drm_modeset_drop_locks(&ctx);
15862
	if (!ret)
16021
	drm_modeset_acquire_fini(&ctx);
15863
		return;
16022
	mutex_unlock(&dev->mode_config.mutex);
15864
 
16023
 
15865
err:
16024
	if (ret) {
15866
	DRM_ERROR("Restoring old state failed with %i\n", ret);
16025
		DRM_ERROR("Restoring old state failed with %i\n", ret);
15867
	drm_atomic_state_free(state);
16026
		drm_atomic_state_free(state);
15868
}
16027
	}
-
 
16028
}
15869
 
16029
 
15870
void intel_modeset_gem_init(struct drm_device *dev)
16030
void intel_modeset_gem_init(struct drm_device *dev)
15871
{
16031
{
15872
	struct drm_crtc *c;
16032
	struct drm_crtc *c;
15873
	struct drm_i915_gem_object *obj;
16033
	struct drm_i915_gem_object *obj;
15874
	int ret;
16034
	int ret;
15875
 
-
 
15876
	mutex_lock(&dev->struct_mutex);
16035
 
15877
	intel_init_gt_powersave(dev);
-
 
15878
	mutex_unlock(&dev->struct_mutex);
16036
	intel_init_gt_powersave(dev);
15879
 
16037
 
15880
	intel_modeset_init_hw(dev);
16038
	intel_modeset_init_hw(dev);
15881
 
16039
 
15882
//   intel_setup_overlay(dev);
16040
//   intel_setup_overlay(dev);
15883
 
16041
 
15884
	/*
16042
	/*
15885
	 * Make sure any fbs we allocated at startup are properly
16043
	 * Make sure any fbs we allocated at startup are properly
15886
	 * pinned & fenced.  When we do the allocation it's too early
16044
	 * pinned & fenced.  When we do the allocation it's too early
15887
	 * for this.
16045
	 * for this.
15888
	 */
16046
	 */
15889
	for_each_crtc(dev, c) {
16047
	for_each_crtc(dev, c) {
15890
		obj = intel_fb_obj(c->primary->fb);
16048
		obj = intel_fb_obj(c->primary->fb);
15891
		if (obj == NULL)
16049
		if (obj == NULL)
15892
			continue;
16050
			continue;
15893
 
16051
 
15894
		mutex_lock(&dev->struct_mutex);
16052
		mutex_lock(&dev->struct_mutex);
15895
		ret = intel_pin_and_fence_fb_obj(c->primary,
16053
		ret = intel_pin_and_fence_fb_obj(c->primary,
15896
						 c->primary->fb,
16054
						 c->primary->fb,
15897
						 c->primary->state);
16055
						 c->primary->state);
15898
		mutex_unlock(&dev->struct_mutex);
16056
		mutex_unlock(&dev->struct_mutex);
15899
		if (ret) {
16057
		if (ret) {
15900
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
16058
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
15901
				  to_intel_crtc(c)->pipe);
16059
				  to_intel_crtc(c)->pipe);
15902
			drm_framebuffer_unreference(c->primary->fb);
16060
			drm_framebuffer_unreference(c->primary->fb);
15903
			c->primary->fb = NULL;
16061
			c->primary->fb = NULL;
15904
			c->primary->crtc = c->primary->state->crtc = NULL;
16062
			c->primary->crtc = c->primary->state->crtc = NULL;
15905
			update_state_fb(c->primary);
16063
			update_state_fb(c->primary);
15906
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
16064
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
15907
		}
16065
		}
15908
	}
16066
	}
15909
 
16067
 
15910
	intel_backlight_register(dev);
16068
	intel_backlight_register(dev);
15911
}
16069
}
15912
 
16070
 
15913
void intel_connector_unregister(struct intel_connector *intel_connector)
16071
void intel_connector_unregister(struct intel_connector *intel_connector)
15914
{
16072
{
15915
	struct drm_connector *connector = &intel_connector->base;
16073
	struct drm_connector *connector = &intel_connector->base;
15916
 
16074
 
15917
	intel_panel_destroy_backlight(connector);
16075
	intel_panel_destroy_backlight(connector);
15918
	drm_connector_unregister(connector);
16076
	drm_connector_unregister(connector);
15919
}
16077
}
15920
 
16078
 
15921
void intel_modeset_cleanup(struct drm_device *dev)
16079
void intel_modeset_cleanup(struct drm_device *dev)
15922
{
16080
{
15923
#if 0
16081
#if 0
15924
	struct drm_i915_private *dev_priv = dev->dev_private;
16082
	struct drm_i915_private *dev_priv = dev->dev_private;
15925
	struct intel_connector *connector;
16083
	struct intel_connector *connector;
15926
 
16084
 
15927
	intel_disable_gt_powersave(dev);
16085
	intel_disable_gt_powersave(dev);
15928
 
16086
 
15929
	intel_backlight_unregister(dev);
16087
	intel_backlight_unregister(dev);
15930
 
16088
 
15931
	/*
16089
	/*
15932
	 * Interrupts and polling as the first thing to avoid creating havoc.
16090
	 * Interrupts and polling as the first thing to avoid creating havoc.
15933
	 * Too much stuff here (turning of connectors, ...) would
16091
	 * Too much stuff here (turning of connectors, ...) would
15934
	 * experience fancy races otherwise.
16092
	 * experience fancy races otherwise.
15935
	 */
16093
	 */
15936
	intel_irq_uninstall(dev_priv);
16094
	intel_irq_uninstall(dev_priv);
15937
 
16095
 
15938
	/*
16096
	/*
15939
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
16097
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
15940
	 * poll handlers. Hence disable polling after hpd handling is shut down.
16098
	 * poll handlers. Hence disable polling after hpd handling is shut down.
15941
	 */
16099
	 */
15942
	drm_kms_helper_poll_fini(dev);
16100
	drm_kms_helper_poll_fini(dev);
15943
 
16101
 
15944
	intel_unregister_dsm_handler();
16102
	intel_unregister_dsm_handler();
15945
 
16103
 
15946
	intel_fbc_disable(dev_priv);
16104
	intel_fbc_global_disable(dev_priv);
15947
 
16105
 
15948
	/* flush any delayed tasks or pending work */
16106
	/* flush any delayed tasks or pending work */
15949
	flush_scheduled_work();
16107
	flush_scheduled_work();
15950
 
16108
 
15951
	/* destroy the backlight and sysfs files before encoders/connectors */
16109
	/* destroy the backlight and sysfs files before encoders/connectors */
15952
	for_each_intel_connector(dev, connector)
16110
	for_each_intel_connector(dev, connector)
15953
		connector->unregister(connector);
16111
		connector->unregister(connector);
15954
 
16112
 
15955
	drm_mode_config_cleanup(dev);
16113
	drm_mode_config_cleanup(dev);
15956
 
16114
 
15957
	intel_cleanup_overlay(dev);
16115
	intel_cleanup_overlay(dev);
15958
 
-
 
15959
	mutex_lock(&dev->struct_mutex);
16116
 
15960
	intel_cleanup_gt_powersave(dev);
-
 
15961
	mutex_unlock(&dev->struct_mutex);
16117
	intel_cleanup_gt_powersave(dev);
15962
#endif
16118
#endif
15963
}
16119
}
15964
 
16120
 
15965
/*
16121
/*
15966
 * Return which encoder is currently attached for connector.
16122
 * Return which encoder is currently attached for connector.
15967
 */
16123
 */
15968
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
16124
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
15969
{
16125
{
15970
	return &intel_attached_encoder(connector)->base;
16126
	return &intel_attached_encoder(connector)->base;
15971
}
16127
}
15972
 
16128
 
15973
void intel_connector_attach_encoder(struct intel_connector *connector,
16129
void intel_connector_attach_encoder(struct intel_connector *connector,
15974
				    struct intel_encoder *encoder)
16130
				    struct intel_encoder *encoder)
15975
{
16131
{
15976
	connector->encoder = encoder;
16132
	connector->encoder = encoder;
15977
	drm_mode_connector_attach_encoder(&connector->base,
16133
	drm_mode_connector_attach_encoder(&connector->base,
15978
					  &encoder->base);
16134
					  &encoder->base);
15979
}
16135
}
15980
 
16136
 
15981
/*
16137
/*
15982
 * set vga decode state - true == enable VGA decode
16138
 * set vga decode state - true == enable VGA decode
15983
 */
16139
 */
15984
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16140
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
15985
{
16141
{
15986
	struct drm_i915_private *dev_priv = dev->dev_private;
16142
	struct drm_i915_private *dev_priv = dev->dev_private;
15987
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16143
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
15988
	u16 gmch_ctrl;
16144
	u16 gmch_ctrl;
15989
 
16145
 
15990
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16146
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15991
		DRM_ERROR("failed to read control word\n");
16147
		DRM_ERROR("failed to read control word\n");
15992
		return -EIO;
16148
		return -EIO;
15993
	}
16149
	}
15994
 
16150
 
15995
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16151
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15996
		return 0;
16152
		return 0;
15997
 
16153
 
15998
	if (state)
16154
	if (state)
15999
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16155
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16000
	else
16156
	else
16001
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16157
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16002
 
16158
 
16003
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16159
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16004
		DRM_ERROR("failed to write control word\n");
16160
		DRM_ERROR("failed to write control word\n");
16005
		return -EIO;
16161
		return -EIO;
16006
	}
16162
	}
16007
 
16163
 
16008
	return 0;
16164
	return 0;
16009
}
16165
}
16010
 
-
 
16011
#ifdef CONFIG_DEBUG_FS
-
 
16012
 
16166
 
16013
struct intel_display_error_state {
16167
struct intel_display_error_state {
16014
 
16168
 
16015
	u32 power_well_driver;
16169
	u32 power_well_driver;
16016
 
16170
 
16017
	int num_transcoders;
16171
	int num_transcoders;
16018
 
16172
 
16019
	struct intel_cursor_error_state {
16173
	struct intel_cursor_error_state {
16020
		u32 control;
16174
		u32 control;
16021
		u32 position;
16175
		u32 position;
16022
		u32 base;
16176
		u32 base;
16023
		u32 size;
16177
		u32 size;
16024
	} cursor[I915_MAX_PIPES];
16178
	} cursor[I915_MAX_PIPES];
16025
 
16179
 
16026
	struct intel_pipe_error_state {
16180
	struct intel_pipe_error_state {
16027
		bool power_domain_on;
16181
		bool power_domain_on;
16028
		u32 source;
16182
		u32 source;
16029
		u32 stat;
16183
		u32 stat;
16030
	} pipe[I915_MAX_PIPES];
16184
	} pipe[I915_MAX_PIPES];
16031
 
16185
 
16032
	struct intel_plane_error_state {
16186
	struct intel_plane_error_state {
16033
		u32 control;
16187
		u32 control;
16034
		u32 stride;
16188
		u32 stride;
16035
		u32 size;
16189
		u32 size;
16036
		u32 pos;
16190
		u32 pos;
16037
		u32 addr;
16191
		u32 addr;
16038
		u32 surface;
16192
		u32 surface;
16039
		u32 tile_offset;
16193
		u32 tile_offset;
16040
	} plane[I915_MAX_PIPES];
16194
	} plane[I915_MAX_PIPES];
16041
 
16195
 
16042
	struct intel_transcoder_error_state {
16196
	struct intel_transcoder_error_state {
16043
		bool power_domain_on;
16197
		bool power_domain_on;
16044
		enum transcoder cpu_transcoder;
16198
		enum transcoder cpu_transcoder;
16045
 
16199
 
16046
		u32 conf;
16200
		u32 conf;
16047
 
16201
 
16048
		u32 htotal;
16202
		u32 htotal;
16049
		u32 hblank;
16203
		u32 hblank;
16050
		u32 hsync;
16204
		u32 hsync;
16051
		u32 vtotal;
16205
		u32 vtotal;
16052
		u32 vblank;
16206
		u32 vblank;
16053
		u32 vsync;
16207
		u32 vsync;
16054
	} transcoder[4];
16208
	} transcoder[4];
16055
};
16209
};
16056
 
16210
 
16057
struct intel_display_error_state *
16211
struct intel_display_error_state *
16058
intel_display_capture_error_state(struct drm_device *dev)
16212
intel_display_capture_error_state(struct drm_device *dev)
16059
{
16213
{
16060
	struct drm_i915_private *dev_priv = dev->dev_private;
16214
	struct drm_i915_private *dev_priv = dev->dev_private;
16061
	struct intel_display_error_state *error;
16215
	struct intel_display_error_state *error;
16062
	int transcoders[] = {
16216
	int transcoders[] = {
16063
		TRANSCODER_A,
16217
		TRANSCODER_A,
16064
		TRANSCODER_B,
16218
		TRANSCODER_B,
16065
		TRANSCODER_C,
16219
		TRANSCODER_C,
16066
		TRANSCODER_EDP,
16220
		TRANSCODER_EDP,
16067
	};
16221
	};
16068
	int i;
16222
	int i;
16069
 
16223
 
16070
	if (INTEL_INFO(dev)->num_pipes == 0)
16224
	if (INTEL_INFO(dev)->num_pipes == 0)
16071
		return NULL;
16225
		return NULL;
16072
 
16226
 
16073
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
16227
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
16074
	if (error == NULL)
16228
	if (error == NULL)
16075
		return NULL;
16229
		return NULL;
16076
 
16230
 
16077
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16231
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16078
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16232
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16079
 
16233
 
16080
	for_each_pipe(dev_priv, i) {
16234
	for_each_pipe(dev_priv, i) {
16081
		error->pipe[i].power_domain_on =
16235
		error->pipe[i].power_domain_on =
16082
			__intel_display_power_is_enabled(dev_priv,
16236
			__intel_display_power_is_enabled(dev_priv,
16083
							 POWER_DOMAIN_PIPE(i));
16237
							 POWER_DOMAIN_PIPE(i));
16084
		if (!error->pipe[i].power_domain_on)
16238
		if (!error->pipe[i].power_domain_on)
16085
			continue;
16239
			continue;
16086
 
16240
 
16087
		error->cursor[i].control = I915_READ(CURCNTR(i));
16241
		error->cursor[i].control = I915_READ(CURCNTR(i));
16088
		error->cursor[i].position = I915_READ(CURPOS(i));
16242
		error->cursor[i].position = I915_READ(CURPOS(i));
16089
		error->cursor[i].base = I915_READ(CURBASE(i));
16243
		error->cursor[i].base = I915_READ(CURBASE(i));
16090
 
16244
 
16091
		error->plane[i].control = I915_READ(DSPCNTR(i));
16245
		error->plane[i].control = I915_READ(DSPCNTR(i));
16092
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16246
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16093
		if (INTEL_INFO(dev)->gen <= 3) {
16247
		if (INTEL_INFO(dev)->gen <= 3) {
16094
			error->plane[i].size = I915_READ(DSPSIZE(i));
16248
			error->plane[i].size = I915_READ(DSPSIZE(i));
16095
			error->plane[i].pos = I915_READ(DSPPOS(i));
16249
			error->plane[i].pos = I915_READ(DSPPOS(i));
16096
		}
16250
		}
16097
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16251
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16098
			error->plane[i].addr = I915_READ(DSPADDR(i));
16252
			error->plane[i].addr = I915_READ(DSPADDR(i));
16099
		if (INTEL_INFO(dev)->gen >= 4) {
16253
		if (INTEL_INFO(dev)->gen >= 4) {
16100
			error->plane[i].surface = I915_READ(DSPSURF(i));
16254
			error->plane[i].surface = I915_READ(DSPSURF(i));
16101
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16255
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16102
		}
16256
		}
16103
 
16257
 
16104
		error->pipe[i].source = I915_READ(PIPESRC(i));
16258
		error->pipe[i].source = I915_READ(PIPESRC(i));
16105
 
16259
 
16106
		if (HAS_GMCH_DISPLAY(dev))
16260
		if (HAS_GMCH_DISPLAY(dev))
16107
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
16261
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
16108
	}
16262
	}
16109
 
16263
 
16110
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
16264
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
16111
	if (HAS_DDI(dev_priv->dev))
16265
	if (HAS_DDI(dev_priv->dev))
16112
		error->num_transcoders++; /* Account for eDP. */
16266
		error->num_transcoders++; /* Account for eDP. */
16113
 
16267
 
16114
	for (i = 0; i < error->num_transcoders; i++) {
16268
	for (i = 0; i < error->num_transcoders; i++) {
16115
		enum transcoder cpu_transcoder = transcoders[i];
16269
		enum transcoder cpu_transcoder = transcoders[i];
16116
 
16270
 
16117
		error->transcoder[i].power_domain_on =
16271
		error->transcoder[i].power_domain_on =
16118
			__intel_display_power_is_enabled(dev_priv,
16272
			__intel_display_power_is_enabled(dev_priv,
16119
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16273
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16120
		if (!error->transcoder[i].power_domain_on)
16274
		if (!error->transcoder[i].power_domain_on)
16121
			continue;
16275
			continue;
16122
 
16276
 
16123
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
16277
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
16124
 
16278
 
16125
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16279
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16126
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16280
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16127
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16281
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16128
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16282
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16129
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16283
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16130
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16284
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16131
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16285
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16132
	}
16286
	}
16133
 
16287
 
16134
	return error;
16288
	return error;
16135
}
16289
}
16136
 
16290
 
16137
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16291
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16138
 
16292
 
16139
void
16293
void
16140
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16294
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16141
				struct drm_device *dev,
16295
				struct drm_device *dev,
16142
				struct intel_display_error_state *error)
16296
				struct intel_display_error_state *error)
16143
{
16297
{
16144
	struct drm_i915_private *dev_priv = dev->dev_private;
16298
	struct drm_i915_private *dev_priv = dev->dev_private;
16145
	int i;
16299
	int i;
16146
 
16300
 
16147
	if (!error)
16301
	if (!error)
16148
		return;
16302
		return;
16149
 
16303
 
16150
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
16304
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
16151
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16305
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16152
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
16306
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
16153
			   error->power_well_driver);
16307
			   error->power_well_driver);
16154
	for_each_pipe(dev_priv, i) {
16308
	for_each_pipe(dev_priv, i) {
16155
		err_printf(m, "Pipe [%d]:\n", i);
16309
		err_printf(m, "Pipe [%d]:\n", i);
16156
		err_printf(m, "  Power: %s\n",
16310
		err_printf(m, "  Power: %s\n",
16157
			   error->pipe[i].power_domain_on ? "on" : "off");
16311
			   onoff(error->pipe[i].power_domain_on));
16158
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
16312
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
16159
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
16313
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
16160
 
16314
 
16161
		err_printf(m, "Plane [%d]:\n", i);
16315
		err_printf(m, "Plane [%d]:\n", i);
16162
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
16316
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
16163
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
16317
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
16164
		if (INTEL_INFO(dev)->gen <= 3) {
16318
		if (INTEL_INFO(dev)->gen <= 3) {
16165
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
16319
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
16166
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
16320
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
16167
		}
16321
		}
16168
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16322
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16169
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
16323
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
16170
		if (INTEL_INFO(dev)->gen >= 4) {
16324
		if (INTEL_INFO(dev)->gen >= 4) {
16171
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
16325
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
16172
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
16326
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
16173
		}
16327
		}
16174
 
16328
 
16175
		err_printf(m, "Cursor [%d]:\n", i);
16329
		err_printf(m, "Cursor [%d]:\n", i);
16176
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
16330
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
16177
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
16331
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
16178
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
16332
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
16179
	}
16333
	}
16180
 
16334
 
16181
	for (i = 0; i < error->num_transcoders; i++) {
16335
	for (i = 0; i < error->num_transcoders; i++) {
16182
		err_printf(m, "CPU transcoder: %c\n",
16336
		err_printf(m, "CPU transcoder: %c\n",
16183
			   transcoder_name(error->transcoder[i].cpu_transcoder));
16337
			   transcoder_name(error->transcoder[i].cpu_transcoder));
16184
		err_printf(m, "  Power: %s\n",
16338
		err_printf(m, "  Power: %s\n",
16185
			   error->transcoder[i].power_domain_on ? "on" : "off");
16339
			   onoff(error->transcoder[i].power_domain_on));
16186
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
16340
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
16187
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
16341
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
16188
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
16342
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
16189
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
16343
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
16190
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
16344
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
16191
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
16345
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
16192
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
16346
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
16193
	}
16347
	}
16194
}
-
 
16195
#endif
-
 
16196
 
-
 
16197
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
-
 
16198
{
-
 
16199
	struct intel_crtc *crtc;
-
 
16200
 
-
 
16201
	for_each_intel_crtc(dev, crtc) {
-
 
16202
		struct intel_unpin_work *work;
-
 
16203
 
-
 
16204
		spin_lock_irq(&dev->event_lock);
-
 
16205
 
-
 
16206
		work = crtc->unpin_work;
-
 
16207
 
-
 
16208
		if (work && work->event &&
-
 
16209
		    work->event->base.file_priv == file) {
-
 
16210
			kfree(work->event);
-
 
16211
			work->event = NULL;
-
 
16212
		}
-
 
16213
 
-
 
16214
		spin_unlock_irq(&dev->event_lock);
-
 
16215
	}
-
 
16216
}
16348
}
16217
>
16349
>