Subversion Repositories Kolibri OS

Rev

Rev 6935 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
6084 serge 24
 *	Eric Anholt 
2327 Serge 25
 */
26
 
5097 serge 27
/*
 * NOTE(review): the #include targets were stripped by the web export
 * (angle-bracketed names removed as HTML). Restored from the upstream
 * drm/i915 intel_display.c of the matching era (v4.4/v4.5) — the
 * revision markers (5097/2327/6088/.../6937) line up with this list,
 * but verify against the original repository before building.
 */
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/dma-buf.h>
 
5060 serge 50
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for Skylake+ (adds alpha and packed YUV variants) */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
87
 
88
/* Forward declarations of helpers defined later in this file. */
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary(struct drm_crtc *crtc);
4104 Serge 122
 
2327 Serge 123
/* Inclusive [min, max] range for a single PLL divider. */
typedef struct {
	int	min, max;
} intel_range_t;

/* Post-divider (p2) selection: below dot_limit use p2_slow, else p2_fast. */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

/* Per-platform/per-output PLL divider limits used by the *_find_best_dpll() searches. */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};
137
 
6084 serge 138
/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	/* Fuse value indexes this MHz table; *1000 below converts to kHz. */
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}
151
 
152
/*
 * Read a CCK clock-control register and derive the resulting clock rate
 * from the cached HPLL frequency and the programmed divider.
 * @name is only used for the warning message below. Returns kHz.
 */
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	u32 val;
	int divider;

	/* Lazily cache the HPLL/VCO frequency on first use. */
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	/* Status field should mirror the requested divider once stable. */
	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	/* rate = 2 * hpll / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
173
 
3243 Serge 174
/*
 * Read the PCH raw clock frequency field. Only meaningful on PCH-split
 * platforms (hence the WARN_ON). Units follow the register encoding.
 */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
183
 
6084 serge 184
/* hrawclock is 1/4 the FSB frequency */
185
int intel_hrawclk(struct drm_device *dev)
186
{
187
	struct drm_i915_private *dev_priv = dev->dev_private;
188
	uint32_t clkcfg;
189
 
190
	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
6937 serge 191
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6084 serge 192
		return 200;
193
 
194
	clkcfg = I915_READ(CLKCFG);
195
	switch (clkcfg & CLKCFG_FSB_MASK) {
196
	case CLKCFG_FSB_400:
197
		return 100;
198
	case CLKCFG_FSB_533:
199
		return 133;
200
	case CLKCFG_FSB_667:
201
		return 166;
202
	case CLKCFG_FSB_800:
203
		return 200;
204
	case CLKCFG_FSB_1067:
205
		return 266;
206
	case CLKCFG_FSB_1333:
207
		return 333;
208
	/* these two are just a guess; one of them might be right */
209
	case CLKCFG_FSB_1600:
210
	case CLKCFG_FSB_1600_ALT:
211
		return 400;
212
	default:
213
		return 133;
214
	}
215
}
216
 
217
/*
 * Cache the CZ clock rate in dev_priv->czclk_freq.
 * Only VLV/CHV have the CCK CZ clock control register; no-op elsewhere.
 */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
227
 
2327 Serge 228
static inline u32 /* units of 100MHz */
229
intel_fdi_link_freq(struct drm_device *dev)
230
{
231
	if (IS_GEN5(dev)) {
232
		struct drm_i915_private *dev_priv = dev->dev_private;
233
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
234
	} else
235
		return 27;
236
}
237
 
4104 Serge 238
/* Gen2 (i8xx) PLL divider limits, per output type. */
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
276
 
277
/* Gen3/4-class (i9xx) PLL divider limits. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
302
 
303
 
304
/* G4X PLL divider limits, per output type. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	/* dot_limit = 0: p2 is effectively fixed (slow == fast) */
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
359
 
360
/* Pineview PLL divider limits (single combined m divider, ring N counter). */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
387
 
388
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
458
 
4560 Serge 459
/* VLV/CHV/BXT limits: no .m/.p ranges — those platforms are exempted in intel_PLL_is_valid(). */
static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored in 10.22 fixed point on CHV (hence the << 22) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
502
 
503
/* Thin wrapper: does this CRTC state require a full modeset? */
static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
3031 serge 508
 
4560 Serge 509
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Walk the encoders currently attached to this CRTC. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}
523
 
5354 serge 524
/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 * encoder->crtc.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	/* Inspect the staged (atomic) connector states, not the live ones. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	/* Callers expect at least one connector staged on this CRTC. */
	WARN_ON(num_connectors == 0);

	return false;
}
554
 
6084 serge 555
static const intel_limit_t *
556
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
2327 Serge 557
{
6084 serge 558
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 559
	const intel_limit_t *limit;
560
 
6084 serge 561
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
3480 Serge 562
		if (intel_is_dual_link_lvds(dev)) {
2327 Serge 563
			if (refclk == 100000)
564
				limit = &intel_limits_ironlake_dual_lvds_100m;
565
			else
566
				limit = &intel_limits_ironlake_dual_lvds;
567
		} else {
568
			if (refclk == 100000)
569
				limit = &intel_limits_ironlake_single_lvds_100m;
570
			else
571
				limit = &intel_limits_ironlake_single_lvds;
572
		}
4104 Serge 573
	} else
2327 Serge 574
		limit = &intel_limits_ironlake_dac;
575
 
576
	return limit;
577
}
578
 
6084 serge 579
static const intel_limit_t *
580
intel_g4x_limit(struct intel_crtc_state *crtc_state)
2327 Serge 581
{
6084 serge 582
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 583
	const intel_limit_t *limit;
584
 
6084 serge 585
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
3480 Serge 586
		if (intel_is_dual_link_lvds(dev))
2327 Serge 587
			limit = &intel_limits_g4x_dual_channel_lvds;
588
		else
589
			limit = &intel_limits_g4x_single_channel_lvds;
6084 serge 590
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
591
		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
2327 Serge 592
		limit = &intel_limits_g4x_hdmi;
6084 serge 593
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
2327 Serge 594
		limit = &intel_limits_g4x_sdvo;
595
	} else /* The option is for other outputs */
596
		limit = &intel_limits_i9xx_sdvo;
597
 
598
	return limit;
599
}
600
 
6084 serge 601
static const intel_limit_t *
602
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
2327 Serge 603
{
6084 serge 604
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 605
	const intel_limit_t *limit;
606
 
6084 serge 607
	if (IS_BROXTON(dev))
608
		limit = &intel_limits_bxt;
609
	else if (HAS_PCH_SPLIT(dev))
610
		limit = intel_ironlake_limit(crtc_state, refclk);
2327 Serge 611
	else if (IS_G4X(dev)) {
6084 serge 612
		limit = intel_g4x_limit(crtc_state);
2327 Serge 613
	} else if (IS_PINEVIEW(dev)) {
6084 serge 614
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
2327 Serge 615
			limit = &intel_limits_pineview_lvds;
616
		else
617
			limit = &intel_limits_pineview_sdvo;
5060 serge 618
	} else if (IS_CHERRYVIEW(dev)) {
619
		limit = &intel_limits_chv;
3031 serge 620
	} else if (IS_VALLEYVIEW(dev)) {
4560 Serge 621
		limit = &intel_limits_vlv;
2327 Serge 622
	} else if (!IS_GEN2(dev)) {
6084 serge 623
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
2327 Serge 624
			limit = &intel_limits_i9xx_lvds;
625
		else
626
			limit = &intel_limits_i9xx_sdvo;
627
	} else {
6084 serge 628
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
2327 Serge 629
			limit = &intel_limits_i8xx_lvds;
6084 serge 630
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
4104 Serge 631
			limit = &intel_limits_i8xx_dvo;
2327 Serge 632
		else
4104 Serge 633
			limit = &intel_limits_i8xx_dac;
2327 Serge 634
	}
635
	return limit;
636
}
637
 
6084 serge 638
/*
639
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
640
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
641
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
642
 * The helpers' return value is the rate of the clock that is fed to the
643
 * display engine's pipe which can be the above fast dot clock rate or a
644
 * divided-down version of it.
645
 */
2327 Serge 646
/* m1 is reserved as 0 in Pineview, n is a ring counter */
6084 serge 647
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
2327 Serge 648
{
649
	clock->m = clock->m2 + 2;
650
	clock->p = clock->p1 * clock->p2;
4560 Serge 651
	if (WARN_ON(clock->n == 0 || clock->p == 0))
6084 serge 652
		return 0;
4560 Serge 653
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
654
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
6084 serge 655
 
656
	return clock->dot;
2327 Serge 657
}
658
 
4104 Serge 659
/* Effective m divider on i9xx: m = 5*(m1+2) + (m2+2) (the +2s are the register bias). */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
663
 
6084 serge 664
/*
 * Fill in the derived m/p/vco/dot fields for an i9xx-style PLL and return
 * the pipe clock (the dot clock). Note the n divider carries a +2 register
 * bias, like m1/m2.
 */
static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	int p = clock->p1 * clock->p2;

	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = p;
	if (WARN_ON(clock->n + 2 == 0 || p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, p);

	return clock->dot;
}
675
 
6084 serge 676
/*
 * Fill in the derived m/p/vco/dot fields for a VLV PLL. The dot clock is
 * the fast (5x) clock; the pipe is fed the /5 rate, which is returned.
 */
static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	int m = clock->m1 * clock->m2;
	int p = clock->p1 * clock->p2;

	clock->m = m;
	clock->p = p;
	if (WARN_ON(clock->n == 0 || p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, p);

	return clock->dot / 5;
}
687
 
688
/*
 * CHV variant of the DPLL parameter calculation. m2 is in 10.22 fixed
 * point (see intel_limits_chv), hence the 64-bit multiply and the n << 22
 * denominator. Returns the pipe clock (fast dot clock / 5).
 */
int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
700
 
2327 Serge 701
/* NOTE: hides a "return false" — only usable inside intel_PLL_is_valid(). */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is a hardware requirement except on these platforms. */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT limit tables don't define .m/.p, so skip those checks. */
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
742
 
6084 serge 743
static int
744
i9xx_select_p2_div(const intel_limit_t *limit,
745
		   const struct intel_crtc_state *crtc_state,
746
		   int target)
2327 Serge 747
{
6084 serge 748
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 749
 
6084 serge 750
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
2327 Serge 751
		/*
3480 Serge 752
		 * For LVDS just rely on its current settings for dual-channel.
753
		 * We haven't figured out how to reliably set up different
754
		 * single/dual channel state, if we even can.
2327 Serge 755
		 */
3480 Serge 756
		if (intel_is_dual_link_lvds(dev))
6084 serge 757
			return limit->p2.p2_fast;
2327 Serge 758
		else
6084 serge 759
			return limit->p2.p2_slow;
2327 Serge 760
	} else {
761
		if (target < limit->p2.dot_limit)
6084 serge 762
			return limit->p2.p2_slow;
2327 Serge 763
		else
6084 serge 764
			return limit->p2.p2_fast;
2327 Serge 765
	}
6084 serge 766
}
2327 Serge 767
 
6084 serge 768
/*
 * Exhaustively search the divider ranges for the clock closest to @target.
 * Writes the winner to @best_clock; if @match_clock is given, only
 * candidates with the same p divider are considered. Returns true if any
 * valid candidate was found (err improved on the initial target).
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hardware requires m1 > m2; larger m2 values are pointless */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
814
 
815
/*
 * Pineview variant of i9xx_find_best_dpll(): same exhaustive search but
 * uses pnv_calc_dpll_params() and omits the m1 > m2 pruning (m1 is
 * reserved as 0 on Pineview).
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
859
 
860
static bool
6084 serge 861
g4x_find_best_dpll(const intel_limit_t *limit,
862
		   struct intel_crtc_state *crtc_state,
863
		   int target, int refclk, intel_clock_t *match_clock,
864
		   intel_clock_t *best_clock)
2327 Serge 865
{
6084 serge 866
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 867
	intel_clock_t clock;
868
	int max_n;
6084 serge 869
	bool found = false;
2327 Serge 870
	/* approximately equals target * 0.00585 */
871
	int err_most = (target >> 8) + (target >> 9);
872
 
6084 serge 873
	memset(best_clock, 0, sizeof(*best_clock));
2327 Serge 874
 
6084 serge 875
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
876
 
2327 Serge 877
	max_n = limit->n.max;
878
	/* based on hardware requirement, prefer smaller n to precision */
879
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
880
		/* based on hardware requirement, prefere larger m1,m2 */
881
		for (clock.m1 = limit->m1.max;
882
		     clock.m1 >= limit->m1.min; clock.m1--) {
883
			for (clock.m2 = limit->m2.max;
884
			     clock.m2 >= limit->m2.min; clock.m2--) {
885
				for (clock.p1 = limit->p1.max;
886
				     clock.p1 >= limit->p1.min; clock.p1--) {
887
					int this_err;
888
 
6084 serge 889
					i9xx_calc_dpll_params(refclk, &clock);
2327 Serge 890
					if (!intel_PLL_is_valid(dev, limit,
891
								&clock))
892
						continue;
893
 
894
					this_err = abs(clock.dot - target);
895
					if (this_err < err_most) {
896
						*best_clock = clock;
897
						err_most = this_err;
898
						max_n = clock.n;
899
						found = true;
900
					}
901
				}
902
			}
903
		}
904
	}
905
	return found;
906
}
907
 
6084 serge 908
/*
909
 * Check if the calculated PLL configuration is more optimal compared to the
910
 * best configuration and error found so far. Return the calculated error.
911
 */
912
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
913
			       const intel_clock_t *calculated_clock,
914
			       const intel_clock_t *best_clock,
915
			       unsigned int best_error_ppm,
916
			       unsigned int *error_ppm)
917
{
918
	/*
919
	 * For CHV ignore the error and consider only the P value.
920
	 * Prefer a bigger P value based on HW requirements.
921
	 */
922
	if (IS_CHERRYVIEW(dev)) {
923
		*error_ppm = 0;
924
 
925
		return calculated_clock->p > best_clock->p;
926
	}
927
 
928
	if (WARN_ON_ONCE(!target_freq))
929
		return false;
930
 
931
	*error_ppm = div_u64(1000000ULL *
932
				abs(target_freq - calculated_clock->dot),
933
			     target_freq);
934
	/*
935
	 * Prefer a better P value over a better (smaller) error if the error
936
	 * is small. Ensure this preference for future configurations too by
937
	 * setting the error to 0.
938
	 */
939
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
940
		*error_ppm = 0;
941
 
942
		return true;
943
	}
944
 
945
	return *error_ppm + 10 < best_error_ppm;
946
}
947
 
2327 Serge 948
static bool
6084 serge 949
vlv_find_best_dpll(const intel_limit_t *limit,
950
		   struct intel_crtc_state *crtc_state,
951
		   int target, int refclk, intel_clock_t *match_clock,
952
		   intel_clock_t *best_clock)
3031 serge 953
{
6084 serge 954
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5354 serge 955
	struct drm_device *dev = crtc->base.dev;
4560 Serge 956
	intel_clock_t clock;
957
	unsigned int bestppm = 1000000;
958
	/* min update 19.2 MHz */
959
	int max_n = min(limit->n.max, refclk / 19200);
960
	bool found = false;
2327 Serge 961
 
4560 Serge 962
	target *= 5; /* fast clock */
3031 serge 963
 
4560 Serge 964
	memset(best_clock, 0, sizeof(*best_clock));
965
 
3031 serge 966
	/* based on hardware requirement, prefer smaller n to precision */
4560 Serge 967
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
968
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
969
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
970
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
971
				clock.p = clock.p1 * clock.p2;
3031 serge 972
				/* based on hardware requirement, prefer bigger m1,m2 values */
4560 Serge 973
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
6084 serge 974
					unsigned int ppm;
4560 Serge 975
 
976
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
977
								     refclk * clock.m1);
978
 
6084 serge 979
					vlv_calc_dpll_params(refclk, &clock);
4560 Serge 980
 
981
					if (!intel_PLL_is_valid(dev, limit,
982
								&clock))
983
						continue;
984
 
6084 serge 985
					if (!vlv_PLL_is_optimal(dev, target,
986
								&clock,
987
								best_clock,
988
								bestppm, &ppm))
989
						continue;
4560 Serge 990
 
6084 serge 991
					*best_clock = clock;
992
					bestppm = ppm;
993
					found = true;
3031 serge 994
				}
995
			}
6084 serge 996
		}
997
	}
3031 serge 998
 
4560 Serge 999
	return found;
3031 serge 1000
}
1001
 
5060 serge 1002
static bool
6084 serge 1003
chv_find_best_dpll(const intel_limit_t *limit,
1004
		   struct intel_crtc_state *crtc_state,
5060 serge 1005
		   int target, int refclk, intel_clock_t *match_clock,
1006
		   intel_clock_t *best_clock)
1007
{
6084 serge 1008
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5354 serge 1009
	struct drm_device *dev = crtc->base.dev;
6084 serge 1010
	unsigned int best_error_ppm;
5060 serge 1011
	intel_clock_t clock;
1012
	uint64_t m2;
1013
	int found = false;
1014
 
1015
	memset(best_clock, 0, sizeof(*best_clock));
6084 serge 1016
	best_error_ppm = 1000000;
5060 serge 1017
 
1018
	/*
1019
	 * Based on hardware doc, the n always set to 1, and m1 always
1020
	 * set to 2.  If requires to support 200Mhz refclk, we need to
1021
	 * revisit this because n may not 1 anymore.
1022
	 */
1023
	clock.n = 1, clock.m1 = 2;
1024
	target *= 5;	/* fast clock */
1025
 
1026
	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1027
		for (clock.p2 = limit->p2.p2_fast;
1028
				clock.p2 >= limit->p2.p2_slow;
1029
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
6084 serge 1030
			unsigned int error_ppm;
5060 serge 1031
 
1032
			clock.p = clock.p1 * clock.p2;
1033
 
1034
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1035
					clock.n) << 22, refclk * clock.m1);
1036
 
1037
			if (m2 > INT_MAX/clock.m1)
1038
				continue;
1039
 
1040
			clock.m2 = m2;
1041
 
6084 serge 1042
			chv_calc_dpll_params(refclk, &clock);
5060 serge 1043
 
1044
			if (!intel_PLL_is_valid(dev, limit, &clock))
1045
				continue;
1046
 
6084 serge 1047
			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1048
						best_error_ppm, &error_ppm))
1049
				continue;
1050
 
1051
			*best_clock = clock;
1052
			best_error_ppm = error_ppm;
1053
			found = true;
5060 serge 1054
		}
1055
	}
1056
 
1057
	return found;
1058
}
1059
 
6084 serge 1060
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1061
			intel_clock_t *best_clock)
1062
{
1063
	int refclk = i9xx_get_refclk(crtc_state, 0);
1064
 
1065
	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1066
				  target_clock, refclk, NULL, best_clock);
1067
}
1068
 
4560 Serge 1069
bool intel_crtc_active(struct drm_crtc *crtc)
1070
{
1071
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1072
 
1073
	/* Be paranoid as we can arrive here with only partial
1074
	 * state retrieved from the hardware during setup.
1075
	 *
1076
	 * We can ditch the adjusted_mode.crtc_clock check as soon
1077
	 * as Haswell has gained clock readout/fastboot support.
1078
	 *
5060 serge 1079
	 * We can ditch the crtc->primary->fb check as soon as we can
4560 Serge 1080
	 * properly reconstruct framebuffers.
6084 serge 1081
	 *
1082
	 * FIXME: The intel_crtc->active here should be switched to
1083
	 * crtc->state->active once we have proper CRTC states wired up
1084
	 * for atomic.
4560 Serge 1085
	 */
6084 serge 1086
	return intel_crtc->active && crtc->primary->state->fb &&
1087
		intel_crtc->config->base.adjusted_mode.crtc_clock;
4560 Serge 1088
}
1089
 
3243 Serge 1090
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1091
					     enum pipe pipe)
1092
{
1093
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1094
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1095
 
6084 serge 1096
	return intel_crtc->config->cpu_transcoder;
3243 Serge 1097
}
1098
 
4560 Serge 1099
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1100
{
1101
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 1102
	i915_reg_t reg = PIPEDSL(pipe);
4560 Serge 1103
	u32 line1, line2;
1104
	u32 line_mask;
1105
 
1106
	if (IS_GEN2(dev))
1107
		line_mask = DSL_LINEMASK_GEN2;
1108
	else
1109
		line_mask = DSL_LINEMASK_GEN3;
1110
 
1111
	line1 = I915_READ(reg) & line_mask;
6084 serge 1112
	msleep(5);
4560 Serge 1113
	line2 = I915_READ(reg) & line_mask;
1114
 
1115
	return line1 == line2;
1116
}
1117
 
2327 Serge 1118
/*
1119
 * intel_wait_for_pipe_off - wait for pipe to turn off
5354 serge 1120
 * @crtc: crtc whose pipe to wait for
2327 Serge 1121
 *
1122
 * After disabling a pipe, we can't wait for vblank in the usual way,
1123
 * spinning on the vblank interrupt status bit, since we won't actually
1124
 * see an interrupt when the pipe is disabled.
1125
 *
1126
 * On Gen4 and above:
1127
 *   wait for the pipe register state bit to turn off
1128
 *
1129
 * Otherwise:
1130
 *   wait for the display line value to settle (it usually
1131
 *   ends up stopping at the start of the next frame).
1132
 *
1133
 */
5354 serge 1134
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
2327 Serge 1135
{
5354 serge 1136
	struct drm_device *dev = crtc->base.dev;
2327 Serge 1137
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 1138
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
5354 serge 1139
	enum pipe pipe = crtc->pipe;
2327 Serge 1140
 
1141
	if (INTEL_INFO(dev)->gen >= 4) {
6937 serge 1142
		i915_reg_t reg = PIPECONF(cpu_transcoder);
2327 Serge 1143
 
1144
		/* Wait for the Pipe State to go off */
1145
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1146
			     100))
3031 serge 1147
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 1148
	} else {
1149
		/* Wait for the display line to settle */
4560 Serge 1150
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
3031 serge 1151
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 1152
	}
1153
}
1154
 
1155
/* Human-readable on/off string for assertion failure messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
1159
 
1160
/* Only for pre-ILK configs */
4104 Serge 1161
void assert_pll(struct drm_i915_private *dev_priv,
6084 serge 1162
		enum pipe pipe, bool state)
2327 Serge 1163
{
1164
	u32 val;
1165
	bool cur_state;
1166
 
6084 serge 1167
	val = I915_READ(DPLL(pipe));
2327 Serge 1168
	cur_state = !!(val & DPLL_VCO_ENABLE);
6084 serge 1169
	I915_STATE_WARN(cur_state != state,
2327 Serge 1170
	     "PLL state assertion failure (expected %s, current %s)\n",
1171
	     state_string(state), state_string(cur_state));
1172
}
1173
 
4560 Serge 1174
/* XXX: the dsi pll is shared between MIPI DSI ports */
1175
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1176
{
1177
	u32 val;
1178
	bool cur_state;
1179
 
6084 serge 1180
	mutex_lock(&dev_priv->sb_lock);
4560 Serge 1181
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
6084 serge 1182
	mutex_unlock(&dev_priv->sb_lock);
4560 Serge 1183
 
1184
	cur_state = val & DSI_PLL_VCO_EN;
6084 serge 1185
	I915_STATE_WARN(cur_state != state,
4560 Serge 1186
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1187
	     state_string(state), state_string(cur_state));
1188
}
1189
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1190
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1191
 
4104 Serge 1192
struct intel_shared_dpll *
1193
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1194
{
1195
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1196
 
6084 serge 1197
	if (crtc->config->shared_dpll < 0)
4104 Serge 1198
		return NULL;
1199
 
6084 serge 1200
	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
4104 Serge 1201
}
1202
 
2327 Serge 1203
/* For ILK+ */
4104 Serge 1204
void assert_shared_dpll(struct drm_i915_private *dev_priv,
6084 serge 1205
			struct intel_shared_dpll *pll,
1206
			bool state)
2327 Serge 1207
{
1208
	bool cur_state;
4104 Serge 1209
	struct intel_dpll_hw_state hw_state;
2327 Serge 1210
 
3031 serge 1211
	if (WARN (!pll,
4104 Serge 1212
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
3031 serge 1213
		return;
2342 Serge 1214
 
4104 Serge 1215
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
6084 serge 1216
	I915_STATE_WARN(cur_state != state,
4104 Serge 1217
	     "%s assertion failure (expected %s, current %s)\n",
1218
	     pll->name, state_string(state), state_string(cur_state));
2327 Serge 1219
}
1220
 
1221
/* Assert the FDI TX state for @pipe (DDI platforms lack a dedicated reg). */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1242
 
1243
/* Assert the FDI RX enable state for @pipe. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1257
 
1258
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1259
				      enum pipe pipe)
1260
{
1261
	u32 val;
1262
 
1263
	/* ILK FDI PLL is always enabled */
5060 serge 1264
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
2327 Serge 1265
		return;
1266
 
3031 serge 1267
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
3480 Serge 1268
	if (HAS_DDI(dev_priv->dev))
3031 serge 1269
		return;
1270
 
6084 serge 1271
	val = I915_READ(FDI_TX_CTL(pipe));
1272
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
2327 Serge 1273
}
1274
 
4104 Serge 1275
/* Assert the FDI RX PLL enable state for @pipe. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
1287
 
5354 serge 1288
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
6084 serge 1289
			   enum pipe pipe)
2327 Serge 1290
{
5354 serge 1291
	struct drm_device *dev = dev_priv->dev;
6937 serge 1292
	i915_reg_t pp_reg;
2327 Serge 1293
	u32 val;
1294
	enum pipe panel_pipe = PIPE_A;
1295
	bool locked = true;
1296
 
5354 serge 1297
	if (WARN_ON(HAS_DDI(dev)))
1298
		return;
1299
 
1300
	if (HAS_PCH_SPLIT(dev)) {
1301
		u32 port_sel;
1302
 
2327 Serge 1303
		pp_reg = PCH_PP_CONTROL;
5354 serge 1304
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1305
 
1306
		if (port_sel == PANEL_PORT_SELECT_LVDS &&
1307
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1308
			panel_pipe = PIPE_B;
1309
		/* XXX: else fix for eDP */
6937 serge 1310
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5354 serge 1311
		/* presumably write lock depends on pipe, not port select */
1312
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1313
		panel_pipe = pipe;
2327 Serge 1314
	} else {
1315
		pp_reg = PP_CONTROL;
5354 serge 1316
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1317
			panel_pipe = PIPE_B;
2327 Serge 1318
	}
1319
 
1320
	val = I915_READ(pp_reg);
1321
	if (!(val & PANEL_POWER_ON) ||
5354 serge 1322
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
2327 Serge 1323
		locked = false;
1324
 
6084 serge 1325
	I915_STATE_WARN(panel_pipe == pipe && locked,
2327 Serge 1326
	     "panel assertion failure, pipe %c regs locked\n",
1327
	     pipe_name(pipe));
1328
}
1329
 
4560 Serge 1330
/* Assert the hardware cursor enable state on @pipe. */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	/* 845/865 have a single cursor tied to pipe A */
	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1347
 
2342 Serge 1348
/*
 * Assert the enable state of @pipe, honouring the force-on pipe quirks
 * and only touching PIPECONF when its power domain is already up.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		/* domain powered down: treat the pipe as disabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
1375
 
3031 serge 1376
/* Assert the enable state of primary plane @plane. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1391
 
2327 Serge 1392
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1393
				   enum pipe pipe)
1394
{
4104 Serge 1395
	struct drm_device *dev = dev_priv->dev;
6084 serge 1396
	int i;
2327 Serge 1397
 
4104 Serge 1398
	/* Primary planes are fixed to pipes on gen4+ */
1399
	if (INTEL_INFO(dev)->gen >= 4) {
6084 serge 1400
		u32 val = I915_READ(DSPCNTR(pipe));
1401
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
3031 serge 1402
		     "plane %c assertion failure, should be disabled but not\n",
1403
		     plane_name(pipe));
2327 Serge 1404
		return;
3031 serge 1405
	}
2327 Serge 1406
 
1407
	/* Need to check both planes against the pipe */
5354 serge 1408
	for_each_pipe(dev_priv, i) {
6084 serge 1409
		u32 val = I915_READ(DSPCNTR(i));
1410
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
2327 Serge 1411
			DISPPLANE_SEL_PIPE_SHIFT;
6084 serge 1412
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
2327 Serge 1413
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1414
		     plane_name(i), pipe_name(pipe));
1415
	}
1416
}
1417
 
3746 Serge 1418
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1419
				    enum pipe pipe)
1420
{
4104 Serge 1421
	struct drm_device *dev = dev_priv->dev;
6084 serge 1422
	int sprite;
3746 Serge 1423
 
5354 serge 1424
	if (INTEL_INFO(dev)->gen >= 9) {
6084 serge 1425
		for_each_sprite(dev_priv, pipe, sprite) {
1426
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
1427
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
5354 serge 1428
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
1429
			     sprite, pipe_name(pipe));
1430
		}
6937 serge 1431
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
6084 serge 1432
		for_each_sprite(dev_priv, pipe, sprite) {
1433
			u32 val = I915_READ(SPCNTR(pipe, sprite));
1434
			I915_STATE_WARN(val & SP_ENABLE,
4104 Serge 1435
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
5060 serge 1436
			     sprite_name(pipe, sprite), pipe_name(pipe));
4104 Serge 1437
		}
1438
	} else if (INTEL_INFO(dev)->gen >= 7) {
6084 serge 1439
		u32 val = I915_READ(SPRCTL(pipe));
1440
		I915_STATE_WARN(val & SPRITE_ENABLE,
4104 Serge 1441
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1442
		     plane_name(pipe), pipe_name(pipe));
1443
	} else if (INTEL_INFO(dev)->gen >= 5) {
6084 serge 1444
		u32 val = I915_READ(DVSCNTR(pipe));
1445
		I915_STATE_WARN(val & DVS_ENABLE,
4104 Serge 1446
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1447
		     plane_name(pipe), pipe_name(pipe));
3746 Serge 1448
	}
1449
}
1450
 
5354 serge 1451
/*
 * Assert vblank interrupts are off for @crtc: a successful (== 0)
 * drm_crtc_vblank_get() means they were enabled, which trips the warning;
 * the reference is dropped again to keep the count balanced.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1456
 
4560 Serge 1457
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
2327 Serge 1458
{
1459
	u32 val;
1460
	bool enabled;
1461
 
6084 serge 1462
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
3031 serge 1463
 
2327 Serge 1464
	val = I915_READ(PCH_DREF_CONTROL);
1465
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1466
			    DREF_SUPERSPREAD_SOURCE_MASK));
6084 serge 1467
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
2327 Serge 1468
}
1469
 
4104 Serge 1470
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
6084 serge 1471
					   enum pipe pipe)
2327 Serge 1472
{
1473
	u32 val;
1474
	bool enabled;
1475
 
6084 serge 1476
	val = I915_READ(PCH_TRANSCONF(pipe));
2327 Serge 1477
	enabled = !!(val & TRANS_ENABLE);
6084 serge 1478
	I915_STATE_WARN(enabled,
2327 Serge 1479
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1480
	     pipe_name(pipe));
1481
}
1482
 
1483
/*
 * Return true if the DP port described by @val/@port_sel is enabled and
 * routed to @pipe, using the per-platform pipe-select encoding.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}
1502
 
1503
/*
 * Return true if the SDVO/HDMI port described by @val is enabled and
 * routed to @pipe, using the per-platform pipe-select encoding.
 */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}
1521
 
1522
/* Return true if the LVDS port in @val is enabled and routed to @pipe. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}
1537
 
1538
/* Return true if the VGA DAC (ADPA) in @val is enabled and routed to @pipe. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}
1552
 
1553
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
6937 serge 1554
				   enum pipe pipe, i915_reg_t reg,
1555
				   u32 port_sel)
2327 Serge 1556
{
1557
	u32 val = I915_READ(reg);
6084 serge 1558
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
2327 Serge 1559
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
6937 serge 1560
	     i915_mmio_reg_offset(reg), pipe_name(pipe));
3031 serge 1561
 
6084 serge 1562
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
3031 serge 1563
	     && (val & DP_PIPEB_SELECT),
1564
	     "IBX PCH dp port still using transcoder B\n");
2327 Serge 1565
}
1566
 
1567
/* Assert the PCH HDMI port at @reg is not driving @pipe. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
1579
 
1580
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1581
				      enum pipe pipe)
1582
{
1583
	u32 val;
1584
 
1585
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1586
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1587
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1588
 
6084 serge 1589
	val = I915_READ(PCH_ADPA);
1590
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1591
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1592
	     pipe_name(pipe));
1593
 
6084 serge 1594
	val = I915_READ(PCH_LVDS);
1595
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1596
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1597
	     pipe_name(pipe));
1598
 
3746 Serge 1599
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1600
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1601
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
2327 Serge 1602
}
1603
 
5354 serge 1604
static void vlv_enable_pll(struct intel_crtc *crtc,
6084 serge 1605
			   const struct intel_crtc_state *pipe_config)
4560 Serge 1606
{
4104 Serge 1607
	struct drm_device *dev = crtc->base.dev;
1608
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 1609
	i915_reg_t reg = DPLL(crtc->pipe);
5354 serge 1610
	u32 dpll = pipe_config->dpll_hw_state.dpll;
2327 Serge 1611
 
4104 Serge 1612
	assert_pipe_disabled(dev_priv, crtc->pipe);
1613
 
6084 serge 1614
	/* PLL is protected by panel, make sure we can write it */
5354 serge 1615
	if (IS_MOBILE(dev_priv->dev))
4104 Serge 1616
		assert_panel_unlocked(dev_priv, crtc->pipe);
2327 Serge 1617
 
4104 Serge 1618
	I915_WRITE(reg, dpll);
1619
	POSTING_READ(reg);
1620
	udelay(150);
2327 Serge 1621
 
4104 Serge 1622
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1623
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1624
 
5354 serge 1625
	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
4104 Serge 1626
	POSTING_READ(DPLL_MD(crtc->pipe));
1627
 
1628
	/* We do this three times for luck */
1629
	I915_WRITE(reg, dpll);
1630
	POSTING_READ(reg);
1631
	udelay(150); /* wait for warmup */
1632
	I915_WRITE(reg, dpll);
1633
	POSTING_READ(reg);
1634
	udelay(150); /* wait for warmup */
1635
	I915_WRITE(reg, dpll);
1636
	POSTING_READ(reg);
1637
	udelay(150); /* wait for warmup */
1638
}
1639
 
5354 serge 1640
static void chv_enable_pll(struct intel_crtc *crtc,
6084 serge 1641
			   const struct intel_crtc_state *pipe_config)
5060 serge 1642
{
1643
	struct drm_device *dev = crtc->base.dev;
1644
	struct drm_i915_private *dev_priv = dev->dev_private;
1645
	int pipe = crtc->pipe;
1646
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1647
	u32 tmp;
1648
 
1649
	assert_pipe_disabled(dev_priv, crtc->pipe);
1650
 
6084 serge 1651
	mutex_lock(&dev_priv->sb_lock);
5060 serge 1652
 
1653
	/* Enable back the 10bit clock to display controller */
1654
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1655
	tmp |= DPIO_DCLKP_EN;
1656
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1657
 
6084 serge 1658
	mutex_unlock(&dev_priv->sb_lock);
1659
 
5060 serge 1660
	/*
1661
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1662
	 */
1663
	udelay(1);
1664
 
1665
	/* Enable PLL */
5354 serge 1666
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
5060 serge 1667
 
1668
	/* Check PLL is locked */
1669
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1670
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1671
 
1672
	/* not sure when this should be written */
5354 serge 1673
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
5060 serge 1674
	POSTING_READ(DPLL_MD(pipe));
1675
}
1676
 
5354 serge 1677
static int intel_num_dvo_pipes(struct drm_device *dev)
1678
{
1679
	struct intel_crtc *crtc;
1680
	int count = 0;
1681
 
1682
	for_each_intel_crtc(dev, crtc)
6084 serge 1683
		count += crtc->base.state->active &&
5354 serge 1684
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1685
 
1686
	return count;
1687
}
1688
 
4104 Serge 1689
/*
 * i9xx_enable_pll - enable the DPLL for a pre-ILK pipe
 * @crtc: crtc whose pipe PLL should be enabled
 *
 * Programs and enables the pipe DPLL from crtc->config->dpll_hw_state,
 * following the documented warm-up sequence (write + 150us wait, repeated).
 * The pipe must already be disabled.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1754
 
1755
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose pipe PLL should be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Leave only VGA-mode-disable set; everything else off */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1791
 
4539 Serge 1792
/*
 * vlv_disable_pll - disable the DPLL for a VLV pipe
 * @dev_priv: i915 private structure
 * @pipe: pipe whose PLL should be disabled
 *
 * The pipe must already be disabled.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	val = DPLL_VGA_MODE_DIS;
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

}
1810
 
5060 serge 1811
/*
 * chv_disable_pll - disable the DPLL for a CHV pipe
 * @dev_priv: i915 private structure
 * @pipe: pipe whose PLL should be disabled
 *
 * Clears the PLL enable bit, then turns off the 10bit DPIO clock to the
 * display controller. The pipe must already be disabled.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* Sideband access is serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1836
 
4560 Serge 1837
/*
 * vlv_wait_port_ready - wait for a VLV/CHV digital port to report ready
 * @dev_priv: i915 private structure
 * @dport: digital port being brought up
 * @expected_mask: ready bits expected in the status register
 *
 * Polls the port-ready status bits for up to 1ms and warns on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C ready bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1866
 
5060 serge 1867
/*
 * intel_prepare_shared_dpll - set up a shared DPLL before first use
 * @crtc: crtc that will use the shared DPLL
 *
 * Runs the PLL's mode_set hook, but only when no crtc has the PLL active
 * yet (pll->active == 0); an already-running PLL must not be reprogrammed.
 */
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	WARN_ON(!pll->config.crtc_mask);
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}
1885
 
2327 Serge 1886
/**
 * intel_enable_shared_dpll - enable a shared DPLL
 * @crtc: crtc using the shared DPLL
 *
 * Enable the shared DPLL used by @crtc, taking a reference on it.  The
 * PLL needs to be enabled before the (PCH) transcoder, since it drives
 * the transcoder clock.  Only the first user actually turns the PLL on;
 * later callers just bump pll->active.
 */
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->config.crtc_mask == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* Already active for another crtc: just refcount */
	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}
1923
 
5354 serge 1924
/*
 * intel_disable_shared_dpll - drop a crtc's reference on its shared DPLL
 * @crtc: crtc releasing the shared DPLL
 *
 * Decrements pll->active; only the last user actually turns the PLL off
 * and releases the PLLS power domain.
 */
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	if (pll == NULL)
		return;

	/* This crtc must actually be listed as a user of the PLL */
	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
1960
 
3243 Serge 1961
/*
 * ironlake_enable_pch_transcoder - enable the PCH transcoder for a pipe
 * @dev_priv: i915 private structure
 * @pipe: pipe whose PCH transcoder should be enabled
 *
 * Requires the shared DPLL and both FDI TX/RX to be running already.
 * Copies BPC and interlace configuration from the CPU pipe into the
 * transcoder, then enables it and waits for the enabled status.
 */
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev));

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv,
				   intel_crtc_to_shared_dpll(intel_crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Mirror the pipe's interlace mode into the transcoder */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
2021
 
3243 Serge 2022
/*
 * lpt_enable_pch_transcoder - enable the LPT PCH transcoder
 * @dev_priv: i915 private structure
 * @cpu_transcoder: CPU transcoder feeding the PCH
 *
 * LPT has a single PCH transcoder (fed via transcoder A's FDI RX).
 * Applies the timing-override workaround, mirrors the interlace mode
 * and enables the transcoder, waiting for the enabled status.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
2052
 
2053
/*
 * ironlake_disable_pch_transcoder - disable the PCH transcoder for a pipe
 * @dev_priv: i915 private structure
 * @pipe: pipe whose PCH transcoder should be disabled
 *
 * FDI and the PCH ports must already be off. Waits for the transcoder
 * to report disabled, then clears the CPT timing-override workaround bit.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
2083
 
3243 Serge 2084
/*
 * lpt_disable_pch_transcoder - disable the LPT PCH transcoder
 * @dev_priv: i915 private structure
 *
 * Waits for the transcoder to report disabled, then clears the
 * timing-override workaround bit set at enable time.
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
2100
 
2327 Serge 2101
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pch_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* LPT has a single PCH transcoder behind transcoder A */
	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv->dev))
		if (crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* Already on: only legitimate under the pipe-force quirks */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);
}
2160
 
2161
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
2210
 
6084 serge 2211
/*
 * need_vtd_wa - does this device need the VT-d scanout workaround?
 *
 * True on gen6+ when the IOMMU is translating GPU accesses; callers pad
 * and align scanout buffers more conservatively in that case.
 */
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}
2219
 
6084 serge 2220
/*
 * intel_tile_height - tile height in scanlines for a fb format modifier
 * @dev: drm device
 * @pixel_format: fourcc of the framebuffer
 * @fb_format_modifier: tiling modifier (DRM_FORMAT_MOD_NONE, X/Y/Yf tiled)
 * @plane: colour plane index (for multi-planar formats such as NV12)
 *
 * Returns the number of rows in one tile; 1 for linear surfaces.  For
 * Yf tiling the height depends on the bytes-per-pixel of @plane.
 */
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
		  uint64_t fb_format_modifier, unsigned int plane)
{
	unsigned int tile_height;
	uint32_t pixel_bytes;

	switch (fb_format_modifier) {
	case DRM_FORMAT_MOD_NONE:
		tile_height = 1;
		break;
	case I915_FORMAT_MOD_X_TILED:
		tile_height = IS_GEN2(dev) ? 16 : 8;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		tile_height = 32;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
		switch (pixel_bytes) {
		default:
		case 1:
			tile_height = 64;
			break;
		case 2:
		case 4:
			tile_height = 32;
			break;
		case 8:
			tile_height = 16;
			break;
		case 16:
			WARN_ONCE(1,
				  "128-bit pixels are not supported for display!");
			tile_height = 16;
			break;
		}
		break;
	default:
		MISSING_CASE(fb_format_modifier);
		tile_height = 1;
		break;
	}

	return tile_height;
}
4560 Serge 2266
 
6084 serge 2267
unsigned int
2268
intel_fb_align_height(struct drm_device *dev, unsigned int height,
2269
		      uint32_t pixel_format, uint64_t fb_format_modifier)
2270
{
2271
	return ALIGN(height, intel_tile_height(dev, pixel_format,
2272
					       fb_format_modifier, 0));
2327 Serge 2273
}
2274
 
6937 serge 2275
/*
 * intel_fill_fb_ggtt_view - choose the GGTT view for scanning out a fb
 * @view: output view description
 * @fb: framebuffer to be scanned out
 * @plane_state: plane state (may be NULL)
 *
 * Defaults to the normal view; switches to the rotated view and fills in
 * its rotation_info (sizes in tiles/pages) when the plane is rotated by
 * 90/270 degrees.  NV12 additionally gets the UV plane dimensions.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct intel_rotation_info *info = &view->params.rotation_info;
	unsigned int tile_height, tile_pitch;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return;

	if (!intel_rotation_90_or_270(plane_state->rotation))
		return;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->uv_offset = fb->offsets[1];
	info->fb_modifier = fb->modifier[0];

	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
					fb->modifier[0], 0);
	/* a tile is PAGE_SIZE bytes, so width in bytes = PAGE_SIZE / height */
	tile_pitch = PAGE_SIZE / tile_height;
	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
	info->size = info->width_pages * info->height_pages * PAGE_SIZE;

	if (info->pixel_format == DRM_FORMAT_NV12) {
		/* UV plane is half-height and may have a different tiling */
		tile_height = intel_tile_height(fb->dev, fb->pixel_format,
						fb->modifier[0], 1);
		tile_pitch = PAGE_SIZE / tile_height;
		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
						     tile_height);
		info->size_uv = info->width_pages_uv * info->height_pages_uv *
				PAGE_SIZE;
	}
}
2316
 
6084 serge 2317
static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
5060 serge 2318
{
6084 serge 2319
	if (INTEL_INFO(dev_priv)->gen >= 9)
2320
		return 256 * 1024;
2321
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
6937 serge 2322
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6084 serge 2323
		return 128 * 1024;
2324
	else if (INTEL_INFO(dev_priv)->gen >= 4)
2325
		return 4 * 1024;
2326
	else
2327
		return 0;
5060 serge 2328
}
2329
 
2335 Serge 2330
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer for scanout
 * @plane: plane the fb will be shown on
 * @fb: framebuffer to pin
 * @plane_state: plane state used to pick the GGTT view (rotation)
 *
 * Pins @fb's backing object into the display-capable part of the GGTT with
 * the alignment required by its tiling modifier, and installs a fence for
 * the normal (unrotated) view.  Returns 0 on success or a negative error
 * code; on failure nothing is left pinned.  Caller holds struct_mutex.
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Alignment requirement depends on the tiling mode */
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		alignment = intel_linear_alignment(dev_priv);
		break;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else {
			/* pin() will align the object as required by fence */
			alignment = 0;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
			  "Y tiling bo slipped through, driver bug!\n"))
			return -EINVAL;
		alignment = 1 * 1024 * 1024;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
		return -EINVAL;
	}

	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2327 Serge 2425
 
6084 serge 2426
/*
 * intel_unpin_fb_obj - undo intel_pin_and_fence_fb_obj
 * @fb: framebuffer being taken off the display
 * @plane_state: plane state used to reconstruct the GGTT view
 *
 * Releases the fence (normal view only) and unpins the object from the
 * display plane.  Caller holds struct_mutex.
 */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}
2441
 
2442
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two.
 *
 * For tiled surfaces the offset is in whole 4k tiles (8 rows of 512 bytes);
 * for linear surfaces it is rounded down to the platform's base-address
 * alignment.  *x/*y are updated to address the remainder within that base.
 */
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
					     int *x, int *y,
					     unsigned int tiling_mode,
					     unsigned int cpp,
					     unsigned int pitch)
{
	if (tiling_mode != I915_TILING_NONE) {
		unsigned int tile_rows, tiles;

		/* 8 rows per tile */
		tile_rows = *y / 8;
		*y %= 8;

		/* 512 bytes (512/cpp pixels) per tile row */
		tiles = *x / (512/cpp);
		*x %= 512/cpp;

		return tile_rows * pitch * 8 + tiles * 4096;
	} else {
		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
		unsigned int offset;

		offset = *y * pitch + *x * cpp;
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
		return offset & ~alignment;
	}
}
2470
 
6084 serge 2471
static int i9xx_format_to_fourcc(int format)
2327 Serge 2472
{
5060 serge 2473
	switch (format) {
2474
	case DISPPLANE_8BPP:
2475
		return DRM_FORMAT_C8;
2476
	case DISPPLANE_BGRX555:
2477
		return DRM_FORMAT_XRGB1555;
2478
	case DISPPLANE_BGRX565:
2479
		return DRM_FORMAT_RGB565;
2480
	default:
2481
	case DISPPLANE_BGRX888:
2482
		return DRM_FORMAT_XRGB8888;
2483
	case DISPPLANE_RGBX888:
2484
		return DRM_FORMAT_XBGR8888;
2485
	case DISPPLANE_BGRX101010:
2486
		return DRM_FORMAT_XRGB2101010;
2487
	case DISPPLANE_RGBX101010:
2488
		return DRM_FORMAT_XBGR2101010;
2489
	}
2490
}
2491
 
6084 serge 2492
/*
 * Translate a SKL+ PLANE_CTL format field to a drm fourcc.
 * @rgb_order: RGB/BGR channel order bit from PLANE_CTL
 * @alpha: alpha-enable bit from PLANE_CTL
 *
 * Unrecognized values fall through to the 8888 family (note the
 * deliberate mid-switch default).
 */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}
2517
 
2518
/*
 * intel_alloc_initial_plane_obj - wrap the BIOS framebuffer in a GEM object
 * @crtc: crtc whose firmware-programmed plane config we inherited
 * @plane_config: plane config read out from the hardware
 *
 * Creates a stolen-memory GEM object covering the preallocated BIOS
 * framebuffer and initializes plane_config->fb around it, so the boot
 * image survives driver takeover.  Returns true on success.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
		return false;

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj)
		return false;

	/* Inherit the tiling the firmware programmed */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	mutex_lock(&dev->struct_mutex);
	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2576
 
6084 serge 2577
/* Update plane->state->fb to match plane->fb after driver-internal updates */
2578
static void
2579
update_state_fb(struct drm_plane *plane)
5060 serge 2580
{
6084 serge 2581
	if (plane->fb == plane->state->fb)
2582
		return;
2583
 
2584
	if (plane->state->fb)
2585
		drm_framebuffer_unreference(plane->state->fb);
2586
	plane->state->fb = plane->fb;
2587
	if (plane->state->fb)
2588
		drm_framebuffer_reference(plane->state->fb);
2589
}
2590
 
2591
/*
 * Take over the framebuffer the BIOS/GOP left enabled on this CRTC, so the
 * boot image stays on screen.  First try to wrap the preallocated stolen
 * memory in a GEM object; failing that, look for another CRTC scanning out
 * of the same GGTT base and share its fb.  If both fail, disable the
 * primary plane so later state checks don't trip over a visible plane with
 * a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct drm_framebuffer *fb;

	/* Nothing was read out for this pipe. */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* Alloc failed: the readout-allocated fb wrapper is now useless. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* Same GGTT base => the BIOS pointed both pipes at one fb. */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Full-screen scanout of the recovered fb, no panning/scaling. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	obj = intel_fb_obj(fb);
	/* Keep the BIOS swizzle setting so the tiled fb keeps scanning out. */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	/* One reference for the plane, shared by legacy and atomic state. */
	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2675
 
2676
/*
 * Program the gen2-4/VLV/CHV primary plane registers (DSPCNTR and friends)
 * for @fb panned to (@x, @y), or disable the plane when it is not visible
 * or has no fb.  Assumes the fb object is already pinned and fenced.
 */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int pixel_size;

	/* Plane off: clear control and surface/address, then flush. */
	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV pipe B primary plane has its own size/pos registers. */
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Translate the fb pixel format into a DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Formats are pre-validated, so anything else is a driver bug. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+ scans out from a page-aligned surface base plus a
		 * tile/linear offset; split linear_offset accordingly. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(dev_priv,
						       &x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF write latches the whole register set on gen4+. */
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2805
 
5060 serge 2806
/*
 * Program the ILK..BDW primary plane registers for @fb panned to (@x, @y),
 * or disable the plane when it is not visible or has no fb.  Assumes the
 * fb object is already pinned and fenced.
 */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int pixel_size;

	/* Plane off: clear control and surface, then flush. */
	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the fb pixel format into a DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Formats are pre-validated, so anything else is a driver bug. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Split the pan offset into a page-aligned surface base + remainder. */
	linear_offset = y * fb->pitches[0] + x * pixel_size;
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(dev_priv,
					       &x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW rotate about the plane centre in hardware, so no
		 * offset adjustment is needed there. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2909
 
6084 serge 2910
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2911
			      uint32_t pixel_format)
2912
{
2913
	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2914
 
2915
	/*
2916
	 * The stride is either expressed as a multiple of 64 bytes
2917
	 * chunks for linear buffers or in number of tiles for tiled
2918
	 * buffers.
2919
	 */
2920
	switch (fb_modifier) {
2921
	case DRM_FORMAT_MOD_NONE:
2922
		return 64;
2923
	case I915_FORMAT_MOD_X_TILED:
2924
		if (INTEL_INFO(dev)->gen == 2)
2925
			return 128;
2926
		return 512;
2927
	case I915_FORMAT_MOD_Y_TILED:
2928
		/* No need to check for old gens and Y tiling since this is
2929
		 * about the display engine and those will be blocked before
2930
		 * we get here.
2931
		 */
2932
		return 128;
2933
	case I915_FORMAT_MOD_Yf_TILED:
2934
		if (bits_per_pixel == 8)
2935
			return 64;
2936
		else
2937
			return 128;
2938
	default:
2939
		MISSING_CASE(fb_modifier);
2940
		return 64;
2941
	}
2942
}
2943
 
6660 serge 2944
/*
 * Return the low 32 bits of the GGTT offset the hardware should scan out
 * from for @obj as bound through the view matching the plane's current fb
 * state.  For @plane == 1 the offset is advanced to the start of the UV
 * pages of a rotated NV12-style layout.  Returns -1 (all ones) and warns
 * if no matching GGTT vma exists.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
				     struct drm_i915_gem_object *obj,
				     unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	/* Derive the (normal or rotated) view from the plane state. */
	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotation_info.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display registers only take a 32 bit offset. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2971
 
2972
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2973
{
2974
	struct drm_device *dev = intel_crtc->base.dev;
2975
	struct drm_i915_private *dev_priv = dev->dev_private;
2976
 
2977
	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2978
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2979
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2980
}
2981
 
2982
/*
2983
 * This function detaches (aka. unbinds) unused scalers in hardware
2984
 */
2985
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2986
{
2987
	struct intel_crtc_scaler_state *scaler_state;
2988
	int i;
2989
 
2990
	scaler_state = &intel_crtc->config->scaler_state;
2991
 
2992
	/* loop through and disable scalers that aren't in use */
2993
	for (i = 0; i < intel_crtc->num_scalers; i++) {
2994
		if (!scaler_state->scalers[i].in_use)
2995
			skl_detach_scaler(intel_crtc, i);
2996
	}
2997
}
2998
 
2999
/* Translate a DRM fourcc pixel format into SKL PLANE_CTL format bits. */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	u32 ctl;

	switch (pixel_format) {
	case DRM_FORMAT_C8:
		ctl = PLANE_CTL_FORMAT_INDEXED;
		break;
	case DRM_FORMAT_RGB565:
		ctl = PLANE_CTL_FORMAT_RGB_565;
		break;
	case DRM_FORMAT_XBGR8888:
		ctl = PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
		break;
	case DRM_FORMAT_XRGB8888:
		ctl = PLANE_CTL_FORMAT_XRGB_8888;
		break;
	/*
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		ctl = PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
		      PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_ARGB8888:
		ctl = PLANE_CTL_FORMAT_XRGB_8888 |
		      PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_XRGB2101010:
		ctl = PLANE_CTL_FORMAT_XRGB_2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		ctl = PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
		break;
	case DRM_FORMAT_YUYV:
		ctl = PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
		break;
	case DRM_FORMAT_YVYU:
		ctl = PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
		break;
	case DRM_FORMAT_UYVY:
		ctl = PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
		break;
	case DRM_FORMAT_VYUY:
		ctl = PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
		break;
	default:
		MISSING_CASE(pixel_format);
		ctl = 0;
		break;
	}

	return ctl;
}
3039
 
3040
/* Translate a framebuffer tiling modifier into SKL PLANE_CTL tiling bits. */
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	if (fb_modifier == I915_FORMAT_MOD_X_TILED)
		return PLANE_CTL_TILED_X;
	if (fb_modifier == I915_FORMAT_MOD_Y_TILED)
		return PLANE_CTL_TILED_Y;
	if (fb_modifier == I915_FORMAT_MOD_Yf_TILED)
		return PLANE_CTL_TILED_YF;

	/* Linear buffers need no tiling bits set. */
	if (fb_modifier != DRM_FORMAT_MOD_NONE)
		MISSING_CASE(fb_modifier);

	return 0;
}
3057
 
3058
u32 skl_plane_ctl_rotation(unsigned int rotation)
3059
{
3060
	switch (rotation) {
3061
	case BIT(DRM_ROTATE_0):
3062
		break;
3063
	/*
3064
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
3065
	 * while i915 HW rotation is clockwise, thats why this swapping.
3066
	 */
3067
	case BIT(DRM_ROTATE_90):
3068
		return PLANE_CTL_ROTATE_270;
3069
	case BIT(DRM_ROTATE_180):
3070
		return PLANE_CTL_ROTATE_180;
3071
	case BIT(DRM_ROTATE_270):
3072
		return PLANE_CTL_ROTATE_90;
3073
	default:
3074
		MISSING_CASE(rotation);
3075
	}
3076
 
3077
	return 0;
3078
}
3079
 
5354 serge 3080
/*
 * Program the SKL+ universal plane 0 (primary) registers for @fb, including
 * rotation and an optional pipe scaler, or disable the plane when it is not
 * visible or has no fb.  Assumes the fb object is already pinned.
 */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = crtc->primary;
	bool visible = to_intel_plane_state(plane->state)->visible;
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	struct intel_crtc_state *crtc_state = intel_crtc->config;
	struct intel_plane_state *plane_state;
	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
	int scaler_id = -1;

	plane_state = to_intel_plane_state(plane->state);

	/* Plane off: clear control and surface, then flush. */
	if (!visible || !fb) {
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

	rotation = plane->state->rotation;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	obj = intel_fb_obj(fb);
	/* PLANE_STRIDE is programmed in units of this alignment. */
	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	/* src rect is 16.16 fixed point, dst rect is integer pixels. */
	scaler_id = plane_state->scaler_id;
	src_x = plane_state->src.x1 >> 16;
	src_y = plane_state->src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->src) >> 16;
	src_h = drm_rect_height(&plane_state->src) >> 16;
	dst_x = plane_state->dst.x1;
	dst_y = plane_state->dst.y1;
	dst_w = drm_rect_width(&plane_state->dst);
	dst_h = drm_rect_height(&plane_state->dst);

	WARN_ON(x != src_x || y != src_y);

	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev, fb->pixel_format,
						fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* Offsets and width/height are swapped for the rotated view. */
		x_offset = stride * tile_height - y - src_h;
		y_offset = x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = x;
		y_offset = y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		/* Route the plane through its assigned pipe scaler; the
		 * scaler window then provides the output position/size. */
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* PLANE_SURF write latches all the plane registers. */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3184
 
2327 Serge 3185
/* Assume fb object is pinned & idle & fenced and just update base pointers */
3186
static int
3187
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3188
			   int x, int y, enum mode_set_atomic state)
3189
{
3190
	struct drm_device *dev = crtc->dev;
3191
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3192
 
6937 serge 3193
	if (dev_priv->fbc.deactivate)
3194
		dev_priv->fbc.deactivate(dev_priv);
3031 serge 3195
 
5060 serge 3196
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
3197
 
3198
	return 0;
3031 serge 3199
}
3200
 
5354 serge 3201
static void intel_complete_page_flips(struct drm_device *dev)
4104 Serge 3202
{
3203
	struct drm_crtc *crtc;
3204
 
5060 serge 3205
	for_each_crtc(dev, crtc) {
4104 Serge 3206
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3207
		enum plane plane = intel_crtc->plane;
3208
 
3209
		intel_prepare_page_flip(dev, plane);
3210
		intel_finish_page_flip_plane(dev, plane);
3211
	}
5354 serge 3212
}
4104 Serge 3213
 
5354 serge 3214
static void intel_update_primary_planes(struct drm_device *dev)
3215
{
3216
	struct drm_crtc *crtc;
3217
 
5060 serge 3218
	for_each_crtc(dev, crtc) {
6084 serge 3219
		struct intel_plane *plane = to_intel_plane(crtc->primary);
3220
		struct intel_plane_state *plane_state;
4104 Serge 3221
 
6084 serge 3222
		drm_modeset_lock_crtc(crtc, &plane->base);
3223
		plane_state = to_intel_plane_state(plane->base.state);
3224
 
6937 serge 3225
		if (crtc->state->active && plane_state->base.fb)
6084 serge 3226
			plane->commit_plane(&plane->base, plane_state);
3227
 
3228
		drm_modeset_unlock_crtc(crtc);
4104 Serge 3229
	}
3230
}
3231
 
5354 serge 3232
void intel_prepare_reset(struct drm_device *dev)
3233
{
3234
	/* no reset support for gen2 */
3235
	if (IS_GEN2(dev))
3236
		return;
3237
 
3238
	/* reset doesn't touch the display */
3239
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3240
		return;
3241
 
3242
	drm_modeset_lock_all(dev);
3243
	/*
3244
	 * Disabling the crtcs gracefully seems nicer. Also the
3245
	 * g33 docs say we should at least disable all the planes.
3246
	 */
6084 serge 3247
	intel_display_suspend(dev);
5354 serge 3248
}
3249
 
3250
/*
 * Bring the display back after a GPU reset.  Counterpart of
 * intel_prepare_reset(); releases the modeset locks taken there on the
 * platforms where the display had to be suspended.
 */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev);

	intel_hpd_init(dev_priv);

	/* Drops the locks taken in intel_prepare_reset(). */
	drm_modeset_unlock_all(dev);
}
3300
 
5060 serge 3301
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
4104 Serge 3302
{
3303
	struct drm_device *dev = crtc->dev;
5060 serge 3304
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3305
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 3306
	bool pending;
4104 Serge 3307
 
5060 serge 3308
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3309
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3310
		return false;
4104 Serge 3311
 
5354 serge 3312
	spin_lock_irq(&dev->event_lock);
5060 serge 3313
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
5354 serge 3314
	spin_unlock_irq(&dev->event_lock);
4104 Serge 3315
 
5060 serge 3316
	return pending;
4104 Serge 3317
}
2327 Serge 3318
 
6084 serge 3319
/*
 * Apply pipe-level config changes (pipe source size, CSC, panel fitter)
 * that can be done without a full modeset, comparing against
 * @old_crtc_state to decide what needs touching.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	if (HAS_DDI(dev))
		intel_set_pipe_csc(&crtc->base);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3363
 
3364
/*
 * Switch the FDI link of @crtc's pipe from the training patterns to the
 * normal pixel-sending mode, on both the CPU (TX) and PCH (RX) sides.
 * Called after link training has completed successfully.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a different train-select field in FDI_TX_CTL. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3405
 
3406
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll FDI_RX_IIR for bit lock (pattern 1 trained). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write back the set bit to acknowledge/clear it. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll FDI_RX_IIR for symbol lock (pattern 2 trained). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3499
 
2342 Serge 3500
/*
 * FDI voltage-swing / pre-emphasis levels tried in order during SNB-B
 * (and IVB manual) link training; indexed by the training-loop counter.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3506
 
3507
/* The FDI link training functions for SNB/Cougarpoint. */
3508
/*
 * Train the FDI link for SNB/Cougarpoint: run training pattern 1 until
 * bit lock, then pattern 2 until symbol lock, stepping through the
 * snb_b_fdi_train_param vswing/pre-emphasis table on each failure.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	/* read-back flushes the posted write before the settle delay */
	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* CPT PCH uses dedicated pattern bits; older PCH reuses the
	 * TRAIN_NONE/PATTERN field */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/pre-emphasis table until bit lock is reported */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write-1-to-clear the sticky lock bit */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same vswing sweep as train 1, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3639
 
3640
/* Manual link training for Ivy Bridge A0 parts */
3641
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each table entry is tried on two consecutive passes */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* second read catches a lock that landed between
			 * the first read and the check */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3759
 
3031 serge 3760
/*
 * Bring up the FDI PLLs for this pipe: PCH RX PLL first (with lane count
 * and BPC mirrored from PIPECONF), switch the RX clock source to PCDclk,
 * then enable the CPU TX PLL if it is not already running.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC into bits 18:16 of FDI RX */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3796
 
3031 serge 3797
/*
 * Tear down the FDI PLLs in the reverse order of ironlake_fdi_pll_enable:
 * RX back to Rawclk, then TX PLL off, then RX PLL off.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3826
 
2327 Serge 3827
/*
 * Disable the FDI link for this pipe: stop TX and RX, apply the IBX
 * clock-pointer workaround, and leave both sides parked in training
 * pattern 1 with the BPC field kept consistent with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3879
 
5060 serge 3880
/*
 * Returns true if any CRTC still has framebuffer unpin work outstanding.
 * If the first busy CRTC also has an armed unpin_work, wait one vblank on
 * it before reporting (gives in-flight flips a chance to complete).
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		/* first CRTC with a nonzero unpin count decides the answer */
		return true;
	}

	return false;
}
3903
 
6283 serge 3904
/*
 * Finish a page flip on this CRTC: detach the unpin work, deliver the
 * userspace vblank event (if requested), drop the vblank reference, wake
 * waiters on pending_flip_queue and queue the deferred unpin work.
 * NOTE(review): callers appear to hold dev->event_lock here (see
 * intel_crtc_wait_for_pending_flips) - confirm before adding new callers.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
6320 serge 3926
 
6937 serge 3927
/*
 * Wait (interruptibly, up to 60s) for all pending page flips on @crtc to
 * complete. On timeout, forcibly completes a stuck flip under the event
 * lock. Returns 0 on success or a negative errno if interrupted.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	/* ret == 0 means the 60 second timeout expired */
	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		spin_lock_irq(&dev->event_lock);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
6937 serge 3956
 
3957
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3958
{
3959
	u32 temp;
3960
 
3961
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3962
 
3963
	mutex_lock(&dev_priv->sb_lock);
3964
 
3965
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3966
	temp |= SBI_SSCCTL_DISABLE;
3967
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3968
 
3969
	mutex_unlock(&dev_priv->sb_lock);
2327 Serge 3970
}
3971
 
3031 serge 3972
/* Program iCLKIP clock to the desired frequency */
3973
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Modulator must be off while the divisors are reprogrammed */
	lpt_disable_iclkip(dev_priv);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in kHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
		/* integer part goes to DIVSEL, remainder to the phase increment */
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4051
 
4104 Serge 4052
/*
 * Copy the CPU transcoder's H/V timing registers verbatim into the PCH
 * transcoder @pch_transcoder so both sides of the FDI link agree.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4075
 
6084 serge 4076
/*
 * Set the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1 to @enable.
 * No-op when the bit already matches; must not be toggled while FDI RX
 * on pipe B or C is enabled (WARNed).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t chicken = I915_READ(SOUTH_CHICKEN1);
	bool current_state = (chicken & FDI_BC_BIFURCATION_SELECT) != 0;

	if (current_state == enable)
		return; /* already in the requested state */

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	if (enable)
		chicken |= FDI_BC_BIFURCATION_SELECT;
	else
		chicken &= ~FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, chicken);
	POSTING_READ(SOUTH_CHICKEN1);
}
4096
 
4097
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4098
{
4099
	struct drm_device *dev = intel_crtc->base.dev;
4100
 
4101
	switch (intel_crtc->pipe) {
4102
	case PIPE_A:
4103
		break;
4104
	case PIPE_B:
6084 serge 4105
		if (intel_crtc->config->fdi_lanes > 2)
4106
			cpt_set_fdi_bc_bifurcation(dev, false);
4280 Serge 4107
		else
6084 serge 4108
			cpt_set_fdi_bc_bifurcation(dev, true);
4280 Serge 4109
 
4110
		break;
4111
	case PIPE_C:
6084 serge 4112
		cpt_set_fdi_bc_bifurcation(dev, true);
4280 Serge 4113
 
4114
		break;
4115
	default:
4116
		BUG();
4117
	}
4118
}
4119
 
6937 serge 4120
/* Return which DP Port should be selected for Transcoder DP control */
4121
/*
 * Return the digital port feeding DP (or eDP) on @crtc, for programming
 * TRANS_DP_CTL's port-select field.
 * NOTE(review): returns -1 when no DP/eDP encoder is attached, which is
 * not a declared enum port value - the only caller hits BUG() in that
 * case; consider a PORT_NONE-style sentinel if one exists. TODO confirm.
 */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	return -1;
}
4135
 
2327 Serge 4136
/*
4137
 * Enable PCH resources required for PCH ports:
4138
 *   - PCH PLLs
4139
 *   - FDI training & RX/TX
4140
 *   - update transcoder timings
4141
 *   - DP transcoding bits
4142
 *   - transcoder
4143
 */
4144
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* re-enable underrun reporting now that training is over */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* route the DP transcoder to whichever port drives DP/eDP */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4240
 
3243 Serge 4241
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, mirror the CPU
 * transcoder timings into the single PCH transcoder (always A on LPT),
 * then enable it.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4257
 
6084 serge 4258
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4259
						struct intel_crtc_state *crtc_state)
3031 serge 4260
{
4104 Serge 4261
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
5354 serge 4262
	struct intel_shared_dpll *pll;
6084 serge 4263
	struct intel_shared_dpll_config *shared_dpll;
4104 Serge 4264
	enum intel_dpll_id i;
6084 serge 4265
	int max = dev_priv->num_shared_dpll;
3031 serge 4266
 
6084 serge 4267
	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4268
 
3031 serge 4269
	if (HAS_PCH_IBX(dev_priv->dev)) {
4270
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4104 Serge 4271
		i = (enum intel_dpll_id) crtc->pipe;
4272
		pll = &dev_priv->shared_dplls[i];
3031 serge 4273
 
4104 Serge 4274
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4275
			      crtc->base.base.id, pll->name);
3031 serge 4276
 
6084 serge 4277
		WARN_ON(shared_dpll[i].crtc_mask);
5060 serge 4278
 
3031 serge 4279
		goto found;
4280
	}
4281
 
6084 serge 4282
	if (IS_BROXTON(dev_priv->dev)) {
4283
		/* PLL is attached to port in bxt */
4284
		struct intel_encoder *encoder;
4285
		struct intel_digital_port *intel_dig_port;
4286
 
4287
		encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4288
		if (WARN_ON(!encoder))
4289
			return NULL;
4290
 
4291
		intel_dig_port = enc_to_dig_port(&encoder->base);
4292
		/* 1:1 mapping between ports and PLLs */
4293
		i = (enum intel_dpll_id)intel_dig_port->port;
4104 Serge 4294
		pll = &dev_priv->shared_dplls[i];
6084 serge 4295
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4296
			crtc->base.base.id, pll->name);
4297
		WARN_ON(shared_dpll[i].crtc_mask);
3031 serge 4298
 
6084 serge 4299
		goto found;
4300
	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4301
		/* Do not consider SPLL */
4302
		max = 2;
4303
 
4304
	for (i = 0; i < max; i++) {
4305
		pll = &dev_priv->shared_dplls[i];
4306
 
3031 serge 4307
		/* Only want to check enabled timings first */
6084 serge 4308
		if (shared_dpll[i].crtc_mask == 0)
3031 serge 4309
			continue;
4310
 
6084 serge 4311
		if (memcmp(&crtc_state->dpll_hw_state,
4312
			   &shared_dpll[i].hw_state,
4313
			   sizeof(crtc_state->dpll_hw_state)) == 0) {
5354 serge 4314
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4315
				      crtc->base.base.id, pll->name,
6084 serge 4316
				      shared_dpll[i].crtc_mask,
5354 serge 4317
				      pll->active);
3031 serge 4318
			goto found;
4319
		}
4320
	}
4321
 
4322
	/* Ok no matching timings, maybe there's a free one? */
4104 Serge 4323
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4324
		pll = &dev_priv->shared_dplls[i];
6084 serge 4325
		if (shared_dpll[i].crtc_mask == 0) {
4104 Serge 4326
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4327
				      crtc->base.base.id, pll->name);
3031 serge 4328
			goto found;
4329
		}
4330
	}
4331
 
4332
	return NULL;
4333
 
4334
found:
6084 serge 4335
	if (shared_dpll[i].crtc_mask == 0)
4336
		shared_dpll[i].hw_state =
4337
			crtc_state->dpll_hw_state;
5060 serge 4338
 
6084 serge 4339
	crtc_state->shared_dpll = i;
4104 Serge 4340
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4341
			 pipe_name(crtc->pipe));
4342
 
6084 serge 4343
	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
3031 serge 4344
 
4345
	return pll;
4346
}
4347
 
6084 serge 4348
static void intel_shared_dpll_commit(struct drm_atomic_state *state)
5354 serge 4349
{
6084 serge 4350
	struct drm_i915_private *dev_priv = to_i915(state->dev);
4351
	struct intel_shared_dpll_config *shared_dpll;
5354 serge 4352
	struct intel_shared_dpll *pll;
4353
	enum intel_dpll_id i;
4354
 
6084 serge 4355
	if (!to_intel_atomic_state(state)->dpll_set)
4356
		return;
4357
 
4358
	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
5354 serge 4359
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4360
		pll = &dev_priv->shared_dplls[i];
6084 serge 4361
		pll->config = shared_dpll[i];
4362
	}
4363
}
5354 serge 4364
 
6084 serge 4365
/*
 * Sanity-check that the pipe's scanline counter (PIPEDSL) is advancing
 * after a modeset; retries the 5ms wait once before reporting a stuck
 * pipe.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
5354 serge 4378
 
6084 serge 4379
/*
 * skl_update_scaler - stage allocation/release of one SKL scaler in sw state.
 * @crtc_state: crtc state whose scaler bookkeeping is updated
 * @force_detach: release the scaler regardless of whether scaling is needed
 * @scaler_user: index identifying the requesting user (plane or crtc)
 * @scaler_id: in/out; currently assigned scaler id, set to -1 on release
 * @rotation: rotation of the source; 90/270 swaps the source axes
 * @src_w, @src_h, @dst_w, @dst_h: source and destination rectangle sizes
 *
 * Only software state in @crtc_state is touched here; the scaler registers
 * themselves are programmed later during plane/panel-fitter programming.
 * Returns 0 on success, -EINVAL if the requested size is out of range.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* For 90/270 rotation the source is sampled sideways, so compare the
	 * swapped source dimensions against the destination. */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4439
 
6084 serge 4440
/**
4441
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4442
 *
4443
 * @state: crtc's scaler state
4444
 *
4445
 * Return
4446
 *     0 - scaler_usage updated successfully
4447
 *    error - requested scaling cannot be supported or other error condition
4448
 */
4449
int skl_update_scaler_crtc(struct intel_crtc_state *state)
5354 serge 4450
{
6084 serge 4451
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4452
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5354 serge 4453
 
6084 serge 4454
	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4455
		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
5354 serge 4456
 
6084 serge 4457
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
6660 serge 4458
		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
6084 serge 4459
		state->pipe_src_w, state->pipe_src_h,
4460
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
5354 serge 4461
}
4462
 
6084 serge 4463
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc state the plane belongs to
 * @plane_state: atomic plane state to update
 *
 * Stages a scaler request (or release) for the plane, then validates that
 * the plane's color-key setting and framebuffer format are compatible with
 * scaling.
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* No fb or an invisible plane means the scaler must be released. */
	bool force_detach = !fb || !plane_state->visible;

	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
		      intel_plane->base.base.id, intel_crtc->pipe,
		      drm_plane_index(&intel_plane->base));

	/* src rect is in 16.16 fixed point, hence the >> 16. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->src) >> 16,
				drm_rect_height(&plane_state->src) >> 16,
				drm_rect_width(&plane_state->dst),
				drm_rect_height(&plane_state->dst));

	/* Done unless a scaler was actually assigned to this plane. */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
			      intel_plane->base.base.id);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}
4530
 
6084 serge 4531
static void skylake_scaler_disable(struct intel_crtc *crtc)
2342 Serge 4532
{
6084 serge 4533
	int i;
2342 Serge 4534
 
6084 serge 4535
	for (i = 0; i < crtc->num_scalers; i++)
4536
		skl_detach_scaler(crtc, i);
2342 Serge 4537
}
4538
 
5354 serge 4539
/*
 * Program the pipe scaler that was allocated for panel fitting on SKL+.
 * Requires a scaler to have been assigned beforehand (scaler_id >= 0).
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* A pfit request without a staged scaler is a driver bug. */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4566
 
4104 Serge 4567
static void ironlake_pfit_enable(struct intel_crtc *crtc)
4568
{
4569
	struct drm_device *dev = crtc->base.dev;
4570
	struct drm_i915_private *dev_priv = dev->dev_private;
4571
	int pipe = crtc->pipe;
4572
 
6084 serge 4573
	if (crtc->config->pch_pfit.enabled) {
4104 Serge 4574
		/* Force use of hard-coded filter coefficients
4575
		 * as some pre-programmed values are broken,
4576
		 * e.g. x201.
4577
		 */
4578
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4579
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4580
						 PF_PIPE_SEL_IVB(pipe));
4581
		else
4582
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
6084 serge 4583
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4584
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4104 Serge 4585
	}
4586
}
4587
 
4560 Serge 4588
/*
 * Enable IPS (Intermediate Pixel Storage) for this crtc, if configured.
 * On BDW the enable goes through the pcode mailbox; on HSW it is a direct
 * IPS_CTL write that only takes effect at the next vblank.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4620
 
4621
/*
 * Disable IPS for this crtc. Mirrors hsw_enable_ips(): pcode mailbox on
 * BDW, direct IPS_CTL write on HSW, followed by a vblank wait so the
 * plane may be disabled safely afterwards.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4645
 
4646
/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	/* On GMCH platforms sanity-check that the relevant PLL is running. */
	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Write all 256 entries as packed 8-bit R/G/B values. */
	for (i = 0; i < 256; i++) {
		i915_reg_t palreg;

		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
4694
 
6084 serge 4695
/*
 * Turn off the legacy video overlay attached to this crtc, if any.
 * NOTE(review): the actual intel_overlay_switch_off() call is commented out
 * in this KolibriOS port, so only the interruptible-mode toggling remains —
 * confirm whether overlay teardown is intentionally a no-op here.
 */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
4712
 
6084 serge 4713
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
4753
 
6084 serge 4754
/**
 * intel_pre_disable_primary - Perform operations before disabling primary plane
 * @crtc: the CRTC whose primary plane is to be disabled
 *
 * Performs potentially sleeping operations that must be done before the
 * primary plane is disabled, such as updating FBC and IPS.  Note that this may
 * be called due to an explicit primary plane update, or due to an implicit
 * disable that is caused when a sprite plane completely hides the primary
 * plane.
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
5060 serge 4804
 
6084 serge 4805
/*
 * Run the deferred work recorded in crtc->atomic after the plane update has
 * been committed (vblank wait, frontbuffer flip, watermarks, FBC, IPS),
 * then clear the per-commit scratch state.
 */
static void intel_post_plane_update(struct intel_crtc *crtc)
{
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_device *dev = crtc->base.dev;

	if (atomic->wait_vblank)
		intel_wait_for_vblank(dev, crtc->pipe);

	intel_frontbuffer_flip(dev, atomic->fb_bits);

	crtc->wm.cxsr_allowed = true;

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(&crtc->base);

	if (atomic->update_fbc)
		intel_fbc_update(crtc);

	if (atomic->post_enable_primary)
		intel_post_enable_primary(&crtc->base);

	/* The scratch state is single-use; wipe it for the next commit. */
	memset(atomic, 0, sizeof(*atomic));
}
4830
 
4831
/*
 * Run the deferred work recorded in crtc->atomic before the plane update is
 * committed (FBC/IPS teardown, primary-plane pre-disable, cxsr disable,
 * pre-update watermarks).
 */
static void intel_pre_plane_update(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	if (atomic->disable_fbc)
		intel_fbc_deactivate(crtc);

	if (crtc->atomic.disable_ips)
		hsw_disable_ips(crtc);

	if (atomic->pre_disable_primary)
		intel_pre_disable_primary(&crtc->base);

	if (pipe_config->disable_cxsr) {
		crtc->wm.cxsr_allowed = false;
		intel_set_memory_cxsr(dev_priv, false);
	}

	/* Plane-only updates still get their pre-update watermarks here;
	 * full modesets program watermarks elsewhere. */
	if (!needs_modeset(&pipe_config->base) && pipe_config->update_wm_pre)
		intel_update_watermarks(&crtc->base);
}
4856
 
4857
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4858
{
4859
	struct drm_device *dev = crtc->dev;
4860
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4861
	struct drm_plane *p;
4862
	int pipe = intel_crtc->pipe;
4863
 
4864
	intel_crtc_dpms_overlay_disable(intel_crtc);
4865
 
4866
	drm_for_each_plane_mask(p, dev, plane_mask)
4867
		to_intel_plane(p)->disable_plane(p, crtc);
4868
 
5354 serge 4869
	/*
4870
	 * FIXME: Once we grow proper nuclear flip support out of this we need
4871
	 * to compute the mask of flip planes precisely. For the time being
4872
	 * consider this a flip to a NULL plane.
4873
	 */
6320 serge 4874
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
5060 serge 4875
}
4876
 
2327 Serge 4877
static void ironlake_crtc_enable(struct drm_crtc *crtc)
4878
{
6084 serge 4879
	struct drm_device *dev = crtc->dev;
4880
	struct drm_i915_private *dev_priv = dev->dev_private;
4881
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4882
	struct intel_encoder *encoder;
6084 serge 4883
	int pipe = intel_crtc->pipe;
2327 Serge 4884
 
6084 serge 4885
	if (WARN_ON(intel_crtc->active))
4886
		return;
3031 serge 4887
 
6084 serge 4888
	if (intel_crtc->config->has_pch_encoder)
6937 serge 4889
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4890
 
4891
	if (intel_crtc->config->has_pch_encoder)
5060 serge 4892
		intel_prepare_shared_dpll(intel_crtc);
4893
 
6084 serge 4894
	if (intel_crtc->config->has_dp_encoder)
4895
		intel_dp_set_m_n(intel_crtc, M1_N1);
5060 serge 4896
 
4897
	intel_set_pipe_timings(intel_crtc);
4898
 
6084 serge 4899
	if (intel_crtc->config->has_pch_encoder) {
5060 serge 4900
		intel_cpu_transcoder_set_m_n(intel_crtc,
6084 serge 4901
				     &intel_crtc->config->fdi_m_n, NULL);
5060 serge 4902
	}
4903
 
4904
	ironlake_set_pipeconf(crtc);
4905
 
6084 serge 4906
	intel_crtc->active = true;
4104 Serge 4907
 
5354 serge 4908
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4104 Serge 4909
 
4910
	for_each_encoder_on_crtc(dev, crtc, encoder)
4911
		if (encoder->pre_enable)
4912
			encoder->pre_enable(encoder);
2327 Serge 4913
 
6084 serge 4914
	if (intel_crtc->config->has_pch_encoder) {
3243 Serge 4915
		/* Note: FDI PLL enabling _must_ be done before we enable the
4916
		 * cpu pipes, hence this is separate from all the other fdi/pch
4917
		 * enabling. */
3031 serge 4918
		ironlake_fdi_pll_enable(intel_crtc);
4919
	} else {
4920
		assert_fdi_tx_disabled(dev_priv, pipe);
4921
		assert_fdi_rx_disabled(dev_priv, pipe);
4922
	}
2327 Serge 4923
 
4104 Serge 4924
	ironlake_pfit_enable(intel_crtc);
3031 serge 4925
 
6084 serge 4926
	/*
4927
	 * On ILK+ LUT must be loaded before the pipe is running but with
4928
	 * clocks enabled
4929
	 */
4930
	intel_crtc_load_lut(crtc);
2327 Serge 4931
 
4560 Serge 4932
	intel_update_watermarks(crtc);
5060 serge 4933
	intel_enable_pipe(intel_crtc);
2327 Serge 4934
 
6084 serge 4935
	if (intel_crtc->config->has_pch_encoder)
4936
		ironlake_pch_enable(crtc);
2327 Serge 4937
 
6084 serge 4938
	assert_vblank_disabled(crtc);
4939
	drm_crtc_vblank_on(crtc);
4940
 
3031 serge 4941
	for_each_encoder_on_crtc(dev, crtc, encoder)
4942
		encoder->enable(encoder);
4943
 
4944
	if (HAS_PCH_CPT(dev))
4104 Serge 4945
		cpt_verify_modeset(dev, intel_crtc->pipe);
6937 serge 4946
 
4947
	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
4948
	if (intel_crtc->config->has_pch_encoder)
4949
		intel_wait_for_vblank(dev, pipe);
4950
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4951
 
4952
	intel_fbc_enable(intel_crtc);
2327 Serge 4953
}
4954
 
4104 Serge 4955
/* IPS only exists on ULT machines and is tied to pipe A. */
4956
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4957
{
4958
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4959
}
4960
 
3243 Serge 4961
/*
 * Enable a CRTC on Haswell-class (DDI) hardware. As with the ILK path the
 * statement order follows the hardware enable sequence and must not be
 * rearranged: timings/M/N first, then pipe clock, panel fitter, palette,
 * transcoder function, pipe, and finally the encoders.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	if (WARN_ON(intel_crtc->active))
		return;

	/* Suppress PCH underrun reports while the output comes up. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	/* CPU underrun reporting stays off until the PCH side is stable. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* Wait two frames before trusting the link, then re-arm underruns. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev, pipe);
		intel_wait_for_vblank(dev, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}

	intel_fbc_enable(intel_crtc);
}
5068
 
6084 serge 5069
/* Disable the PCH panel fitter; with @force the registers are cleared
 * even when the current state says the pfit is unused. */
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!force && !crtc->config->pch_pfit.enabled)
		return;

	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_POS(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);
}
5083
 
2327 Serge 5084
/*
 * Disable a CRTC on Ironlake-class hardware: encoders first, then the pipe,
 * panel fitter, FDI and finally the PCH transcoder. Statement order follows
 * the hardware disable sequence and must not be rearranged.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_fdi_disable(crtc);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_fbc_disable_crtc(intel_crtc);
}
5150
 
3243 Serge 5151
/*
 * Disable a CRTC on Haswell-class (DDI) hardware: encoders, pipe, transcoder
 * function, scaler/pfit, pipe clock, and finally the LPT PCH side. Statement
 * order follows the hardware disable sequence and must not be rearranged.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		lpt_disable_iclkip(dev_priv);
		intel_ddi_fdi_disable(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	intel_fbc_disable_crtc(intel_crtc);
}
5202
 
4104 Serge 5203
/*
 * Enable the GMCH panel fitter for @crtc if the current pipe config
 * requests one (gmch_pfit.control non-zero).  Must run while the pipe
 * is still disabled.
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	/* No panel fitting requested for this pipe config. */
	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* Ratios are programmed before the control/enable register. */
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
5226
 
5060 serge 5227
static enum intel_display_power_domain port_to_power_domain(enum port port)
4560 Serge 5228
{
5060 serge 5229
	switch (port) {
5230
	case PORT_A:
6937 serge 5231
		return POWER_DOMAIN_PORT_DDI_A_LANES;
5060 serge 5232
	case PORT_B:
6937 serge 5233
		return POWER_DOMAIN_PORT_DDI_B_LANES;
5060 serge 5234
	case PORT_C:
6937 serge 5235
		return POWER_DOMAIN_PORT_DDI_C_LANES;
5060 serge 5236
	case PORT_D:
6937 serge 5237
		return POWER_DOMAIN_PORT_DDI_D_LANES;
6084 serge 5238
	case PORT_E:
6937 serge 5239
		return POWER_DOMAIN_PORT_DDI_E_LANES;
5060 serge 5240
	default:
6084 serge 5241
		MISSING_CASE(port);
5060 serge 5242
		return POWER_DOMAIN_PORT_OTHER;
5243
	}
5244
}
5245
 
6084 serge 5246
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5247
{
5248
	switch (port) {
5249
	case PORT_A:
5250
		return POWER_DOMAIN_AUX_A;
5251
	case PORT_B:
5252
		return POWER_DOMAIN_AUX_B;
5253
	case PORT_C:
5254
		return POWER_DOMAIN_AUX_C;
5255
	case PORT_D:
5256
		return POWER_DOMAIN_AUX_D;
5257
	case PORT_E:
5258
		/* FIXME: Check VBT for actual wiring of PORT E */
5259
		return POWER_DOMAIN_AUX_D;
5260
	default:
5261
		MISSING_CASE(port);
5262
		return POWER_DOMAIN_AUX_A;
5263
	}
5264
}
5265
 
5060 serge 5266
/*
 * Return the power domain needed to drive the lanes of @intel_encoder's
 * port (or the CRT/DSI/other domain for non-digital outputs).
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through - treated like the other digital port types */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams share the primary digital port's domain. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5292
 
6084 serge 5293
/*
 * Return the power domain needed for AUX channel transactions on
 * @intel_encoder's port.  Falls back to AUX A for unexpected types.
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through - resolve via the digital port like DP/eDP */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams use the primary digital port's AUX channel. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}
5322
 
5060 serge 5323
/*
 * Compute the bitmask of power domains needed by @crtc in its current
 * state: the pipe, its transcoder, the panel fitter (if in use) and the
 * port domains of every attached encoder.  Returns 0 for an inactive crtc.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder = intel_crtc->config->cpu_transcoder;

	/* An inactive crtc needs no power domains at all. */
	if (!crtc->state->active)
		return 0;

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	/* Panel fitter domain is needed when enabled or forced through. */
	if (intel_crtc->config->pch_pfit.enabled ||
	    intel_crtc->config->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		mask |= BIT(intel_display_port_power_domain(intel_encoder));

	return mask;
}
5346
 
6084 serge 5347
static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
5060 serge 5348
{
6084 serge 5349
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5350
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5351
	enum intel_display_power_domain domain;
5352
	unsigned long domains, new_domains, old_domains;
5060 serge 5353
 
6084 serge 5354
	old_domains = intel_crtc->enabled_power_domains;
5355
	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
5060 serge 5356
 
6084 serge 5357
	domains = new_domains & ~old_domains;
5060 serge 5358
 
6084 serge 5359
	for_each_power_domain(domain, domains)
5360
		intel_display_power_get(dev_priv, domain);
5060 serge 5361
 
6084 serge 5362
	return old_domains & ~new_domains;
5363
}
5060 serge 5364
 
6084 serge 5365
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5366
				      unsigned long domains)
5367
{
5368
	enum intel_display_power_domain domain;
5354 serge 5369
 
6084 serge 5370
	for_each_power_domain(domain, domains)
5371
		intel_display_power_put(dev_priv, domain);
5372
}
5060 serge 5373
 
6084 serge 5374
/*
 * Update power domain references across an atomic modeset: first acquire
 * the domains each modified crtc will need (remembering which old ones
 * become unused), then commit a cdclk change if requested, and only then
 * release the no-longer-needed domains.  The get-before-put ordering
 * keeps shared domains powered throughout.
 */
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/* Only crtcs undergoing a full modeset change their domains. */
		if (needs_modeset(crtc->state))
			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc);
	}

	if (dev_priv->display.modeset_commit_cdclk) {
		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;

		/* A cdclk change is only legal when modesets are allowed. */
		if (cdclk != dev_priv->cdclk_freq &&
		    !WARN_ON(!state->allow_modeset))
			dev_priv->display.modeset_commit_cdclk(state);
	}

	/* Now it is safe to release the domains that became unused. */
	for (i = 0; i < I915_MAX_PIPES; i++)
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);
}
5401
 
6084 serge 5402
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5060 serge 5403
{
6084 serge 5404
	int max_cdclk_freq = dev_priv->max_cdclk_freq;
4560 Serge 5405
 
6084 serge 5406
	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5407
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5408
		return max_cdclk_freq;
5409
	else if (IS_CHERRYVIEW(dev_priv))
5410
		return max_cdclk_freq*95/100;
5411
	else if (INTEL_INFO(dev_priv)->gen < 4)
5412
		return 2*max_cdclk_freq*90/100;
5413
	else
5414
		return max_cdclk_freq*90/100;
5415
}
4560 Serge 5416
 
6084 serge 5417
/*
 * Determine the maximum CD clock the platform supports and cache it in
 * dev_priv->max_cdclk_freq, then derive max_dotclk_freq from it.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* SKL/KBL fuse the cdclk limit into SKL_DFSM. */
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5464
 
6084 serge 5465
/*
 * Refresh the cached CD clock frequency from the hardware, reprogram the
 * VLV/CHV GMBUS clock divider accordingly, and compute max_cdclk_freq on
 * the first call.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->cdclk_freq);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/*
		 * Program the gmbus_freq based on the cdclk frequency.
		 * BSpec erroneously claims we should aim for 4MHz, but
		 * in fact 1MHz is the correct frequency.
		 */
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
	}

	/* Lazily compute the platform maximum on the first update. */
	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);
}
5490
 
6084 serge 5491
static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5492
{
5493
	struct drm_i915_private *dev_priv = dev->dev_private;
5494
	uint32_t divider;
5495
	uint32_t ratio;
5496
	uint32_t current_freq;
5497
	int ret;
5498
 
5499
	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5500
	switch (frequency) {
5501
	case 144000:
5502
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5503
		ratio = BXT_DE_PLL_RATIO(60);
5504
		break;
5505
	case 288000:
5506
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5507
		ratio = BXT_DE_PLL_RATIO(60);
5508
		break;
5509
	case 384000:
5510
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5511
		ratio = BXT_DE_PLL_RATIO(60);
5512
		break;
5513
	case 576000:
5514
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5515
		ratio = BXT_DE_PLL_RATIO(60);
5516
		break;
5517
	case 624000:
5518
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5519
		ratio = BXT_DE_PLL_RATIO(65);
5520
		break;
5521
	case 19200:
5522
		/*
5523
		 * Bypass frequency with DE PLL disabled. Init ratio, divider
5524
		 * to suppress GCC warning.
5525
		 */
5526
		ratio = 0;
5527
		divider = 0;
5528
		break;
5529
	default:
5530
		DRM_ERROR("unsupported CDCLK freq %d", frequency);
5531
 
5532
		return;
5533
	}
5534
 
5535
	mutex_lock(&dev_priv->rps.hw_lock);
5536
	/* Inform power controller of upcoming frequency change */
5537
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5538
				      0x80000000);
5539
	mutex_unlock(&dev_priv->rps.hw_lock);
5540
 
5541
	if (ret) {
5542
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5543
			  ret, frequency);
5544
		return;
5545
	}
5546
 
5547
	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5548
	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5549
	current_freq = current_freq * 500 + 1000;
5550
 
5551
	/*
5552
	 * DE PLL has to be disabled when
5553
	 * - setting to 19.2MHz (bypass, PLL isn't used)
5554
	 * - before setting to 624MHz (PLL needs toggling)
5555
	 * - before setting to any frequency from 624MHz (PLL needs toggling)
5556
	 */
5557
	if (frequency == 19200 || frequency == 624000 ||
5558
	    current_freq == 624000) {
5559
		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5560
		/* Timeout 200us */
5561
		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5562
			     1))
5563
			DRM_ERROR("timout waiting for DE PLL unlock\n");
5564
	}
5565
 
5566
	if (frequency != 19200) {
5567
		uint32_t val;
5568
 
5569
		val = I915_READ(BXT_DE_PLL_CTL);
5570
		val &= ~BXT_DE_PLL_RATIO_MASK;
5571
		val |= ratio;
5572
		I915_WRITE(BXT_DE_PLL_CTL, val);
5573
 
5574
		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5575
		/* Timeout 200us */
5576
		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5577
			DRM_ERROR("timeout waiting for DE PLL lock\n");
5578
 
5579
		val = I915_READ(CDCLK_CTL);
5580
		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5581
		val |= divider;
5582
		/*
5583
		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5584
		 * enable otherwise.
5585
		 */
5586
		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5587
		if (frequency >= 500000)
5588
			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5589
 
5590
		val &= ~CDCLK_FREQ_DECIMAL_MASK;
5591
		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5592
		val |= (frequency - 1000) / 500;
5593
		I915_WRITE(CDCLK_CTL, val);
5594
	}
5595
 
5596
	mutex_lock(&dev_priv->rps.hw_lock);
5597
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5598
				      DIV_ROUND_UP(frequency, 25000));
5599
	mutex_unlock(&dev_priv->rps.hw_lock);
5600
 
5601
	if (ret) {
5602
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5603
			  ret, frequency);
5604
		return;
5605
	}
5606
 
5607
	intel_update_cdclk(dev);
5608
}
5609
 
5610
/*
 * One-time BXT display clock bring-up: disable the PCH reset handshake,
 * power up PG1, enable the DE PLL/CDCLK (unless BIOS already did) and
 * request DBuf power.
 */
void broxton_init_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val;

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 for cdclk */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* check if cd clock is enabled */
	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
		DRM_DEBUG_KMS("Display already initialized\n");
		return;
	}

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 * - check if setting the max (or any) cdclk freq is really necessary
	 *   here, it belongs to modeset time
	 */
	broxton_set_cdclk(dev, 624000);

	/* Request DBuf power and wait ~10us for it to come up. */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout!\n");
}
5651
 
5652
/*
 * Tear down the BXT display clocks: drop DBuf power, switch the CD clock
 * to the 19.2 MHz bypass (which turns off the DE PLL) and release PG1.
 */
void broxton_uninit_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");

	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
	broxton_set_cdclk(dev, 19200);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5669
 
5670
/*
 * Supported SKL CD clock frequencies (kHz) and the DPLL0 VCO (MHz)
 * required to produce each of them.
 */
static const struct skl_cdclk_entry {
	unsigned int freq;	/* CD clock in kHz */
	unsigned int vco;	/* required DPLL0 VCO in MHz */
} skl_cdclk_frequencies[] = {
	{ .freq = 308570, .vco = 8640 },
	{ .freq = 337500, .vco = 8100 },
	{ .freq = 432000, .vco = 8640 },
	{ .freq = 450000, .vco = 8100 },
	{ .freq = 540000, .vco = 8100 },
	{ .freq = 617140, .vco = 8640 },
	{ .freq = 675000, .vco = 8100 },
};
5682
 
5683
/*
 * Encode a CD clock frequency (kHz) into the CDCLK_CTL decimal field:
 * .1 MHz fixed point with a -1 MHz offset.
 */
static unsigned int skl_cdclk_decimal(unsigned int freq)
{
	unsigned int khz_above_1mhz = freq - 1000;

	return khz_above_1mhz / 500;
}
5687
 
5688
static unsigned int skl_cdclk_get_vco(unsigned int freq)
5689
{
5690
	unsigned int i;
5691
 
5692
	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5693
		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5694
 
5695
		if (e->freq == freq)
5696
			return e->vco;
5697
	}
5698
 
5699
	return 8100;
5700
}
5701
 
5702
static void
5703
skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5704
{
5705
	unsigned int min_freq;
5706
	u32 val;
5707
 
5708
	/* select the minimum CDCLK before enabling DPLL 0 */
5709
	val = I915_READ(CDCLK_CTL);
5710
	val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5711
	val |= CDCLK_FREQ_337_308;
5712
 
5713
	if (required_vco == 8640)
5714
		min_freq = 308570;
5715
	else
5716
		min_freq = 337500;
5717
 
5718
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5719
 
5720
	I915_WRITE(CDCLK_CTL, val);
5721
	POSTING_READ(CDCLK_CTL);
5722
 
5723
	/*
5724
	 * We always enable DPLL0 with the lowest link rate possible, but still
5725
	 * taking into account the VCO required to operate the eDP panel at the
5726
	 * desired frequency. The usual DP link rates operate with a VCO of
5727
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5728
	 * The modeset code is responsible for the selection of the exact link
5729
	 * rate later on, with the constraint of choosing a frequency that
5730
	 * works with required_vco.
5731
	 */
5732
	val = I915_READ(DPLL_CTRL1);
5733
 
5734
	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5735
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5736
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5737
	if (required_vco == 8640)
5738
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5739
					    SKL_DPLL0);
5740
	else
5741
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5742
					    SKL_DPLL0);
5743
 
5744
	I915_WRITE(DPLL_CTRL1, val);
5745
	POSTING_READ(DPLL_CTRL1);
5746
 
5747
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5748
 
5749
	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5750
		DRM_ERROR("DPLL0 not locked\n");
5751
}
5752
 
5753
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5754
{
5755
	int ret;
5756
	u32 val;
5757
 
5758
	/* inform PCU we want to change CDCLK */
5759
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5760
	mutex_lock(&dev_priv->rps.hw_lock);
5761
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5762
	mutex_unlock(&dev_priv->rps.hw_lock);
5763
 
5764
	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5765
}
5766
 
5767
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5768
{
5769
	unsigned int i;
5770
 
5771
	for (i = 0; i < 15; i++) {
5772
		if (skl_cdclk_pcu_ready(dev_priv))
5773
			return true;
5774
		udelay(10);
5775
	}
5776
 
5777
	return false;
5778
}
5779
 
5780
/*
 * Program the SKL CD clock to @freq (kHz): wait for PCU permission,
 * write CDCLK_CTL, then send the matching ack value back to the PCU.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
	struct drm_device *dev = dev_priv->dev;
	u32 freq_select, pcu_ack;

	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch(freq) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308570:
	case 337500:
	default:
		/*
		 * NOTE: the default label deliberately sits mid-switch so
		 * unknown frequencies share the 337.5/308.57 MHz selection.
		 */
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617140:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5826
 
5827
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5828
{
5829
	/* disable DBUF power */
5830
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5831
	POSTING_READ(DBUF_CTL);
5832
 
5833
	udelay(10);
5834
 
5835
	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5836
		DRM_ERROR("DBuf power disable timeout\n");
5837
 
5838
		/* disable DPLL0 */
6937 serge 5839
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
6084 serge 5840
		if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5841
			DRM_ERROR("Couldn't disable DPLL0\n");
5842
	}
5843
 
5844
/*
 * Bring up the SKL display clocks: enable DPLL0 if BIOS left it off,
 * set the CD clock to the BIOS-chosen boot frequency, and power up DBuf.
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	unsigned int required_vco;

	/* DPLL0 not enabled (happens on early BIOS versions) */
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
		/* enable DPLL0 */
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
		skl_dpll0_enable(dev_priv, required_vco);
	}

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}
5867
 
6937 serge 5868
/*
 * Verify the pre-OS (BIOS) CD clock configuration and reprogram it if it
 * looks wrong.  Returns non-zero (true) when sanitization was performed,
 * 0 (false) when the BIOS state was acceptable.  Note: declared int but
 * used as a boolean.
 */
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	int freq = dev_priv->skl_boot_cdclk;

	/*
	 * check if the pre-os intialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	/* Is PLL enabled and locked ? */
	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
		/* All well; nothing to sanitize */
		return false;
sanitize:
	/*
	 * As of now initialize with max cdclk till
	 * we get dynamic cdclk support
	 * */
	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
	skl_init_cdclk(dev_priv);

	/* we did have to sanitize */
	return true;
}
5906
 
4560 Serge 5907
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached frequency must match the hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	/* Pick the Punit voltage level for the requested frequency. */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the voltage level from the Punit and wait for the ack. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	/* Only 400 MHz requires reprogramming the CCK divider. */
	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}
5972
 
5354 serge 5973
/*
 * Program the CHV CD clock.  Unlike VLV, CHV only needs the desired CCK
 * divider written into the Punit DSPFREQ register.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached frequency must match the hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
6013
 
4560 Serge 6014
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
6015
				 int max_pixclk)
6016
{
5354 serge 6017
	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
6084 serge 6018
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
4560 Serge 6019
 
6020
	/*
6021
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
6022
	 *   200MHz
6023
	 *   267MHz
5060 serge 6024
	 *   320/333MHz (depends on HPLL freq)
6084 serge 6025
	 *   400MHz (VLV only)
6026
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
6027
	 * of the lower bin and adjust if needed.
5060 serge 6028
	 *
6029
	 * We seem to get an unstable or solid color picture at 200MHz.
6030
	 * Not sure what's wrong. For now use 200MHz only when all pipes
6031
	 * are off.
4560 Serge 6032
	 */
6084 serge 6033
	if (!IS_CHERRYVIEW(dev_priv) &&
6034
	    max_pixclk > freq_320*limit/100)
5060 serge 6035
		return 400000;
6084 serge 6036
	else if (max_pixclk > 266667*limit/100)
5060 serge 6037
		return freq_320;
6038
	else if (max_pixclk > 0)
6039
		return 266667;
6040
	else
6041
		return 200000;
4560 Serge 6042
}
6043
 
6084 serge 6044
/*
 * Pick the lowest BXT CD clock whose 90% guardband still covers
 * @max_pixclk.  @dev_priv is unused but kept for signature symmetry
 * with the other *_calc_cdclk helpers.
 *
 * FIXME:
 * - remove the guardband, it's not needed on BXT
 * - set 19.2MHz bypass frequency if there are no active pipes
 */
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
			      int max_pixclk)
{
	static const int bxt_cdclk_steps[] = { 144000, 288000, 384000, 576000 };
	int i;

	for (i = 0; i < 4; i++) {
		if (max_pixclk <= bxt_cdclk_steps[i] * 9 / 10)
			return bxt_cdclk_steps[i];
	}

	return 624000;
}
6063
 
6064
/* Compute the max pixel clock for new configuration. Uses atomic state if
 * that's non-NULL, look at current state otherwise.
 * Returns the max crtc_clock across all enabled crtcs, or a negative
 * error if fetching a crtc state from the atomic state fails. */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Disabled crtcs contribute no pixel clock. */
		if (!crtc_state->base.enable)
			continue;

		max_pixclk = max(max_pixclk,
				 crtc_state->base.adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}
6087
 
6084 serge 6088
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
4560 Serge 6089
{
6084 serge 6090
	struct drm_device *dev = state->dev;
4560 Serge 6091
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 6092
	int max_pixclk = intel_mode_max_pixclk(dev, state);
4560 Serge 6093
 
6084 serge 6094
	if (max_pixclk < 0)
6095
		return max_pixclk;
4560 Serge 6096
 
6084 serge 6097
	to_intel_atomic_state(state)->cdclk =
6098
		valleyview_calc_cdclk(dev_priv, max_pixclk);
6099
 
6100
	return 0;
4560 Serge 6101
}
6102
 
6084 serge 6103
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
4560 Serge 6104
{
6084 serge 6105
	struct drm_device *dev = state->dev;
4560 Serge 6106
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 6107
	int max_pixclk = intel_mode_max_pixclk(dev, state);
4560 Serge 6108
 
6084 serge 6109
	if (max_pixclk < 0)
6110
		return max_pixclk;
5354 serge 6111
 
6084 serge 6112
	to_intel_atomic_state(state)->cdclk =
6113
		broxton_calc_cdclk(dev_priv, max_pixclk);
6114
 
6115
	return 0;
6116
}
6117
 
6118
/*
 * Program the PFI (display memory arbiter) credits for VLV/CHV after a
 * CDCLK change. Credits depend on whether CDCLK runs at or above CZCLK.
 * NOTE: the two GCI_CONTROL writes form a workaround sequence — default
 * credits must be written before the new value; do not reorder.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6153
 
6154
/*
 * Commit the CDCLK frequency computed during the check phase. The actual
 * programming is bracketed by a PIPE-A power-domain get/put so the
 * required HW blocks are powered (see FIXME below), and PFI credits are
 * reprogrammed to match the new frequency.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	/* Arbiter credits must track the new CDCLK:CZCLK ratio. */
	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6180
 
4104 Serge 6181
static void valleyview_crtc_enable(struct drm_crtc *crtc)
6182
{
6183
	struct drm_device *dev = crtc->dev;
5354 serge 6184
	struct drm_i915_private *dev_priv = to_i915(dev);
4104 Serge 6185
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6186
	struct intel_encoder *encoder;
6187
	int pipe = intel_crtc->pipe;
6188
 
6084 serge 6189
	if (WARN_ON(intel_crtc->active))
4104 Serge 6190
		return;
6191
 
6084 serge 6192
	if (intel_crtc->config->has_dp_encoder)
6193
		intel_dp_set_m_n(intel_crtc, M1_N1);
5060 serge 6194
 
6195
	intel_set_pipe_timings(intel_crtc);
6196
 
5354 serge 6197
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6198
		struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 6199
 
5354 serge 6200
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6201
		I915_WRITE(CHV_CANVAS(pipe), 0);
6202
	}
6203
 
5060 serge 6204
	i9xx_set_pipeconf(intel_crtc);
6205
 
4104 Serge 6206
	intel_crtc->active = true;
6207
 
5354 serge 6208
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5060 serge 6209
 
4104 Serge 6210
	for_each_encoder_on_crtc(dev, crtc, encoder)
6211
		if (encoder->pre_pll_enable)
6212
			encoder->pre_pll_enable(encoder);
6213
 
6937 serge 6214
	if (!intel_crtc->config->has_dsi_encoder) {
6084 serge 6215
		if (IS_CHERRYVIEW(dev)) {
6216
			chv_prepare_pll(intel_crtc, intel_crtc->config);
6217
			chv_enable_pll(intel_crtc, intel_crtc->config);
6218
		} else {
6219
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
6220
			vlv_enable_pll(intel_crtc, intel_crtc->config);
6221
		}
5060 serge 6222
	}
4104 Serge 6223
 
6224
	for_each_encoder_on_crtc(dev, crtc, encoder)
6225
		if (encoder->pre_enable)
6226
			encoder->pre_enable(encoder);
6227
 
6228
	i9xx_pfit_enable(intel_crtc);
6229
 
6230
	intel_crtc_load_lut(crtc);
6231
 
6937 serge 6232
	intel_update_watermarks(crtc);
5060 serge 6233
	intel_enable_pipe(intel_crtc);
4104 Serge 6234
 
5354 serge 6235
	assert_vblank_disabled(crtc);
6236
	drm_crtc_vblank_on(crtc);
6237
 
6084 serge 6238
	for_each_encoder_on_crtc(dev, crtc, encoder)
6239
		encoder->enable(encoder);
4104 Serge 6240
}
6241
 
5060 serge 6242
/* Write the precomputed FP0/FP1 divider values for @crtc's pipe. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
6250
 
2327 Serge 6251
/*
 * Gen2-4 (non-VLV) CRTC enable sequence: PLL dividers, timings,
 * pipeconf, underrun reporting (not on gen2), encoder pre_enable hooks,
 * PLL, panel fitter, LUT, watermarks, pipe + vblank, encoder enable,
 * and finally FBC. Step order mirrors the HW bring-up sequence.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* Gen2 has no FIFO underrun interrupt support. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_fbc_enable(intel_crtc);
}
6297
 
3746 Serge 6298
/*
 * Turn off the GMCH panel fitter for @crtc. No-op when the current
 * config never enabled it; the pipe must already be disabled.
 */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6312
 
2327 Serge 6313
/*
 * Gen2-4/VLV/CHV CRTC disable sequence — the reverse of the enable
 * path: wait a vblank (double-buffered plane regs), encoder disable,
 * vblank off, pipe off, pfit off, encoder post_disable, PLL off (unless
 * DSI owns it), encoder post_pll_disable, underrun reporting off
 * (not gen2), FBC off. Step ordering is mandated by the hardware.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI encoders manage the DPLL themselves. */
	if (!intel_crtc->config->has_dsi_encoder) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_fbc_disable_crtc(intel_crtc);
}
6361
 
6084 serge 6362
/*
 * Disable a CRTC outside of an atomic commit (e.g. during HW state
 * sanitization): tear down the visible primary plane, run the
 * platform crtc_disable hook, update watermarks, release the shared
 * DPLL, and drop every power domain the CRTC was holding.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (!intel_crtc->active)
		return;

	if (to_intel_plane_state(crtc->primary->state)->visible) {
		/* No page flip may be pending while we tear the plane down. */
		WARN_ON(intel_crtc->unpin_work);

		intel_pre_disable_primary(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->active = false;
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;
}
2327 Serge 6391
 
6084 serge 6392
/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	unsigned crtc_mask = 0;
	int ret = 0;

	if (WARN_ON(!ctx))
		return 0;

	lockdep_assert_held(&ctx->ww_ctx);
	state = drm_atomic_state_alloc(dev);
	if (WARN_ON(!state))
		return -ENOMEM;

	state->acquire_ctx = ctx;
	state->allow_modeset = true;

	/* Mark every currently-active CRTC inactive in the new state. */
	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto free;

		if (!crtc_state->active)
			continue;

		crtc_state->active = false;
		crtc_mask |= 1 << drm_crtc_index(crtc);
	}

	if (crtc_mask) {
		ret = drm_atomic_commit(state);

		if (!ret) {
			/*
			 * Commit succeeded and consumed @state; restore the
			 * software 'active' flags so resume can re-enable
			 * exactly these CRTCs. Do NOT free state here.
			 */
			for_each_crtc(dev, crtc)
				if (crtc_mask & (1 << drm_crtc_index(crtc)))
					crtc->state->active = true;

			return ret;
		}
	}

free:
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	/* Failure (or nothing to do): we still own @state, free it. */
	drm_atomic_state_free(state);
	return ret;
}
2327 Serge 6449
 
3031 serge 6450
/*
 * Default encoder destroy hook: unregister the DRM encoder and free
 * the containing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_enc;

	intel_enc = to_intel_encoder(encoder);
	drm_encoder_cleanup(encoder);
	kfree(intel_enc);
}
2327 Serge 6457
 
3031 serge 6458
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* HW says the connector is on: sw state must agree. */
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST connectors share encoders; skip the 1:1 checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* HW says off: nothing active may still point at us. */
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
2327 Serge 6496
 
6084 serge 6497
int intel_connector_init(struct intel_connector *connector)
2330 Serge 6498
{
6937 serge 6499
	drm_atomic_helper_connector_reset(&connector->base);
2342 Serge 6500
 
6937 serge 6501
	if (!connector->base.state)
6084 serge 6502
		return -ENOMEM;
3031 serge 6503
 
6084 serge 6504
	return 0;
6505
}
3031 serge 6506
 
6084 serge 6507
struct intel_connector *intel_connector_alloc(void)
6508
{
6509
	struct intel_connector *connector;
3031 serge 6510
 
6084 serge 6511
	connector = kzalloc(sizeof *connector, GFP_KERNEL);
6512
	if (!connector)
6513
		return NULL;
6514
 
6515
	if (intel_connector_init(connector) < 0) {
6516
		kfree(connector);
6517
		return NULL;
6518
	}
6519
 
6520
	return connector;
2330 Serge 6521
}
2327 Serge 6522
 
3031 serge 6523
/* Simple connector->get_hw_state implementation for encoders that support only
6524
 * one connector and no cloning and hence the encoder state determines the state
6525
 * of the connector. */
6526
bool intel_connector_get_hw_state(struct intel_connector *connector)
2330 Serge 6527
{
3031 serge 6528
	enum pipe pipe = 0;
6529
	struct intel_encoder *encoder = connector->encoder;
2330 Serge 6530
 
3031 serge 6531
	return encoder->get_hw_state(encoder, &pipe);
2330 Serge 6532
}
6533
 
6084 serge 6534
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
4104 Serge 6535
{
6084 serge 6536
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6537
		return crtc_state->fdi_lanes;
4104 Serge 6538
 
6084 serge 6539
	return 0;
6540
}
6541
 
6542
/*
 * Validate the FDI lane count requested for @pipe against per-platform
 * limits and, on 3-pipe Ivybridge, against the lanes consumed by the
 * sibling pipe sharing the FDI link (B and C share: B > 2 lanes forbids
 * C; C is capped at 2 and needs B <= 2). Returns 0, -EINVAL, or an
 * errno from acquiring the sibling's atomic state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW FDI (for the LPT PCH) only has 2 lanes. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no lane sharing to worry about. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* B wants > 2 lanes: pipe C must then use none. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* C is usable only while pipe B stays at <= 2 lanes. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6612
 
6613
/* Sentinel return value: caller must redo the whole compute pass. */
#define RETRY 1
/*
 * Compute FDI lane count and M/N values for @pipe_config. If the lane
 * check fails on bandwidth, reduce pipe_bpp by 2 bits/component (down
 * to a floor of 6 bpc) and retry; returns RETRY once a reduction was
 * needed so the caller recomputes with the constrained bpp, otherwise
 * 0 or a negative errno.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
				       intel_crtc->pipe, pipe_config);
	/* 6*3 = 18 bpp (6 bpc) is the minimum we will degrade to. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6659
 
6084 serge 6660
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6661
				     struct intel_crtc_state *pipe_config)
6662
{
6663
	if (pipe_config->pipe_bpp > 24)
6664
		return false;
6665
 
6666
	/* HSW can handle pixel rate up to cdclk? */
6667
	if (IS_HASWELL(dev_priv->dev))
6668
		return true;
6669
 
6670
	/*
6671
	 * We compare against max which means we must take
6672
	 * the increased cdclk requirement into account when
6673
	 * calculating the new cdclk.
6674
	 *
6675
	 * Should measure whether using a lower cdclk w/o IPS
6676
	 */
6677
	return ilk_pipe_pixel_rate(pipe_config) <=
6678
		dev_priv->max_cdclk_freq * 95 / 100;
6679
}
6680
 
4104 Serge 6681
static void hsw_compute_ips_config(struct intel_crtc *crtc,
6084 serge 6682
				   struct intel_crtc_state *pipe_config)
4104 Serge 6683
{
6084 serge 6684
	struct drm_device *dev = crtc->base.dev;
6685
	struct drm_i915_private *dev_priv = dev->dev_private;
6686
 
5060 serge 6687
	pipe_config->ips_enabled = i915.enable_ips &&
6084 serge 6688
		hsw_crtc_supports_ips(crtc) &&
6689
		pipe_config_supports_ips(dev_priv, pipe_config);
4104 Serge 6690
}
6691
 
6937 serge 6692
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6693
{
6694
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6695
 
6696
	/* GDG double wide on either pipe, otherwise pipe A only */
6697
	return INTEL_INFO(dev_priv)->gen < 4 &&
6698
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6699
}
6700
 
4104 Serge 6701
static int intel_crtc_compute_config(struct intel_crtc *crtc,
6084 serge 6702
				     struct intel_crtc_state *pipe_config)
4104 Serge 6703
{
6704
	struct drm_device *dev = crtc->base.dev;
5354 serge 6705
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 6706
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
4104 Serge 6707
 
4560 Serge 6708
	/* FIXME should check pixel clock limits on all platforms */
6709
	if (INTEL_INFO(dev)->gen < 4) {
6937 serge 6710
		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
4560 Serge 6711
 
6712
		/*
6937 serge 6713
		 * Enable double wide mode when the dot clock
4560 Serge 6714
		 * is > 90% of the (display) core speed.
6715
		 */
6937 serge 6716
		if (intel_crtc_supports_double_wide(crtc) &&
6717
		    adjusted_mode->crtc_clock > clock_limit) {
4560 Serge 6718
			clock_limit *= 2;
6719
			pipe_config->double_wide = true;
6720
		}
6721
 
6937 serge 6722
		if (adjusted_mode->crtc_clock > clock_limit) {
6723
			DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6724
				      adjusted_mode->crtc_clock, clock_limit,
6725
				      yesno(pipe_config->double_wide));
4104 Serge 6726
			return -EINVAL;
2330 Serge 6727
	}
6937 serge 6728
	}
2330 Serge 6729
 
4560 Serge 6730
	/*
6731
	 * Pipe horizontal size must be even in:
6732
	 * - DVO ganged mode
6733
	 * - LVDS dual channel mode
6734
	 * - Double wide pipe
6735
	 */
6084 serge 6736
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
4560 Serge 6737
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6738
		pipe_config->pipe_src_w &= ~1;
6739
 
4104 Serge 6740
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
6741
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
3031 serge 6742
	 */
6743
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6084 serge 6744
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
4104 Serge 6745
		return -EINVAL;
3031 serge 6746
 
4104 Serge 6747
	if (HAS_IPS(dev))
6748
		hsw_compute_ips_config(crtc, pipe_config);
6749
 
6750
	if (pipe_config->has_pch_encoder)
6751
		return ironlake_fdi_compute_config(crtc, pipe_config);
6752
 
6753
	return 0;
2330 Serge 6754
}
6755
 
6084 serge 6756
/*
 * Read back the current SKL CDCLK frequency in kHz by decoding
 * LCPLL1/CDCLK_CTL, distinguishing the 8640 vs 8100 MHz VCO via the
 * DPLL0 link rate. Falls back to the 24 MHz NSSC reference frequency
 * when DPLL0 is off or the selection is unrecognized.
 */
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t linkrate;

	if (!(lcpll1 & LCPLL_PLL_ENABLE))
		return 24000; /* 24MHz is the cd freq with NSSC ref */

	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
		return 540000;

	linkrate = (I915_READ(DPLL_CTRL1) &
		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;

	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
		/* vco 8640 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308570;
		case CDCLK_FREQ_675_617:
			return 617140;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	} else {
		/* vco 8100 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	}

	/* error case, do as if DPLL0 isn't enabled */
	return 24000;
}
5060 serge 6802
 
6084 serge 6803
/*
 * Read back the current BXT CDCLK frequency in kHz: 19.2 MHz when the
 * DE PLL is off, otherwise (19200 * ratio / 2) scaled by the CD2X
 * divider selection in CDCLK_CTL.
 */
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
	int cdclk;

	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
		return 19200;

	cdclk = 19200 * pll_ratio / 2;

	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		return cdclk;  /* 576MHz or 624MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		return cdclk * 2 / 3; /* 384MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		return cdclk / 2; /* 288MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		return cdclk / 4; /* 144MHz */
	}

	/* error case, do as if DE PLL isn't enabled */
	return 19200;
}
6830
 
6084 serge 6831
static int broadwell_get_display_clock_speed(struct drm_device *dev)
6832
{
6833
	struct drm_i915_private *dev_priv = dev->dev_private;
6834
	uint32_t lcpll = I915_READ(LCPLL_CTL);
6835
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6836
 
6837
	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6838
		return 800000;
6839
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6840
		return 450000;
6841
	else if (freq == LCPLL_CLK_FREQ_450)
6842
		return 450000;
6843
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6844
		return 540000;
6845
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6846
		return 337500;
6847
	else
6848
		return 675000;
6849
}
6850
 
6851
static int haswell_get_display_clock_speed(struct drm_device *dev)
6852
{
6853
	struct drm_i915_private *dev_priv = dev->dev_private;
6854
	uint32_t lcpll = I915_READ(LCPLL_CTL);
6855
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6856
 
6857
	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6858
		return 800000;
6859
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6860
		return 450000;
6861
	else if (freq == LCPLL_CLK_FREQ_450)
6862
		return 450000;
6863
	else if (IS_HSW_ULT(dev))
6864
		return 337500;
6865
	else
6866
		return 540000;
6867
}
6868
 
6869
/* VLV/CHV: CDCLK comes from the CCK display clock control. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
				      CCK_DISPLAY_CLOCK_CONTROL);
}
6874
 
6875
/* ILK: fixed 450 MHz display core clock. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}
6879
 
2327 Serge 6880
/* i945: fixed 400 MHz display core clock. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
6884
 
6885
/* i915: fixed 333.333 MHz display core clock. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}
6889
 
6890
/* Remaining i9xx variants: fixed 200 MHz display core clock. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
6894
 
4104 Serge 6895
/*
 * Pineview: decode the display core clock from the GCFGC PCI config
 * register. An unknown encoding logs an error and deliberately falls
 * through to the 133 MHz case.
 */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fallthrough - treat unknown selections as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}
6918
 
2327 Serge 6919
static int i915gm_get_display_clock_speed(struct drm_device *dev)
6920
{
6921
	u16 gcfgc = 0;
6922
 
6923
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6924
 
6925
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6084 serge 6926
		return 133333;
2327 Serge 6927
	else {
6928
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6929
		case GC_DISPLAY_CLOCK_333_MHZ:
6084 serge 6930
			return 333333;
2327 Serge 6931
		default:
6932
		case GC_DISPLAY_CLOCK_190_200_MHZ:
6933
			return 190000;
6934
		}
6935
	}
6936
}
6937
 
6938
/* i865: fixed 266.667 MHz display core clock. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}
6942
 
6084 serge 6943
/*
 * i85x: decode the display core clock from the HPLLCC register on PCI
 * device 0:0.3. NOTE(review): in this KolibriOS port the PCI bus read
 * is commented out, so hpllcc stays 0 and the switch always takes the
 * GC_CLOCK_133_200 path (200 MHz) — confirm this is intentional.
 */
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (dev->pdev->revision == 0x1)
		return 133333;

//   pci_bus_read_config_word(dev->pdev->bus,
//                PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		return 266667;
	}

	/* Shouldn't happen */
	return 0;
}
6978
}
6979
 
6980
static int i830_get_display_clock_speed(struct drm_device *dev)
6981
{
6084 serge 6982
	return 133333;
2327 Serge 6983
}
6984
 
6084 serge 6985
/*
 * Determine the HPLL VCO frequency in kHz for gen3/4 parts by indexing
 * a per-chipset table with the low 3 bits of the HPLLVCO register
 * (mobile parts use HPLLVCO_MOBILE). Returns 0 for unsupported
 * chipsets or an invalid table entry.
 */
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	uint8_t tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev))
		vco_table = ctg_vco;
	else if (IS_G4X(dev))
		vco_table = elk_vco;
	else if (IS_CRESTLINE(dev))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev))
		vco_table = pnv_vco;
	else if (IS_G33(dev))
		vco_table = blb_vco;
	else
		return 0;

	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
	else
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);

	return vco;
}
7053
 
7054
/*
 * Derive the GM45 CDCLK (kHz) from the HPLL VCO frequency and the single
 * CDCLK select bit (bit 12) of the GCFGC PCI config word.  Falls back to
 * 222222 kHz with an error for unrecognized VCO values.
 */
static int gm45_get_display_clock_speed(struct drm_device *dev)
{
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		return cdclk_sel ? 333333 : 222222;
	case 3200000:
		return cdclk_sel ? 320000 : 228571;
	default:
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
		return 222222;
	}
}
7075
 
7076
/*
 * i965GM CDCLK (kHz) = HPLL VCO / divider.  The divider table is chosen by
 * the VCO frequency; the table index comes from bits 8-12 of GCFGC, minus
 * one.  Any out-of-range selector or unknown VCO falls back to 200000 kHz.
 */
static int i965gm_get_display_clock_speed(struct drm_device *dev)
{
	static const uint8_t div_3200[] = { 16, 10,  8 };
	static const uint8_t div_4000[] = { 20, 12, 10 };
	static const uint8_t div_5333[] = { 24, 16, 14 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	/* All three divider tables have the same number of entries. */
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
	return 200000;
}
7112
 
7113
static int g33_get_display_clock_speed(struct drm_device *dev)
7114
{
7115
	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7116
	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7117
	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7118
	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7119
	const uint8_t *div_table;
7120
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7121
	uint16_t tmp = 0;
7122
 
7123
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7124
 
7125
	cdclk_sel = (tmp >> 4) & 0x7;
7126
 
7127
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7128
		goto fail;
7129
 
7130
	switch (vco) {
7131
	case 3200000:
7132
		div_table = div_3200;
7133
		break;
7134
	case 4000000:
7135
		div_table = div_4000;
7136
		break;
7137
	case 4800000:
7138
		div_table = div_4800;
7139
		break;
7140
	case 5333333:
7141
		div_table = div_5333;
7142
		break;
7143
	default:
7144
		goto fail;
7145
	}
7146
 
7147
	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7148
 
7149
fail:
7150
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7151
	return 190476;
7152
}
7153
 
2327 Serge 7154
static void
3746 Serge 7155
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2327 Serge 7156
{
3746 Serge 7157
	while (*num > DATA_LINK_M_N_MASK ||
7158
	       *den > DATA_LINK_M_N_MASK) {
2327 Serge 7159
		*num >>= 1;
7160
		*den >>= 1;
7161
	}
7162
}
7163
 
3746 Serge 7164
/*
 * Compute a hardware M/N pair approximating the ratio m/n.
 * N is rounded up to a power of two (capped at DATA_LINK_N_MAX), M is
 * scaled to preserve the ratio, and finally both are shifted down until
 * they fit the register fields.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
7171
 
3480 Serge 7172
/*
 * Fill *m_n with the data (gmch) and link M/N values for the given
 * bits-per-pixel / lane count / pixel clock / link clock combination.
 * The TU size is fixed at 64.
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	/* Data M/N: pixel bandwidth vs. total link bandwidth (8b symbols). */
	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	/* Link M/N: pixel clock vs. link clock. */
	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}
7186
 
7187
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7188
{
5060 serge 7189
	if (i915.panel_use_ssc >= 0)
7190
		return i915.panel_use_ssc != 0;
4104 Serge 7191
	return dev_priv->vbt.lvds_use_ssc
2327 Serge 7192
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7193
}
7194
 
6084 serge 7195
/*
 * Pick the DPLL reference clock (kHz) for pre-DDI platforms:
 * VLV/CHV/BXT use a fixed 100 MHz; LVDS with SSC enabled uses the VBT's SSC
 * frequency; otherwise 96 MHz (gen3+) or 48 MHz (gen2).
 */
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	WARN_ON(!crtc_state->base.state);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
		refclk = 100000;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->vbt.lvds_ssc_freq;
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}
2327 Serge 7218
 
4104 Serge 7219
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
3031 serge 7220
{
4104 Serge 7221
	return (1 << dpll->n) << 16 | dpll->m2;
7222
}
3746 Serge 7223
 
4104 Serge 7224
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7225
{
7226
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
3031 serge 7227
}
2327 Serge 7228
 
3746 Serge 7229
/*
 * Fill crtc_state->dpll_hw_state.fp0/fp1 from the computed dividers.
 * fp1 only gets the reduced (low-frequency) clock's value for LVDS with a
 * reduced clock available, which also sets crtc->lowfreq_avail; otherwise
 * fp1 mirrors fp0.
 */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	u32 fp, fp2 = 0;

	/* Pineview encodes the FP register differently. */
	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}
2327 Serge 7257
 
4560 Serge 7258
/*
 * Recalibrate the PLL B opamp via DPIO sideband writes.  Performed before
 * programming PLL B on VLV (see vlv_prepare_pll()).
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/*
	 * NOTE(review): this plain assignment discards the masked value
	 * computed just above, writing a constant 0x8c000000 instead.
	 * Looks intentional per the reference driver, but worth confirming.
	 */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte of PLL DW9 again after the DW13 write. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7286
 
7287
/*
 * Program the PCH transcoder's M1/N1 data and link ratio registers for this
 * crtc's pipe from the supplied M/N values.
 */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7299
 
7300
/*
 * Program the CPU transcoder's M/N registers.  On gen5+ the per-transcoder
 * registers are used (plus the optional M2/N2 set for DRRS); older parts
 * use the per-pipe G4X registers.  @m2_n2 may be NULL when no second set is
 * needed.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
			crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7333
 
6084 serge 7334
/*
 * Program the DP M/N values selected by @m_n (M1_N1 or M2_N2) into the
 * appropriate transcoder registers.  With a PCH encoder the PCH transcoder's
 * M1/N1 set is always written from dp_m_n; otherwise the CPU transcoder is
 * programmed, with M2_N2 requests redirected into the M1_N1 registers.
 */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
7358
 
6084 serge 7359
/*
 * Compute the VLV DPLL and DPLL_MD register values into
 * pipe_config->dpll_hw_state.  No hardware is touched here.
 */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	u32 dpll, dpll_md;

	/*
	 * Enable DPIO clock input. We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
	 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (crtc->pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	pipe_config->dpll_hw_state.dpll = dpll;

	/* Pixel multiplier is stored as (value - 1) in DPLL_MD. */
	dpll_md = (pipe_config->pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
}
7381
 
5354 serge 7382
/*
 * Program the VLV PLL dividers and analog settings through the DPIO
 * sideband from pipe_config->dpll.  The sequence and magic values follow
 * the eDP/HDMI DPIO notes referenced below; sb_lock is held across all
 * sideband accesses.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Second write with calibration enabled on top of the dividers. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock setup; extra bit set for DP/eDP outputs. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
4104 Serge 7472
 
6084 serge 7473
static void chv_compute_dpll(struct intel_crtc *crtc,
7474
			     struct intel_crtc_state *pipe_config)
5060 serge 7475
{
6084 serge 7476
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7477
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5354 serge 7478
		DPLL_VCO_ENABLE;
7479
	if (crtc->pipe != PIPE_A)
7480
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7481
 
7482
	pipe_config->dpll_hw_state.dpll_md =
7483
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7484
}
7485
 
7486
/*
 * Program the CHV PLL dividers, loop filter and lock-detect settings
 * through the DPIO sideband from pipe_config->dpll.  The reference clock
 * and SSC bits are enabled in DPLL first (without VCO enable); sb_lock is
 * held across all sideband accesses.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	i915_reg_t dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* M2 carries a 22-bit fractional part in its low bits. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter - coefficients depend on the VCO frequency band. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7589
 
5354 serge 7590
/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev: DRM device
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
		      const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	/* Minimal on-stack state: just the crtc, multiplier and dividers. */
	struct intel_crtc_state pipe_config = {
		.base.crtc = &crtc->base,
		.pixel_multiplier = 1,
		.dpll = *dpll,
	};

	if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, &pipe_config);
		chv_prepare_pll(crtc, &pipe_config);
		chv_enable_pll(crtc, &pipe_config);
	} else {
		vlv_compute_dpll(crtc, &pipe_config);
		vlv_prepare_pll(crtc, &pipe_config);
		vlv_enable_pll(crtc, &pipe_config);
	}
}
7621
 
7622
/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev: DRM device
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where we need
 * the PLL enabled even when @pipe is not going to be enabled.
 */
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
{
	if (IS_CHERRYVIEW(dev))
		chv_disable_pll(to_i915(dev), pipe);
	else
		vlv_disable_pll(to_i915(dev), pipe);
}
7637
 
6084 serge 7638
/*
 * Compute the gen3+ (i9xx) DPLL register value into
 * crtc_state->dpll_hw_state, including the FP dividers (via
 * i9xx_update_pll_dividers()) and, on gen4+, DPLL_MD.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These platforms carry the pixel multiplier in DPLL itself. */
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also needs the high-speed bit set. */
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, spread-spectrum, or DREFCLK. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Pixel multiplier is stored as (value - 1) in DPLL_MD. */
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
2327 Serge 7714
 
6084 serge 7715
/*
 * Compute the gen2 (i8xx) DPLL register value into
 * crtc_state->dpll_hw_state, including the FP dividers (via
 * i9xx_update_pll_dividers()).
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* LVDS encodes P1 one-hot; other outputs use the raw field plus
	 * special cases for the smallest divider values. */
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
7752
 
4104 Serge 7753
/*
 * Write the pipe/transcoder timing registers (HTOTAL..VSYNC, VSYNCSHIFT,
 * PIPESRC) from the crtc's adjusted mode.  Hardware stores each timing
 * value minus one, hence the "- 1" on every field.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Horizontal timings: start in the low word, end in the high word. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}
7820
 
4104 Serge 7821
/*
 * Read the pipe/transcoder timing registers back into pipe_config — the
 * inverse of intel_set_pipe_timings().  Hardware stores each value minus
 * one, hence the "+ 1" on every field; the interlace adjustment mirrors
 * the "- 1" applied when programming.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	/* Horizontal timings: start in the low word, end in the high word. */
	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	/* PIPESRC: width in the high word, height in the low word. */
	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
7862
 
5060 serge 7863
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
6084 serge 7864
				 struct intel_crtc_state *pipe_config)
4104 Serge 7865
{
6084 serge 7866
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7867
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7868
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7869
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
4104 Serge 7870
 
6084 serge 7871
	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7872
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7873
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7874
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
4104 Serge 7875
 
6084 serge 7876
	mode->flags = pipe_config->base.adjusted_mode.flags;
7877
	mode->type = DRM_MODE_TYPE_DRIVER;
4104 Serge 7878
 
6084 serge 7879
	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7880
	mode->flags |= pipe_config->base.adjusted_mode.flags;
7881
 
7882
	mode->hsync = drm_mode_hsync(mode);
7883
	mode->vrefresh = drm_mode_vrefresh(mode);
7884
	drm_mode_set_name(mode);
4104 Serge 7885
}
7886
 
3746 Serge 7887
/*
 * Program the PIPECONF register for a gen2-gen4/VLV/CHV pipe from the
 * staged CRTC configuration (double-wide, bpc/dither, interlace mode,
 * CxSR downclocking and limited color range).
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/*
	 * Quirked pipes must stay enabled, so carry the current ENABLE bit
	 * over instead of starting from zero for it.
	 */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Pre-gen4 and SDVO outputs need the field-indication flavor. */
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	/* Posting read flushes the write to the hardware. */
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7950
 
6084 serge 7951
/*
 * Compute the DPLL settings for a gen2-gen4/VLV/CHV CRTC.
 *
 * Counts the connectors attached to this CRTC in the atomic state, finds
 * PLL divisors for the requested port clock (unless userspace supplied
 * them via clock_set), and dispatches to the platform-specific DPLL
 * register computation.  Returns 0 on success or -EINVAL if no divisor
 * set matching the target clock exists.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock;
	bool ok;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* DSI outputs drive the pipe from their own PLL; nothing to do here. */
	if (crtc_state->has_dsi_encoder)
		return 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc == &crtc->base)
			num_connectors++;
	}

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.  The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* Translate the divisors into platform-specific DPLL register values. */
	if (IS_GEN2(dev)) {
		i8xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_compute_dpll(crtc, crtc_state);
	} else {
		i9xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	}

	return 0;
}
8016
 
4104 Serge 8017
/*
 * Read back the panel fitter (pfit) state for gen2-gen4 hardware into
 * @pipe_config, but only if the fitter is enabled and attached to this
 * CRTC's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* These platforms have no panel fitter at all. */
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		/* Pre-gen4 the fitter is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
8046
 
4398 Serge 8047
/*
 * Read the Valleyview DPLL divisors over sideband (DPIO) and convert
 * them back into the port clock stored in @pipe_config.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;	/* VLV DPLL reference clock in kHz */

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the m1/m2/n/p1/p2 divisor fields from the DPIO register. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8073
 
6084 serge 8074
/*
 * Read back the firmware/BIOS-programmed primary plane state so the
 * boot framebuffer can be inherited.  Allocates an intel_framebuffer
 * describing the currently scanned-out surface (format, size, stride,
 * tiling, GTT base) and stores it in @plane_config; does nothing if the
 * plane is disabled.  Ownership of the allocated fb passes to
 * @plane_config->fb (freed by the caller's takeover path).
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling information only exists on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Surface base address register layout differs before gen4. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* PIPESRC holds (width-1) << 16 | (height-1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8142
 
8143
/*
 * Read the Cherryview PLL divisors over sideband (DPIO) and convert
 * them back into the port clock stored in @pipe_config.  CHV spreads
 * the divisors over several DPIO dwords, including an optional
 * fractional M2 part.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* CHV PLL reference clock in kHz */

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* Integer part of M2 in the upper bits, fractional part optional. */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8172
 
3746 Serge 8173
/*
 * Read back the full hardware pipe configuration for gen2-gen4/VLV/CHV
 * into @pipe_config.  Returns true if the pipe is powered and enabled,
 * false otherwise.  Takes a power-domain reference for the duration of
 * the register reads and releases it on every exit path via the 'out'
 * label.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	/* Bail early if the pipe's power well is down; no reference taken. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On these platforms the transcoder is hardwired to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x and later encode bpc in PIPECONF. */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Recover the pixel multiplier; its location varies by platform. */
	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
8280
 
3243 Serge 8281
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class hardware based on which outputs are present (LVDS,
 * CPU eDP) and whether spread-spectrum clocking (SSC) may be used.
 *
 * The register must be changed one source at a time with settle delays,
 * so the desired final value is computed first; if it already matches
 * the current value the function returns without touching the hardware.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* Only port A eDP is driven from the CPU. */
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		/* On IBX an external CK505 clock chip may provide the ref. */
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* The stepwise sequence must have converged on the computed value. */
	BUG_ON(val != final);
}
8428
 
4104 Serge 8429
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait for the status bit to latch, then de-assert and wait for it
 * to clear.  Uses atomic waits (100 us budget each way) and logs an
 * error on timeout rather than failing.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
3243 Serge 8449
 
4104 Serge 8450
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the sideband interface.
 * The addresses and values are opaque magic from the WaMPhyProgramming
 * workaround; registers come in 0x20xx/0x21xx pairs, presumably one per
 * FDI lane pair (not documented here — do not change individual values).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
3243 Serge 8524
 
4104 Serge 8525
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* Sanitize impossible parameter combinations, warning loudly. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* LP PCH uses a different register for the buffer enable. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8569
 
4104 Serge 8570
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* LP PCH uses a different register for the buffer enable. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Route through the alternate path before disabling SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8596
 
6937 serge 8597
/* Map a clock-bend step count (-50..50 in steps of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values for each bend step, indexed by BEND_IDX().
 * Values are opaque hardware constants; see lpt_bend_clkout_dp() below
 * for how a step translates to a clock period change.
 */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
8622
 
8623
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	/* Reject steps outside the table or not a multiple of 5. */
	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Half-steps (odd multiples of 5) need the dither phase pattern. */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
8657
 
4104 Serge 8658
static void lpt_init_pch_refclk(struct drm_device *dev)
8659
{
8660
	struct intel_encoder *encoder;
8661
	bool has_vga = false;
8662
 
5354 serge 8663
	for_each_intel_encoder(dev, encoder) {
4104 Serge 8664
		switch (encoder->type) {
8665
		case INTEL_OUTPUT_ANALOG:
8666
			has_vga = true;
8667
			break;
5354 serge 8668
		default:
8669
			break;
4104 Serge 8670
		}
8671
	}
8672
 
6937 serge 8673
	if (has_vga) {
8674
		lpt_bend_clkout_dp(to_i915(dev), 0);
4104 Serge 8675
		lpt_enable_clkout_dp(dev, true, true);
6937 serge 8676
	} else {
4104 Serge 8677
		lpt_disable_clkout_dp(dev);
8678
}
6937 serge 8679
}
4104 Serge 8680
 
3243 Serge 8681
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	/* IBX and CPT PCHs use the Ironlake DREF programming sequence. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		ironlake_init_pch_refclk(dev);
		return;
	}

	/* LPT has its own CLKOUT_DP programming sequence. */
	if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
8691
 
6084 serge 8692
/*
 * Pick the PLL reference clock (in kHz) for an Ironlake CRTC: the VBT's
 * SSC frequency when a lone LVDS panel with SSC enabled is attached,
 * otherwise the fixed 120 MHz reference.
 */
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int num_connectors = 0, i;
	bool is_lvds = false;

	/* Inspect only the connectors feeding this CRTC. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		default:
			break;
		}
		num_connectors++;
	}

	/* SSC only applies when LVDS is the sole output on the pipe. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}
8727
 
4104 Serge 8728
/*
 * Program the PIPECONF register for an Ironlake-class pipe: pipe bpp,
 * dithering, interlace mode and (optionally) limited RGB color range.
 * Ends with a posting read to flush the write.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	/* Translate the pipe bpp into the PIPECONF BPC field. */
	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/* Limited (16-235) RGB range, e.g. for HDMI/TV outputs. */
	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
8769
 
3480 Serge 8770
/*
8771
 * Set up the pipe CSC unit.
8772
 *
8773
 * Currently only full range RGB to limited range RGB conversion
8774
 * is supported, but eventually this should handle various
8775
 * RGB<->YCbCr scenarios as well.
8776
 */
3746 Serge 8777
/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	/* Scale full-range RGB down into the 16-235 limited range. */
	if (intel_crtc->config->limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	/* No pre-offset: input is already full-range RGB. */
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		/* Shift the output up to start at the 16 black level. */
		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		/* Older gens take the offset via the mode register instead. */
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}
8833
 
4104 Serge 8834
/*
 * Program PIPECONF for a Haswell-class transcoder: dithering (HSW only),
 * interlace mode, legacy 8-bit gamma mode, and — on BDW/gen9+ — the
 * PIPEMISC bpc/dither configuration.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	uint32_t val;

	val = 0;

	if (IS_HASWELL(dev) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	/* BDW and newer move bpc/dither control into PIPEMISC. */
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}
8886
 
3031 serge 8887
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6084 serge 8888
				    struct intel_crtc_state *crtc_state,
3031 serge 8889
				    intel_clock_t *clock,
8890
				    bool *has_reduced_clock,
8891
				    intel_clock_t *reduced_clock)
8892
{
8893
	struct drm_device *dev = crtc->dev;
8894
	struct drm_i915_private *dev_priv = dev->dev_private;
8895
	int refclk;
8896
	const intel_limit_t *limit;
6084 serge 8897
	bool ret;
3031 serge 8898
 
6084 serge 8899
	refclk = ironlake_get_refclk(crtc_state);
3031 serge 8900
 
8901
	/*
8902
	 * Returns a set of divisors for the desired target clock with the given
8903
	 * refclk, or FALSE.  The returned values represent the clock equation:
8904
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8905
	 */
6084 serge 8906
	limit = intel_limit(crtc_state, refclk);
8907
	ret = dev_priv->display.find_dpll(limit, crtc_state,
8908
					  crtc_state->port_clock,
4104 Serge 8909
					  refclk, NULL, clock);
3031 serge 8910
	if (!ret)
8911
		return false;
8912
 
8913
	return true;
8914
}
8915
 
3243 Serge 8916
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8917
{
8918
	/*
8919
	 * Account for spread spectrum to avoid
8920
	 * oversubscribing the link. Max center spread
8921
	 * is 2.5%; use 5% for safety's sake.
8922
	 */
8923
	u32 bps = target_clock * bpp * 21 / 20;
5060 serge 8924
	return DIV_ROUND_UP(bps, link_bw * 8);
3243 Serge 8925
}
8926
 
4104 Serge 8927
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
2327 Serge 8928
{
4104 Serge 8929
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
3746 Serge 8930
}
8931
 
3243 Serge 8932
/*
 * Assemble the DPLL control register value for an Ironlake-class pipe
 * from the pre-computed divisors in crtc_state->dpll.
 *
 * Also sets FP_CB_TUNE in *fp (and *fp2 for the reduced clock, when
 * provided) if feedback-coefficient tuning is needed.  Returns the
 * DPLL value with DPLL_VCO_ENABLE already set.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct intel_crtc_state *crtc_state,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	uint32_t dpll;
	int factor, num_connectors = 0, i;
	bool is_lvds = false, is_sdvo = false;

	/* Classify the outputs driven by this CRTC in the atomic state. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		*fp |= FP_CB_TUNE;

	/* Same m < factor * n criterion applied to the reduced clock. */
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	/* High-speed mode for both SDVO/HDMI and DP encoders. */
	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for a single-output LVDS pipe with SSC enabled. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}
9027
 
6084 serge 9028
/*
 * Compute and stash the full clock configuration for an Ironlake-class
 * CRTC into crtc_state: DPLL divisors, dpll_hw_state (dpll/fp0/fp1) and,
 * for PCH-attached outputs, a reserved shared DPLL.
 *
 * Returns 0 on success, -EINVAL if no divisor set or shared DPLL could
 * be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_shared_dpll *pll;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
				     &has_reduced_clock, &reduced_clock);
	/* A failed search is only fatal if the state didn't supply divisors. */
	if (!ok && !crtc_state->clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!crtc_state->clock_set) {
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (crtc_state->has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(crtc, crtc_state,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		crtc_state->dpll_hw_state.dpll = dpll;
		crtc_state->dpll_hw_state.fp0 = fp;
		/* Without a reduced clock, FP1 mirrors FP0. */
		if (has_reduced_clock)
			crtc_state->dpll_hw_state.fp1 = fp2;
		else
			crtc_state->dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(crtc, crtc_state);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	/* Downclocking is only available when LVDS has a reduced clock. */
	if (is_lvds && has_reduced_clock)
		crtc->lowfreq_avail = true;
	else
		crtc->lowfreq_avail = false;

	return 0;
}
3243 Serge 9093
 
4560 Serge 9094
/*
 * Read back the M/N link and data values from the PCH transcoder
 * registers of this CRTC's pipe into *m_n.  The TU size is stored in
 * the high bits of the DATA_M1 register, biased by one.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9109
 
9110
/*
 * Read back the M/N link and data values for a CPU transcoder into *m_n,
 * and — on gen5..7 with DRRS — the secondary M2/N2 set into *m2_n2.
 * Pre-gen5 parts use the per-pipe G4X register layout instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9151
 
4560 Serge 9152
void intel_dp_get_m_n(struct intel_crtc *crtc,
6084 serge 9153
		      struct intel_crtc_state *pipe_config)
4560 Serge 9154
{
6084 serge 9155
	if (pipe_config->has_pch_encoder)
4560 Serge 9156
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9157
	else
9158
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5354 serge 9159
					     &pipe_config->dp_m_n,
9160
					     &pipe_config->dp_m2_n2);
4560 Serge 9161
}
9162
 
9163
/*
 * Read back the FDI M/N configuration from the CPU transcoder registers
 * into pipe_config->fdi_m_n (no secondary M2/N2 set for FDI).
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9169
 
5354 serge 9170
static void skylake_get_pfit_config(struct intel_crtc *crtc,
6084 serge 9171
				    struct intel_crtc_state *pipe_config)
5354 serge 9172
{
9173
	struct drm_device *dev = crtc->base.dev;
9174
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 9175
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9176
	uint32_t ps_ctrl = 0;
9177
	int id = -1;
9178
	int i;
5354 serge 9179
 
6084 serge 9180
	/* find scaler attached to this pipe */
9181
	for (i = 0; i < crtc->num_scalers; i++) {
9182
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9183
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9184
			id = i;
9185
			pipe_config->pch_pfit.enabled = true;
9186
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9187
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9188
			break;
9189
		}
9190
	}
5354 serge 9191
 
6084 serge 9192
	scaler_state->scaler_id = id;
9193
	if (id >= 0) {
9194
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9195
	} else {
9196
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
5354 serge 9197
	}
9198
}
9199
 
6084 serge 9200
/*
 * Read back the firmware/BIOS-programmed primary plane state on SKL so
 * the boot framebuffer can be inherited: allocates an intel_framebuffer
 * and fills plane_config (base, tiling, size) from the PLANE_* registers.
 * On any failure the allocation is released and plane_config->fb stays
 * unset.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Map the hardware tiling field to a framebuffer modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	/* PLANE_SIZE encodes height-1 in bits 16+ and width-1 in the low bits. */
	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	/* fb points at intel_fb->base — presumably the first member, so this
	 * frees the whole intel_fb allocation; TODO confirm layout. */
	kfree(fb);
}
9283
 
4104 Serge 9284
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
6084 serge 9285
				     struct intel_crtc_state *pipe_config)
4104 Serge 9286
{
9287
	struct drm_device *dev = crtc->base.dev;
9288
	struct drm_i915_private *dev_priv = dev->dev_private;
9289
	uint32_t tmp;
9290
 
9291
	tmp = I915_READ(PF_CTL(crtc->pipe));
9292
 
9293
	if (tmp & PF_ENABLE) {
9294
		pipe_config->pch_pfit.enabled = true;
9295
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9296
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9297
 
9298
		/* We currently do not free assignements of panel fitters on
9299
		 * ivb/hsw (since we don't use the higher upscaling modes which
9300
		 * differentiates them) so just WARN about this case for now. */
9301
		if (IS_GEN7(dev)) {
9302
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9303
				PF_PIPE_SEL_IVB(crtc->pipe));
9304
		}
9305
	}
9306
}
9307
 
6084 serge 9308
/*
 * Read back the BIOS-programmed primary plane state on ILK-class
 * hardware so the boot framebuffer can be inherited: allocates an
 * intel_framebuffer and fills plane_config from the DSP* registers.
 * Returns silently if the plane is disabled or allocation fails.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling is only readable from DSPCNTR on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		/* Pre-HSW splits the offset across tiled/linear registers. */
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* PIPESRC encodes width-1 in bits 16+ and height-1 in the low bits. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
9376
 
3746 Serge 9377
/*
 * Read back the full hardware pipe configuration for an ILK-class CRTC.
 *
 * Takes (and releases before returning) the pipe's power domain; returns
 * false if the domain is off or the pipe is disabled.  On success fills
 * pipe_config with bpp, color range, FDI/PCH state, shared DPLL choice,
 * pixel multiplier, timings and panel-fitter state.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Decode the PIPECONF BPC field back into a pipe bpp. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX PLLs are tied 1:1 to pipes. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	/* Always balance the power-domain reference taken above. */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9466
 
4104 Serge 9467
/*
 * Sanity-check that nothing still depends on LCPLL before it is
 * disabled: all CRTCs off, power well off, SPLL/WRPLLs off, panel
 * power and backlight PWMs off, utility pin and PCH GTC off, and
 * interrupts disabled.  Each violation triggers an I915_STATE_WARN.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* The second CPU backlight PWM only exists on Haswell. */
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9500
 
5060 serge 9501
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9502
{
9503
	struct drm_device *dev = dev_priv->dev;
9504
 
9505
	if (IS_HASWELL(dev))
9506
		return I915_READ(D_COMP_HSW);
9507
	else
9508
		return I915_READ(D_COMP_BDW);
9509
}
9510
 
9511
/*
 * Write the D_COMP register.  On Haswell the write must go through the
 * pcode mailbox (under the rps hw_lock); on Broadwell it is a plain
 * MMIO write followed by a posting read.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
9526
 
4104 Serge 9527
/*
9528
 * This function implements pieces of two sequences from BSpec:
9529
 * - Sequence for display software to disable LCPLL
9530
 * - Sequence for display software to allow package C8+
9531
 * The steps implemented here are just the steps that actually touch the LCPLL
9532
 * register. Callers should take care of disabling all the display engine
9533
 * functions, doing the mode unset, fixing interrupts, etc.
9534
 */
4560 Serge 9535
/*
 * Disable LCPLL per the BSpec sequence: optionally switch the CD clock
 * to FCLK first, disable the PLL, wait for unlock, disable D_COMP
 * compensation, and optionally allow LCPLL power-down.  The register
 * write/wait order follows the documented sequence and must not change.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		/* Re-read: the FCLK switch may have changed other bits. */
		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9578
 
9579
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source. Inverse of hsw_disable_lcpll(); returns early if the PLL is
 * already locked and fully enabled.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is locked and no disable bits are set. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP, forcing a fresh compensation cycle. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	/* PLL lock can take longer than the disable path: allow 5 ms. */
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* If we parked the CD clock on FCLK, move it back to LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	/* cdclk may have changed while LCPLL was down; refresh cached value. */
	intel_update_cdclk(dev_priv->dev);
}
9630
 
5060 serge 9631
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* LPT-LP: allow the PCH clock partitions to be gated. */
	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	/* Switch to FCLK and allow the PLL to power down. */
	hsw_disable_lcpll(dev_priv, true, true);
}
9670
 
5060 serge 9671
/*
 * Undo hsw_enable_pc8(): bring LCPLL back up, restore the PCH reference
 * clocks and re-disable PCH clock gating on LPT-LP.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	/* DDI buffer translations may have been lost; reprogram them. */
	intel_prepare_ddi(dev);
}
9689
 
6084 serge 9690
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
4104 Serge 9691
{
6084 serge 9692
	struct drm_device *dev = old_state->dev;
9693
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9694
 
9695
	broxton_set_cdclk(dev, req_cdclk);
9696
}
9697
 
9698
/* compute the max rate for new configuration */
/*
 * Walks every CRTC, taking its state from @state, and returns the highest
 * pixel rate among the enabled ones (in kHz), or a negative errno if a
 * CRTC state could not be acquired.
 */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixel_rate = 0;

	for_each_intel_crtc(state->dev, intel_crtc) {
		int pixel_rate;

		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->base.enable)
			continue;

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

		max_pixel_rate = max(max_pixel_rate, pixel_rate);
	}

	return max_pixel_rate;
}
9726
 
9727
/*
 * Reprogram the Broadwell CD clock to @cdclk kHz. The sequence is:
 * notify pcode, park the CD clock on FCLK, change the LCPLL frequency
 * select, switch back to LCPLL, then tell pcode the new frequency.
 * Only 337.5/450/540/675 MHz are valid.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val, data;
	int ret;

	/* cdclk can only be changed while it is fully up and running. */
	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	/* Ask the power controller for permission before touching the clock. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Temporarily source the CD clock from FCLK during the change. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* @data is the matching pcode frequency index. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Switch the CD clock back to LCPLL. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* CDCLK_FREQ wants the frequency in MHz, minus one. */
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
9805
 
9806
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9807
{
9808
	struct drm_i915_private *dev_priv = to_i915(state->dev);
9809
	int max_pixclk = ilk_max_pixel_rate(state);
9810
	int cdclk;
9811
 
9812
	/*
9813
	 * FIXME should also account for plane ratio
9814
	 * once 64bpp pixel formats are supported.
9815
	 */
9816
	if (max_pixclk > 540000)
9817
		cdclk = 675000;
9818
	else if (max_pixclk > 450000)
9819
		cdclk = 540000;
9820
	else if (max_pixclk > 337500)
9821
		cdclk = 450000;
9822
	else
9823
		cdclk = 337500;
9824
 
9825
	if (cdclk > dev_priv->max_cdclk_freq) {
6937 serge 9826
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
6084 serge 9827
			  cdclk, dev_priv->max_cdclk_freq);
6937 serge 9828
		return -EINVAL;
6084 serge 9829
	}
9830
 
9831
	to_intel_atomic_state(state)->cdclk = cdclk;
9832
 
9833
	return 0;
9834
}
9835
 
9836
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9837
{
9838
	struct drm_device *dev = old_state->dev;
9839
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9840
 
9841
	broadwell_set_cdclk(dev, req_cdclk);
9842
}
9843
 
9844
/*
 * Clock computation for HSW+: the DDI code picks the PLL; we only report
 * failure and clear the (unused on this platform) lowfreq flag.
 * Returns 0 on success, -EINVAL if no suitable PLL could be selected.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	if (!intel_ddi_pll_select(crtc, crtc_state))
		return -EINVAL;

	crtc->lowfreq_avail = false;

	return 0;
}
9854
 
6084 serge 9855
/*
 * Broxton hardware-state readout: each DDI port has a fixed PLL, so map
 * @port straight to the corresponding ddi_pll_sel / shared_dpll id.
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	switch (port) {
	case PORT_A:
		pipe_config->ddi_pll_sel = SKL_DPLL0;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_B:
		pipe_config->ddi_pll_sel = SKL_DPLL1;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case PORT_C:
		pipe_config->ddi_pll_sel = SKL_DPLL2;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
	}
}
9876
 
5354 serge 9877
/*
 * Skylake hardware-state readout: decode which DPLL feeds @port from
 * DPLL_CTRL2 and record it in @pipe_config.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	/* Each port has a 3-bit clock-select field in DPLL_CTRL2. */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}
9907
 
5354 serge 9908
/*
 * Haswell/Broadwell hardware-state readout: the per-port PORT_CLK_SEL
 * register directly names the PLL driving @port.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	/* LCPLL and "none" selections leave shared_dpll untouched. */
	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		pipe_config->shared_dpll = DPLL_ID_SPLL;
		break;
	}
}
9926
 
5060 serge 9927
/*
 * Read out which DDI port and which PLL drive @crtc's transcoder, verify
 * the PLL hardware state, and detect whether the PCH/FDI path is in use,
 * filling @pipe_config accordingly.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* PLL readout encoding differs per platform generation. */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* Cross-check the software PLL id against the hardware state. */
	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9970
 
3746 Serge 9971
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6084 serge 9972
				    struct intel_crtc_state *pipe_config)
3746 Serge 9973
{
9974
	struct drm_device *dev = crtc->base.dev;
9975
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 9976
	enum intel_display_power_domain power_domain;
9977
	unsigned long power_domain_mask;
3746 Serge 9978
	uint32_t tmp;
6937 serge 9979
	bool ret;
3746 Serge 9980
 
6937 serge 9981
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9982
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
5060 serge 9983
		return false;
6937 serge 9984
	power_domain_mask = BIT(power_domain);
5060 serge 9985
 
6937 serge 9986
	ret = false;
9987
 
4104 Serge 9988
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9989
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9990
 
9991
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9992
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
9993
		enum pipe trans_edp_pipe;
9994
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9995
		default:
9996
			WARN(1, "unknown pipe linked to edp transcoder\n");
9997
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
9998
		case TRANS_DDI_EDP_INPUT_A_ON:
9999
			trans_edp_pipe = PIPE_A;
10000
			break;
10001
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
10002
			trans_edp_pipe = PIPE_B;
10003
			break;
10004
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
10005
			trans_edp_pipe = PIPE_C;
10006
			break;
10007
		}
10008
 
10009
		if (trans_edp_pipe == crtc->pipe)
10010
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
10011
	}
10012
 
6937 serge 10013
	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10014
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10015
		goto out;
10016
	power_domain_mask |= BIT(power_domain);
4104 Serge 10017
 
10018
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
3746 Serge 10019
	if (!(tmp & PIPECONF_ENABLE))
6937 serge 10020
		goto out;
3746 Serge 10021
 
5060 serge 10022
	haswell_get_ddi_port_state(crtc, pipe_config);
3746 Serge 10023
 
4104 Serge 10024
	intel_get_pipe_timings(crtc, pipe_config);
10025
 
6084 serge 10026
	if (INTEL_INFO(dev)->gen >= 9) {
10027
		skl_init_scalers(dev, crtc, pipe_config);
10028
	}
10029
 
10030
	if (INTEL_INFO(dev)->gen >= 9) {
10031
		pipe_config->scaler_state.scaler_id = -1;
10032
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10033
	}
10034
 
6937 serge 10035
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10036
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10037
		power_domain_mask |= BIT(power_domain);
6084 serge 10038
		if (INTEL_INFO(dev)->gen >= 9)
5354 serge 10039
			skylake_get_pfit_config(crtc, pipe_config);
10040
		else
6084 serge 10041
			ironlake_get_pfit_config(crtc, pipe_config);
5354 serge 10042
	}
4104 Serge 10043
 
4560 Serge 10044
	if (IS_HASWELL(dev))
6084 serge 10045
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10046
			(I915_READ(IPS_CTL) & IPS_ENABLE);
4104 Serge 10047
 
5354 serge 10048
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
10049
		pipe_config->pixel_multiplier =
10050
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10051
	} else {
6084 serge 10052
		pipe_config->pixel_multiplier = 1;
4560 Serge 10053
	}
10054
 
6937 serge 10055
	ret = true;
10056
 
10057
out:
10058
	for_each_power_domain(power_domain, power_domain_mask)
10059
		intel_display_power_put(dev_priv, power_domain);
10060
 
10061
	return ret;
2342 Serge 10062
}
10063
 
6084 serge 10064
/*
 * Program the 845G/865G cursor plane. @base is the GTT address of the
 * cursor image, @on requests visibility. These chips only allow the
 * base/size/stride to change while the cursor is disabled, hence the
 * disable-then-reprogram sequence below. Cached values in intel_crtc
 * avoid redundant register writes.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (on) {
		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
		unsigned int stride = roundup_pow_of_two(width) * 4;

		/* Only a handful of power-of-two strides are legal. */
		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		/* CURSIZE packs height in bits 12+, width in the low bits. */
		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
2327 Serge 10125
 
6084 serge 10126
/*
 * Program the per-pipe cursor plane on non-845/865 hardware. @base is the
 * GTT address of the cursor image, @on requests visibility. Writing
 * CURBASE latches all cursor changes at the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (on) {
		cntl = MCURSOR_GAMMA_ENABLE;
		/* Only square 64/128/256 ARGB cursors are supported. */
		switch (intel_crtc->base.cursor->state->crtc_w) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
				return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;
	}

	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
		cntl |= CURSOR_ROTATE_180;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
2327 Serge 10171
 
3031 serge 10172
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Compute the cursor position register value for @crtc and hand off to the
 * platform-specific update function. If the cursor lies entirely outside
 * the pipe's source area, @on is forced off. Negative coordinates are
 * encoded as a sign bit plus magnitude.
 */
void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_plane_state *cursor_state = crtc->cursor->state;
	int x = cursor_state->crtc_x;
	int y = cursor_state->crtc_y;
	u32 base = 0, pos = 0;

	base = intel_crtc->cursor_addr;

	/* Fully off-screen to the right/bottom: disable instead. */
	if (x >= intel_crtc->config->pipe_src_w)
		on = false;

	if (y >= intel_crtc->config->pipe_src_h)
		on = false;

	if (x < 0) {
		/* Fully off-screen to the left: disable instead. */
		if (x + cursor_state->crtc_w <= 0)
			on = false;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + cursor_state->crtc_h <= 0)
			on = false;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	I915_WRITE(CURPOS(pipe), pos);

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev) &&
	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
		/* For 180 degree rotation, point base at the last pixel. */
		base += (cursor_state->crtc_h *
			 cursor_state->crtc_w - 1) * 4;
	}

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base, on);
	else
		i9xx_update_cursor(crtc, base, on);
}
2327 Serge 10225
 
5354 serge 10226
/*
 * Validate a requested cursor width/height against the hardware limits of
 * @dev. Returns true if the size is programmable.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* width | height only equals width for square sizes. */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fallthrough: 128/256 also need the common checks */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}
10262
 
2330 Serge 10263
/*
 * Update entries [start, start+size) of the CRTC's software gamma LUT from
 * 16-bit-per-channel values (truncated to 8 bits) and push the table to
 * the hardware.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int last = (start + size > 256) ? 256 : start + size;
	int idx;

	for (idx = start; idx < last; idx++) {
		/* The hardware LUT is 8 bit per channel; keep the high byte. */
		intel_crtc->lut_r[idx] = red[idx] >> 8;
		intel_crtc->lut_g[idx] = green[idx] >> 8;
		intel_crtc->lut_b[idx] = blue[idx] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
2327 Serge 10277
 
2330 Serge 10278
/* VESA 640x480x72Hz mode to set on the pipe */
/* NOTE(review): presumably consumed by the load-detect path below — confirm. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
2327 Serge 10283
 
4560 Serge 10284
/*
 * Allocate and initialize an intel_framebuffer wrapping @obj. Caller must
 * hold the required locks (see intel_framebuffer_create() for the locked
 * variant). Returns the embedded drm_framebuffer, or an ERR_PTR on
 * allocation/init failure.
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}
2327 Serge 10306
 
5060 serge 10307
/*
 * Locked wrapper around __intel_framebuffer_create(): takes struct_mutex
 * (interruptibly) for the duration of framebuffer creation. Returns the
 * new framebuffer or an ERR_PTR.
 */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}
10323
 
2330 Serge 10324
static u32
10325
intel_framebuffer_pitch_for_width(int width, int bpp)
10326
{
10327
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10328
	return ALIGN(pitch, 64);
10329
}
2327 Serge 10330
 
2330 Serge 10331
/*
 * Total buffer size (bytes, page-aligned) needed for a linear framebuffer
 * covering @mode at @bpp bits per pixel.
 */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}
2327 Serge 10337
 
2330 Serge 10338
/*
 * Allocate a fresh GEM object large enough for @mode at @bpp/@depth and
 * wrap it in a framebuffer. On framebuffer-creation failure the object
 * reference is dropped. Returns the framebuffer or an ERR_PTR.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		/* fb did not take ownership; release our reference. */
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
2327 Serge 10364
 
2330 Serge 10365
/*
 * If the fbdev framebuffer is large enough (pitch and size) to display
 * @mode, return it for reuse; otherwise return NULL. Always NULL when
 * fbdev emulation is compiled out.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	/* Pitch must cover a full scanline of @mode at the fb's depth. */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* And the backing object must cover all scanlines. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}
2327 Serge 10396
 
6084 serge 10397
/*
 * Configure the primary plane of @crtc in atomic state @state to scan out
 * @fb full-screen for @mode (or to be disabled when @fb/@mode are NULL),
 * with the source offset at (@x, @y). Source coordinates are 16.16 fixed
 * point, hence the << 16 shifts. Returns 0 or a negative errno.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}
10431
 
3031 serge 10432
bool intel_get_load_detect_pipe(struct drm_connector *connector,
2330 Serge 10433
				struct drm_display_mode *mode,
5060 serge 10434
				struct intel_load_detect_pipe *old,
10435
				struct drm_modeset_acquire_ctx *ctx)
2330 Serge 10436
{
10437
	struct intel_crtc *intel_crtc;
3031 serge 10438
	struct intel_encoder *intel_encoder =
10439
		intel_attached_encoder(connector);
2330 Serge 10440
	struct drm_crtc *possible_crtc;
10441
	struct drm_encoder *encoder = &intel_encoder->base;
10442
	struct drm_crtc *crtc = NULL;
10443
	struct drm_device *dev = encoder->dev;
3031 serge 10444
	struct drm_framebuffer *fb;
5060 serge 10445
	struct drm_mode_config *config = &dev->mode_config;
6084 serge 10446
	struct drm_atomic_state *state = NULL;
10447
	struct drm_connector_state *connector_state;
10448
	struct intel_crtc_state *crtc_state;
5060 serge 10449
	int ret, i = -1;
2327 Serge 10450
 
2330 Serge 10451
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5060 serge 10452
		      connector->base.id, connector->name,
10453
		      encoder->base.id, encoder->name);
2327 Serge 10454
 
5060 serge 10455
retry:
10456
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
10457
	if (ret)
6084 serge 10458
		goto fail;
5060 serge 10459
 
2330 Serge 10460
	/*
10461
	 * Algorithm gets a little messy:
10462
	 *
10463
	 *   - if the connector already has an assigned crtc, use it (but make
10464
	 *     sure it's on first)
10465
	 *
10466
	 *   - try to find the first unused crtc that can drive this connector,
10467
	 *     and use that if we find one
10468
	 */
2327 Serge 10469
 
2330 Serge 10470
	/* See if we already have a CRTC for this connector */
10471
	if (encoder->crtc) {
10472
		crtc = encoder->crtc;
2327 Serge 10473
 
5060 serge 10474
		ret = drm_modeset_lock(&crtc->mutex, ctx);
10475
		if (ret)
6084 serge 10476
			goto fail;
5354 serge 10477
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10478
		if (ret)
6084 serge 10479
			goto fail;
3480 Serge 10480
 
3031 serge 10481
		old->dpms_mode = connector->dpms;
2330 Serge 10482
		old->load_detect_temp = false;
2327 Serge 10483
 
2330 Serge 10484
		/* Make sure the crtc and connector are running */
3031 serge 10485
		if (connector->dpms != DRM_MODE_DPMS_ON)
10486
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
2327 Serge 10487
 
2330 Serge 10488
		return true;
10489
	}
2327 Serge 10490
 
2330 Serge 10491
	/* Find an unused one (if possible) */
5060 serge 10492
	for_each_crtc(dev, possible_crtc) {
2330 Serge 10493
		i++;
10494
		if (!(encoder->possible_crtcs & (1 << i)))
10495
			continue;
6084 serge 10496
		if (possible_crtc->state->enable)
5060 serge 10497
			continue;
10498
 
6084 serge 10499
		crtc = possible_crtc;
10500
		break;
10501
	}
2327 Serge 10502
 
2330 Serge 10503
	/*
10504
	 * If we didn't find an unused CRTC, don't use any.
10505
	 */
10506
	if (!crtc) {
10507
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
6084 serge 10508
		goto fail;
2330 Serge 10509
	}
2327 Serge 10510
 
5060 serge 10511
	ret = drm_modeset_lock(&crtc->mutex, ctx);
10512
	if (ret)
6084 serge 10513
		goto fail;
5354 serge 10514
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10515
	if (ret)
6084 serge 10516
		goto fail;
2327 Serge 10517
 
2330 Serge 10518
	intel_crtc = to_intel_crtc(crtc);
3031 serge 10519
	old->dpms_mode = connector->dpms;
2330 Serge 10520
	old->load_detect_temp = true;
10521
	old->release_fb = NULL;
2327 Serge 10522
 
6084 serge 10523
	state = drm_atomic_state_alloc(dev);
10524
	if (!state)
10525
		return false;
10526
 
10527
	state->acquire_ctx = ctx;
10528
 
10529
	connector_state = drm_atomic_get_connector_state(state, connector);
10530
	if (IS_ERR(connector_state)) {
10531
		ret = PTR_ERR(connector_state);
10532
		goto fail;
10533
	}
10534
 
10535
	connector_state->crtc = crtc;
10536
	connector_state->best_encoder = &intel_encoder->base;
10537
 
10538
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10539
	if (IS_ERR(crtc_state)) {
10540
		ret = PTR_ERR(crtc_state);
10541
		goto fail;
10542
	}
10543
 
10544
	crtc_state->base.active = crtc_state->base.enable = true;
10545
 
2330 Serge 10546
	if (!mode)
10547
		mode = &load_detect_mode;
2327 Serge 10548
 
2330 Serge 10549
	/* We need a framebuffer large enough to accommodate all accesses
10550
	 * that the plane may generate whilst we perform load detection.
10551
	 * We can not rely on the fbcon either being present (we get called
10552
	 * during its initialisation to detect all boot displays, or it may
10553
	 * not even exist) or that it is large enough to satisfy the
10554
	 * requested mode.
10555
	 */
3031 serge 10556
	fb = mode_fits_in_fbdev(dev, mode);
10557
	if (fb == NULL) {
2330 Serge 10558
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
3031 serge 10559
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10560
		old->release_fb = fb;
2330 Serge 10561
	} else
10562
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
3031 serge 10563
	if (IS_ERR(fb)) {
2330 Serge 10564
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5060 serge 10565
		goto fail;
2330 Serge 10566
	}
2327 Serge 10567
 
6084 serge 10568
	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10569
	if (ret)
10570
		goto fail;
10571
 
10572
	drm_mode_copy(&crtc_state->base.mode, mode);
10573
 
10574
	if (drm_atomic_commit(state)) {
2330 Serge 10575
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10576
		if (old->release_fb)
10577
			old->release_fb->funcs->destroy(old->release_fb);
5060 serge 10578
		goto fail;
2330 Serge 10579
	}
6084 serge 10580
	crtc->primary->crtc = crtc;
2327 Serge 10581
 
2330 Serge 10582
	/* let the connector get through one full cycle before testing */
10583
	intel_wait_for_vblank(dev, intel_crtc->pipe);
10584
	return true;
5060 serge 10585
 
6084 serge 10586
fail:
10587
	drm_atomic_state_free(state);
10588
	state = NULL;
10589
 
5060 serge 10590
	if (ret == -EDEADLK) {
10591
		drm_modeset_backoff(ctx);
10592
		goto retry;
10593
	}
10594
 
10595
	return false;
2330 Serge 10596
}
2327 Serge 10597
 
3031 serge 10598
/*
 * Undo whatever intel_get_load_detect_pipe() set up.
 *
 * @old->load_detect_temp distinguishes the two cases:
 *  - true:  we borrowed an idle CRTC; commit an atomic state that
 *           disables it again and drop any temporary framebuffer.
 *  - false: the connector already had a CRTC; just restore its old
 *           DPMS mode if we had forced it on.
 *
 * @ctx must be the same acquire context used when grabbing the pipe.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = connector->dev;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_atomic_state *state;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		state = drm_atomic_state_alloc(dev);
		if (!state)
			goto fail;

		state->acquire_ctx = ctx;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state))
			goto fail;

		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			goto fail;

		/* Detach the connector and shut the borrowed pipe down. */
		connector_state->best_encoder = NULL;
		connector_state->crtc = NULL;

		crtc_state->base.enable = crtc_state->base.active = false;

		ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
						      0, 0);
		if (ret)
			goto fail;

		ret = drm_atomic_commit(state);
		if (ret)
			goto fail;

		/* Drop the framebuffer we created just for load detection. */
		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	return;
fail:
	/* Best effort only; state may be NULL here (free is NULL-safe). */
	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
	drm_atomic_state_free(state);
}
2327 Serge 10663
 
4560 Serge 10664
static int i9xx_pll_refclk(struct drm_device *dev,
6084 serge 10665
			   const struct intel_crtc_state *pipe_config)
4560 Serge 10666
{
10667
	struct drm_i915_private *dev_priv = dev->dev_private;
10668
	u32 dpll = pipe_config->dpll_hw_state.dpll;
10669
 
10670
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10671
		return dev_priv->vbt.lvds_ssc_freq;
10672
	else if (HAS_PCH_SPLIT(dev))
10673
		return 120000;
10674
	else if (!IS_GEN2(dev))
10675
		return 96000;
10676
	else
10677
		return 48000;
10678
}
10679
 
2330 Serge 10680
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FPA1 selects the alternate M/N divider register. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M1/N/M2 divider fields; Pineview packs them differently. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot; ffs() recovers the divisor. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs. LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Unknown mode: leave pipe_config->port_clock untouched. */
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2 path; I830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10769
 
4560 Serge 10770
int intel_dotclock_calculate(int link_freq,
10771
			     const struct intel_link_m_n *m_n)
4104 Serge 10772
{
10773
	/*
10774
	 * The calculation for the data clock is:
4560 Serge 10775
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4104 Serge 10776
	 * But we want to avoid losing precison if possible, so:
4560 Serge 10777
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4104 Serge 10778
	 *
10779
	 * and the link clock is simpler:
4560 Serge 10780
	 * link_clock = (m * link_clock) / n
2330 Serge 10781
	 */
2327 Serge 10782
 
4560 Serge 10783
	if (!m_n->link_n)
10784
		return 0;
4104 Serge 10785
 
4560 Serge 10786
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10787
}
4104 Serge 10788
 
4560 Serge 10789
/*
 * Read back the pixel clock for a pipe driven through the PCH:
 * the DPLL gives port_clock, and the FDI M/N values convert that
 * into the actual dotclock.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}
2327 Serge 10807
 
2330 Serge 10808
/** Returns the currently programmed mode of the given pipe.
 *
 * Reads the timing registers and the DPLL state straight from the
 * hardware and reconstructs a freshly allocated drm_display_mode.
 * The caller owns the returned mode (free with drm_mode_destroy);
 * returns NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	/* Timing registers store value-minus-one in 16-bit halves. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}
10855
 
3031 serge 10856
/*
 * Note that the GPU has become busy: take a runtime-PM reference and
 * kick the RPS (render power state) machinery. Idempotent while busy;
 * balanced by intel_mark_idle().
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	/* Hold the device awake for the whole busy period. */
	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}
2327 Serge 10869
 
3031 serge 10870
/*
 * Counterpart of intel_mark_busy(): drop into the idle RPS state and
 * release the runtime-PM reference taken when we went busy.
 */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

	/* Must come last: the device may suspend after this. */
	intel_runtime_pm_put(dev_priv);
}
2327 Serge 10884
 
2330 Serge 10885
/*
 * Tear down a CRTC: reap any pending unpin work, then free the DRM
 * core state and the intel_crtc wrapper itself.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Detach the pending work under the event lock so the irq
	 * handlers can no longer see it. */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		/* cancel_work_sync intentionally disabled in this port. */
//		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
2327 Serge 10905
 
3031 serge 10906
/*
 * Deferred completion of a page flip: unpin and release the old
 * framebuffer/object once the flip has landed. Runs from the
 * workqueue, so it may take struct_mutex.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	/* Drop our reference on the request that gated this flip. */
	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
2327 Serge 10930
 
3031 serge 10931
/*
 * Common flip-done handler: if the CRTC has a pending unpin work item
 * that has fully completed, finish the flip. Shared by the per-pipe
 * and per-plane entry points below.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
2327 Serge 10961
 
3031 serge 10962
void intel_finish_page_flip(struct drm_device *dev, int pipe)
10963
{
5060 serge 10964
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10965
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2327 Serge 10966
 
3031 serge 10967
	do_intel_finish_page_flip(dev, crtc);
10968
}
2327 Serge 10969
 
3031 serge 10970
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10971
{
5060 serge 10972
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10973
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
2327 Serge 10974
 
3031 serge 10975
	do_intel_finish_page_flip(dev, crtc);
10976
}
2327 Serge 10977
 
5060 serge 10978
/* Is 'a' after or equal to 'b'? */
10979
static bool g4x_flip_count_after_eq(u32 a, u32 b)
10980
{
10981
	return !((a - b) & 0x80000000);
10982
}
10983
 
10984
/*
 * Decide whether the pending page flip on @crtc has actually been
 * latched by the hardware. Returns true when it is safe to treat the
 * flip as done (including the GPU-reset case, where pending flips are
 * force-completed).
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* A reset (in progress or since the flip was queued) completes
	 * the flip unconditionally. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}
11023
 
3031 serge 11024
/*
 * Flip-pending irq handler: advance the unpin work from queued to
 * pending once the hardware reports the flip latched on @plane.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;


	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
2327 Serge 11045
 
6084 serge 11046
/*
 * Publish @work as an in-flight flip. The barriers order the work
 * item's fields against the pending counter so concurrent irq handlers
 * never observe a half-initialized work item.
 */
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}
6320 serge 11054
 
3031 serge 11055
/*
 * Queue a CS-based page flip on gen2: emit a wait for any previous
 * flip on the same plane followed by an MI_DISPLAY_FLIP packet.
 * Returns 0 on success or the intel_ring_begin() error.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11089
 
3031 serge 11090
/*
 * Queue a CS-based page flip on gen3. Same scheme as gen2 but with
 * the I915 variant of the flip opcode and a trailing NOOP instead of
 * the aux base dword.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Serialize against a still-pending flip on this plane. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11121
 
3031 serge 11122
/*
 * Queue a CS-based page flip on gen4 (i965-class) hardware.
 * Returns 0 on success or the intel_ring_begin() error.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11160
 
3031 serge 11161
/*
 * Queue a CS-based page flip on gen6. Like gen4 but the tiling mode
 * is encoded in the pitch dword rather than the base-address dword.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11196
 
3031 serge 11197
/*
 * Queue a CS-based page flip on IVB+ (gen7/gen8).  On the render ring
 * this additionally programs DERRMR via LRI and re-loads it with SRM
 * from a scratch page, working around flip-done event masking issues.
 *
 * Returns 0 on success, -ENODEV for an unknown plane, or a negative
 * errno from the ring reservation helpers.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	/* Translate the plane into the IVB-specific flip-command bit. */
	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (ring->id == RCS) {
		/* LRI(DERRMR) + SRM(DERRMR) on top of the flip packet. */
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit_reg(ring, DERRMR);
		/* Spill DERRMR into the ring's scratch page (+256). */
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			/* Upper 32 bits of the SRM address, plus padding. */
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	/* Mark the flip pending before the request is submitted. */
	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11291
 
6084 serge 11292
static bool use_mmio_flip(struct intel_engine_cs *ring,
11293
			  struct drm_i915_gem_object *obj)
11294
{
11295
	/*
11296
	 * This is not being used for older platforms, because
11297
	 * non-availability of flip done interrupt forces us to use
11298
	 * CS flips. Older platforms derive flip done using some clever
11299
	 * tricks involving the flip_pending status bits and vblank irqs.
11300
	 * So using MMIO flips there would disrupt this mechanism.
11301
	 */
11302
 
11303
	if (ring == NULL)
11304
		return true;
11305
 
11306
	if (INTEL_INFO(ring->dev)->gen < 5)
11307
		return false;
11308
 
11309
	if (i915.use_mmio_flip < 0)
11310
		return false;
11311
	else if (i915.use_mmio_flip > 0)
11312
		return true;
11313
	else if (i915.enable_execlists)
11314
		return true;
6937 serge 11315
//	else if (obj->base.dma_buf &&
11316
//		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11317
//						       false))
11318
//		return true;
6084 serge 11319
	else
11320
		return ring != i915_gem_request_get_ring(obj->last_write_req);
11321
}
11322
 
11323
/*
 * Perform an MMIO flip on SKL+: refresh the tiling bits in PLANE_CTL,
 * recompute PLANE_STRIDE for the (possibly rotated) fb, and latch the
 * new surface through PLANE_SURF.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride, tile_height;

	/* Keep the current control value, only swap the tiling field. */
	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev, fb->pixel_format,
						fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
	} else {
		stride = fb->pitches[0] /
			 intel_fb_stride_alignment(dev, fb->modifier[0],
						   fb->pixel_format);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	/* Writing PLANE_SURF arms the whole update; post it immediately. */
	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
11376
 
11377
/*
 * Perform an MMIO flip on ILK-style hardware: refresh the tiling bit in
 * DSPCNTR and latch the new surface address through DSPSURF.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	/* Preserve the plane control value, toggling only the tiling bit. */
	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	/* DSPSURF write arms the flip; read it back to post the write. */
	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}
11400
 
11401
/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 */
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
{
	struct intel_crtc *crtc = mmio_flip->crtc;
	struct intel_unpin_work *work;

	/* Snapshot unpin_work under the event lock; the flip may have
	 * been completed/cancelled concurrently. */
	spin_lock_irq(&crtc->base.dev->event_lock);
	work = crtc->unpin_work;
	spin_unlock_irq(&crtc->base.dev->event_lock);
	if (work == NULL)
		return;

	intel_mark_page_flip_active(work);

	/* Bracket the register writes so they land in the vblank window. */
	intel_pipe_update_start(crtc);

	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
	else
		/* use_mmio_flip() retricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc);
}
11428
 
11429
/*
 * Deferred-work handler for an MMIO flip: waits for the request that
 * last wrote the framebuffer object (if any), then programs the plane
 * registers and frees the flip descriptor queued by
 * intel_queue_mmio_flip().
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
	struct intel_mmio_flip *mmio_flip =
		container_of(work, struct intel_mmio_flip, work);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (mmio_flip->req) {
		/* Wait for the last GPU write, passing the mmioflips RPS
		 * client; then drop the reference taken at queue time. */
		WARN_ON(__i915_wait_request(mmio_flip->req,
					    mmio_flip->crtc->reset_counter,
					    false, NULL,
					    &mmio_flip->i915->rps.mmioflips));
		i915_gem_request_unreference__unlocked(mmio_flip->req);
	}

	/* For framebuffer backed by dmabuf, wait for fence */
//	if (obj->base.dma_buf)
//		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
//							    false, false,
//							    MAX_SCHEDULE_TIMEOUT) < 0);

	intel_do_mmio_flip(mmio_flip);
	kfree(mmio_flip);
}
11454
 
11455
/*
 * Queue an MMIO flip for @crtc.  Allocates a flip descriptor, takes a
 * reference on @obj's last write request, and schedules the actual
 * register programming to intel_mmio_flip_work_func(), which also owns
 * freeing the descriptor.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int intel_queue_mmio_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_i915_gem_object *obj)
{
	struct intel_mmio_flip *mmio_flip;

	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
	if (mmio_flip == NULL)
		return -ENOMEM;

	mmio_flip->i915 = to_i915(dev);
	/* Referenced here; released by the work function after waiting. */
	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
	mmio_flip->crtc = to_intel_crtc(crtc);
	mmio_flip->rotation = crtc->primary->state->rotation;

	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
	schedule_work(&mmio_flip->work);

	return 0;
}
11475
 
3031 serge 11476
static int intel_default_queue_flip(struct drm_device *dev,
11477
				    struct drm_crtc *crtc,
11478
				    struct drm_framebuffer *fb,
4104 Serge 11479
				    struct drm_i915_gem_object *obj,
6084 serge 11480
				    struct drm_i915_gem_request *req,
4104 Serge 11481
				    uint32_t flags)
3031 serge 11482
{
11483
	return -ENODEV;
11484
}
2327 Serge 11485
 
6084 serge 11486
/*
 * Heuristically decide whether the pending flip on @crtc has stalled,
 * i.e. the hardware already scanned out the new surface but the
 * flip-done interrupt was missed.  Callers hold dev->event_lock.
 *
 * Returns true when the flip should be treated as completed.
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work = intel_crtc->unpin_work;
	u32 addr;

	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
		return true;

	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
		return false;

	if (!work->enable_stall_check)
		return false;

	if (work->flip_ready_vblank == 0) {
		/* Not a stall while the GPU is still writing the fb. */
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req, true))
			return false;

		/* Record when the flip first became ready to execute. */
		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
	}

	/* Give the hardware a grace period of a few vblanks. */
	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_INFO(dev)->gen >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}
11527
 
11528
/*
 * Interrupt-context check for stuck page flips on @pipe: if a pending
 * flip appears to have completed in hardware without a flip-done
 * interrupt, complete it manually; for flips waiting more than one
 * vblank, request an RPS boost for the queued request.
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* Must run in interrupt context (plain spin_lock below). */
	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	spin_lock(&dev->event_lock);
	work = intel_crtc->unpin_work;
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
		work = NULL;
	}
	if (work != NULL &&
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}
6320 serge 11553
 
3031 serge 11554
/*
 * Implement the legacy page-flip ioctl path for @crtc: validate the new
 * fb against MI_DISPLAY_FLIP limitations, queue an intel_unpin_work
 * item, pin the new fb, and submit the flip either via MMIO or via the
 * per-platform CS queue_flip hook.  On -EIO (wedged GPU) the flip is
 * instead performed as a full atomic plane update.
 *
 * Returns 0 on success or a negative errno; on failure all references
 * and state taken here are rolled back via the cleanup ladder.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	bool mmio_flip;
	struct drm_i915_gem_request *request = NULL;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe.  In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	/* Wedged GPU: fall back to the atomic-commit path below. */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->unpin_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (__intel_pageflip_stall_check(dev, crtc)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irq(&dev->event_lock);

//   if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
//       flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	work->pending_flip_obj = obj;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	/* Pick the engine for a CS flip based on the platform. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = i915_gem_request_get_ring(obj->last_write_req);
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	mmio_flip = use_mmio_flip(ring, obj);

	/* When using CS flips, we want to emit semaphores between rings.
	 * However, when using mmio flips we will create a task to do the
	 * synchronisation, so all we want here is to pin the framebuffer
	 * into the display plane and skip any waits.
	 */
	if (!mmio_flip) {
		ret = i915_gem_object_sync(obj, ring, &request);
		if (ret)
			goto cleanup_pending;
	}

	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
					 crtc->primary->state);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
						  obj, 0);
	work->gtt_offset += intel_crtc->dspaddr_offset;

	if (mmio_flip) {
		ret = intel_queue_mmio_flip(dev, crtc, obj);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);
	} else {
		if (!request) {
			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
			if (ret)
				goto cleanup_unpin;
		}

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req, request);
	}

	if (request)
		i915_add_request_no_flush(request);

	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
	work->enable_stall_check = true;

	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_fbc_deactivate(intel_crtc);
	intel_frontbuffer_flip_prepare(dev,
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

/* Error ladder: each label undoes exactly the work above it. */
cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
	if (request)
		i915_gem_request_cancel(request);
	atomic_dec(&intel_crtc->unpin_work_count);
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	drm_gem_object_unreference_unlocked(&obj->base);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

out_hang:
		/* GPU is wedged: perform the flip as an atomic commit. */
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			/* Standard atomic deadlock/backoff dance. */
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		if (ret)
			drm_atomic_state_free(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_send_vblank_event(dev, pipe, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}
11789
 
11790
 
11791
/**
6084 serge 11792
 * intel_wm_need_update - Check whether watermarks need updating
11793
 * @plane: drm plane
11794
 * @state: new plane state
3031 serge 11795
 *
6084 serge 11796
 * Check current plane state versus the new one to determine whether
11797
 * watermarks need to be recalculated.
11798
 *
11799
 * Returns true or false.
3031 serge 11800
 */
6084 serge 11801
static bool intel_wm_need_update(struct drm_plane *plane,
11802
				 struct drm_plane_state *state)
3031 serge 11803
{
6937 serge 11804
	struct intel_plane_state *new = to_intel_plane_state(state);
11805
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11806
 
11807
	/* Update watermarks on tiling or size changes. */
11808
	if (new->visible != cur->visible)
6084 serge 11809
		return true;
3031 serge 11810
 
6937 serge 11811
	if (!cur->base.fb || !new->base.fb)
11812
		return false;
11813
 
11814
	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11815
	    cur->base.rotation != new->base.rotation ||
11816
	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11817
	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11818
	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11819
	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
6084 serge 11820
		return true;
11821
 
11822
	return false;
11823
}
11824
 
6937 serge 11825
static bool needs_scaling(struct intel_plane_state *state)
11826
{
11827
	int src_w = drm_rect_width(&state->src) >> 16;
11828
	int src_h = drm_rect_height(&state->src) >> 16;
11829
	int dst_w = drm_rect_width(&state->dst);
11830
	int dst_h = drm_rect_height(&state->dst);
11831
 
11832
	return (src_w != dst_w || src_h != dst_h);
11833
}
11834
 
6084 serge 11835
/*
 * Derive the per-crtc atomic bookkeeping flags (watermark updates, cxsr
 * disable, vblank waits, FBC/IPS toggles, frontbuffer bits) from a
 * proposed plane state change, and run the SKL scaler check.
 *
 * Returns 0 on success or a negative errno from the scaler setup.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	int idx = intel_crtc->base.base.id, ret;
	int i = drm_plane_index(plane);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;

	/* SKL+ non-cursor planes may need a pipe scaler allocated. */
	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	/* A plane cannot be visible on a disabled crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	if (!is_crtc_enabled && WARN_ON(visible))
		visible = false;

	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
			 plane->base.id, fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
			if (is_crtc_enabled)
				intel_crtc->atomic.wait_vblank = true;
			pipe_config->disable_cxsr = true;
		}
	} else if (intel_wm_need_update(plane, plane_state)) {
		/* FIXME bollocks */
		pipe_config->update_wm_pre = true;
		pipe_config->update_wm_post = true;
	}

	if (visible || was_visible)
		intel_crtc->atomic.fb_bits |=
			to_intel_plane(plane)->frontbuffer_bit;

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		intel_crtc->atomic.pre_disable_primary = turn_off;
		intel_crtc->atomic.post_enable_primary = turn_on;

		if (turn_off) {
			/*
			 * FIXME: Actually if we will still have any other
			 * plane enabled on the pipe we could let IPS enabled
			 * still, but for now lets consider that when we make
			 * primary invisible by setting DSPCNTR to 0 on
			 * update_primary_plane function IPS needs to be
			 * disable.
			 */
			intel_crtc->atomic.disable_ips = true;

			intel_crtc->atomic.disable_fbc = true;
		}

		/*
		 * FBC does not work on some platforms for rotated
		 * planes, so disable it when rotation is not 0 and
		 * update it when rotation is set back to 0.
		 *
		 * FIXME: This is redundant with the fbc update done in
		 * the primary plane enable function except that that
		 * one is done too late. We eventually need to unify
		 * this.
		 */

		if (visible &&
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
		    dev_priv->fbc.crtc == intel_crtc &&
		    plane_state->rotation != BIT(DRM_ROTATE_0))
			intel_crtc->atomic.disable_fbc = true;

		/*
		 * BDW signals flip done immediately if the plane
		 * is disabled, even if the plane enable is already
		 * armed to occur at the next vblank :(
		 */
		if (turn_on && IS_BROADWELL(dev))
			intel_crtc->atomic.wait_vblank = true;

		intel_crtc->atomic.update_fbc |= visible || mode_changed;
		break;
	case DRM_PLANE_TYPE_CURSOR:
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		/*
		 * WaCxSRDisabledForSpriteScaling:ivb
		 *
		 * cstate->update_wm was already set above, so this flag will
		 * take effect when we commit and program watermarks.
		 */
		if (IS_IVYBRIDGE(dev) &&
		    needs_scaling(to_intel_plane_state(plane_state)) &&
		    !needs_scaling(old_plane_state)) {
			to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
		} else if (turn_off && !mode_changed) {
			intel_crtc->atomic.wait_vblank = true;
			intel_crtc->atomic.update_sprite_watermarks |=
				1 << i;
		}

		break;
	}
	return 0;
}
11979
 
6084 serge 11980
static bool encoders_cloneable(const struct intel_encoder *a,
11981
			       const struct intel_encoder *b)
3031 serge 11982
{
6084 serge 11983
	/* masks could be asymmetric, so check both ways */
11984
	return a == b || (a->cloneable & (1 << b->type) &&
11985
			  b->cloneable & (1 << a->type));
11986
}
11987
 
11988
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11989
					 struct intel_crtc *crtc,
11990
					 struct intel_encoder *encoder)
11991
{
11992
	struct intel_encoder *source_encoder;
11993
	struct drm_connector *connector;
11994
	struct drm_connector_state *connector_state;
11995
	int i;
11996
 
11997
	for_each_connector_in_state(state, connector, connector_state, i) {
11998
		if (connector_state->crtc != &crtc->base)
11999
			continue;
12000
 
12001
		source_encoder =
12002
			to_intel_encoder(connector_state->best_encoder);
12003
		if (!encoders_cloneable(encoder, source_encoder))
12004
			return false;
12005
	}
12006
 
12007
	return true;
12008
}
12009
 
12010
static bool check_encoder_cloning(struct drm_atomic_state *state,
12011
				  struct intel_crtc *crtc)
12012
{
3031 serge 12013
	struct intel_encoder *encoder;
6084 serge 12014
	struct drm_connector *connector;
12015
	struct drm_connector_state *connector_state;
12016
	int i;
3031 serge 12017
 
6084 serge 12018
	for_each_connector_in_state(state, connector, connector_state, i) {
12019
		if (connector_state->crtc != &crtc->base)
12020
			continue;
12021
 
12022
		encoder = to_intel_encoder(connector_state->best_encoder);
12023
		if (!check_single_encoder_cloning(state, crtc, encoder))
12024
			return false;
3031 serge 12025
	}
12026
 
6084 serge 12027
	return true;
12028
}
12029
 
12030
/*
 * Atomic .atomic_check hook for intel crtcs: validates encoder cloning,
 * computes the pipe clock, watermarks and (gen9+) panel-fitter scalers
 * for the candidate crtc state.  Returns 0 on success or a negative
 * errno to reject the state.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* A full modeset must present a valid cloning configuration. */
	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return -EINVAL;
	}

	/* Crtc being turned off: watermarks need a post-commit update. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/*
	 * Compute the new pipe clock.  The shared DPLL must still be
	 * unassigned at this point (WARN otherwise).
	 */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	ret = 0;
	/* Platform-specific watermark computation, when available. */
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
		if (ret)
			return ret;
	}

	/* gen9+: set up the crtc scaler and distribute plane scalers. */
	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}
12077
 
6084 serge 12078
/* CRTC helper vtable: wires the atomic modeset hooks for intel crtcs. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
12085
 
12086
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12087
{
12088
	struct intel_connector *connector;
12089
 
12090
	for_each_intel_connector(dev, connector) {
12091
		if (connector->base.encoder) {
12092
			connector->base.state->best_encoder =
12093
				connector->base.encoder;
12094
			connector->base.state->crtc =
12095
				connector->base.encoder->crtc;
12096
		} else {
12097
			connector->base.state->best_encoder = NULL;
12098
			connector->base.state->crtc = NULL;
12099
		}
12100
	}
12101
}
12102
 
4104 Serge 12103
/*
 * Clamp pipe_config->pipe_bpp to what the sink behind @connector can
 * accept: first against the EDID-reported bpc, then — when no EDID bpc
 * is available — against a conservative default (24 bpp, or 18 bpp for
 * DP/eDP sinks with unknown capability).
 */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		connector->base.base.id,
		connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to default limit on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0) {
		int type = connector->base.connector_type;
		int clamp_bpp = 24;

		/* Fall back to 18 bpp when DP sink capability is unknown. */
		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
		    type == DRM_MODE_CONNECTOR_eDP)
			clamp_bpp = 18;

		if (bpp > clamp_bpp) {
			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
				      bpp, clamp_bpp);
			pipe_config->pipe_bpp = clamp_bpp;
		}
	}
}
12138
 
3746 Serge 12139
/*
 * Pick the platform's maximum pipe bpp (30 on G4X/VLV/CHV, 36 on gen5+,
 * 24 otherwise), store it in pipe_config->pipe_bpp, then let every sink
 * connected to @crtc clamp it further.
 *
 * Returns the platform maximum (NOT the possibly clamped value now held
 * in pipe_config->pipe_bpp) — callers use it as the "hw max" for logging.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
		bpp = 10*3;
	else if (INTEL_INFO(dev)->gen >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;


	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}
12172
 
4560 Serge 12173
/* Debug-print the hardware crtc timings of @mode in one line. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
			"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
12183
 
4104 Serge 12184
/*
 * Dump the full contents of @pipe_config (transcoder, link M/N values,
 * modes, pfit, DPLL state — branching by platform — and all planes on
 * the crtc) to the KMS debug log.  @context is a caller-supplied tag
 * printed in the header line.  Debug-only; no state is modified.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* Second M/N set, used for DRRS low-refresh-rate switching. */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* DPLL register layout differs per platform generation. */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		/* A plane with no framebuffer is disabled. */
		if (!fb) {
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
				"disabled, scaler_id = %d\n",
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
				plane->base.id, intel_plane->pipe,
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
				drm_plane_index(plane), state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
			plane->base.id, intel_plane->pipe,
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
			drm_plane_index(plane));
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
			fb->base.id, fb->width, fb->height, fb->pixel_format);
		/* src coordinates are 16.16 fixed point, dst are integer. */
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
			state->scaler_id,
			state->src.x1 >> 16, state->src.y1 >> 16,
			drm_rect_width(&state->src) >> 16,
			drm_rect_height(&state->src) >> 16,
			state->dst.x1, state->dst.y1,
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
	}
}
12320
 
6084 serge 12321
/*
 * Reject atomic states that route the same digital port to more than
 * one active connector.  Returns false on a conflict, true otherwise.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the state being checked; fall back to current. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fallthrough: UNKNOWN is a digital port on DDI. */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
		default:
			break;
		}
	}

	return true;
}
12369
 
6084 serge 12370
/*
 * Zero @crtc_state while preserving the few fields that must survive a
 * recompute: the drm base state, scaler state, shared DPLL selection,
 * DPLL hw state, ddi_pll_sel and the pch pfit force_thru flag.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	enum intel_dpll_id shared_dpll;
	uint32_t ddi_pll_sel;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	tmp_state = crtc_state->base;
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	ddi_pll_sel = crtc_state->ddi_pll_sel;
	force_thru = crtc_state->pch_pfit.force_thru;

	memset(crtc_state, 0, sizeof *crtc_state);

	crtc_state->base = tmp_state;
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->ddi_pll_sel = ddi_pll_sel;
	crtc_state->pch_pfit.force_thru = force_thru;
}
12401
 
12402
/*
 * Compute the full pipe configuration for @crtc: sanitize sync flags,
 * pick a baseline bpp, then let every encoder on the crtc adjust the
 * mode, and finally fix up the crtc-level config.  An encoder that
 * lowers the link bandwidth may request one retry (RETRY) of the whole
 * encoder loop.  Returns 0 on success or a negative errno.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* A single bandwidth-constrained retry of the encoder loop. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
2327 Serge 12506
 
3031 serge 12507
/*
 * After a commit, refresh per-crtc bookkeeping from the swapped atomic
 * state: point intel_crtc->config at the new state, update hwmode (used
 * by the vblank code), and mirror the primary plane's fb/x/y into the
 * legacy crtc fields.
 */
static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	/* Double check state. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

		/* Update hwmode for vblank functions */
		if (crtc->state->active)
			crtc->hwmode = crtc->state->adjusted_mode;
		else
			crtc->hwmode.crtc_clock = 0;

		/*
		 * Update legacy state to satisfy fbc code. This can
		 * be removed when fbc uses the atomic state.
		 */
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
			struct drm_plane_state *plane_state = crtc->primary->state;

			crtc->primary->fb = plane_state->fb;
			/* src coordinates are 16.16 fixed point. */
			crtc->x = plane_state->src_x >> 16;
			crtc->y = plane_state->src_y >> 16;
		}
	}
}
2327 Serge 12537
 
4560 Serge 12538
/*
 * Compare two clocks with ~5% tolerance: true when both are equal, or
 * both non-zero and within roughly 5% of each other (integer math:
 * ((|c1-c2| + c1 + c2) * 100) / (c1 + c2) < 105).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	return ((delta + sum) * 100) / sum < 105;
}
12555
 
3031 serge 12556
/* Iterate over the intel_crtcs of @dev whose pipe bit is set in @mask. */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if (mask & (1 <<(intel_crtc)->pipe))
3031 serge 12561
 
3746 Serge 12562
static bool
6084 serge 12563
intel_compare_m_n(unsigned int m, unsigned int n,
12564
		  unsigned int m2, unsigned int n2,
12565
		  bool exact)
12566
{
12567
	if (m == m2 && n == n2)
12568
		return true;
12569
 
12570
	if (exact || !m || !n || !m2 || !n2)
12571
		return false;
12572
 
12573
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12574
 
12575
	if (m > m2) {
12576
		while (m > m2) {
12577
			m2 <<= 1;
12578
			n2 <<= 1;
12579
		}
12580
	} else if (m < m2) {
12581
		while (m < m2) {
12582
			m <<= 1;
12583
			n <<= 1;
12584
		}
12585
	}
12586
 
12587
	return m == m2 && n == n2;
12588
}
12589
 
12590
/*
 * Compare two link M/N configurations (tu plus gmch and link ratios).
 * With @adjust the ratio comparison is fuzzy (power-of-two scaling
 * allowed) and, on a match, *m2_n2 is overwritten with *m_n so the
 * software state converges on the hardware readout.
 */
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       struct intel_link_m_n *m2_n2,
		       bool adjust)
{
	if (m_n->tu == m2_n2->tu &&
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
		if (adjust)
			*m2_n2 = *m_n;

		return true;
	}

	return false;
}
12608
 
12609
static bool
4104 Serge 12610
intel_pipe_config_compare(struct drm_device *dev,
6084 serge 12611
			  struct intel_crtc_state *current_config,
12612
			  struct intel_crtc_state *pipe_config,
12613
			  bool adjust)
3746 Serge 12614
{
6084 serge 12615
	bool ret = true;
12616
 
12617
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12618
	do { \
12619
		if (!adjust) \
12620
			DRM_ERROR(fmt, ##__VA_ARGS__); \
12621
		else \
12622
			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12623
	} while (0)
12624
 
4104 Serge 12625
#define PIPE_CONF_CHECK_X(name)	\
12626
	if (current_config->name != pipe_config->name) { \
6084 serge 12627
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
4104 Serge 12628
			  "(expected 0x%08x, found 0x%08x)\n", \
12629
			  current_config->name, \
12630
			  pipe_config->name); \
6084 serge 12631
		ret = false; \
3746 Serge 12632
	}
12633
 
4104 Serge 12634
#define PIPE_CONF_CHECK_I(name)	\
12635
	if (current_config->name != pipe_config->name) { \
6084 serge 12636
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
4104 Serge 12637
			  "(expected %i, found %i)\n", \
12638
			  current_config->name, \
12639
			  pipe_config->name); \
6084 serge 12640
		ret = false; \
4104 Serge 12641
	}
12642
 
6084 serge 12643
#define PIPE_CONF_CHECK_M_N(name) \
12644
	if (!intel_compare_link_m_n(¤t_config->name, \
12645
				    &pipe_config->name,\
12646
				    adjust)) { \
12647
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12648
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12649
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12650
			  current_config->name.tu, \
12651
			  current_config->name.gmch_m, \
12652
			  current_config->name.gmch_n, \
12653
			  current_config->name.link_m, \
12654
			  current_config->name.link_n, \
12655
			  pipe_config->name.tu, \
12656
			  pipe_config->name.gmch_m, \
12657
			  pipe_config->name.gmch_n, \
12658
			  pipe_config->name.link_m, \
12659
			  pipe_config->name.link_n); \
12660
		ret = false; \
12661
	}
12662
 
12663
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12664
	if (!intel_compare_link_m_n(¤t_config->name, \
12665
				    &pipe_config->name, adjust) && \
12666
	    !intel_compare_link_m_n(¤t_config->alt_name, \
12667
				    &pipe_config->name, adjust)) { \
12668
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12669
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12670
			  "or tu %i gmch %i/%i link %i/%i, " \
12671
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12672
			  current_config->name.tu, \
12673
			  current_config->name.gmch_m, \
12674
			  current_config->name.gmch_n, \
12675
			  current_config->name.link_m, \
12676
			  current_config->name.link_n, \
12677
			  current_config->alt_name.tu, \
12678
			  current_config->alt_name.gmch_m, \
12679
			  current_config->alt_name.gmch_n, \
12680
			  current_config->alt_name.link_m, \
12681
			  current_config->alt_name.link_n, \
12682
			  pipe_config->name.tu, \
12683
			  pipe_config->name.gmch_m, \
12684
			  pipe_config->name.gmch_n, \
12685
			  pipe_config->name.link_m, \
12686
			  pipe_config->name.link_n); \
12687
		ret = false; \
12688
	}
12689
 
5354 serge 12690
/* This is required for BDW+ where there is only one set of registers for
12691
 * switching between high and low RR.
12692
 * This macro can be used whenever a comparison has to be made between one
12693
 * hw state and multiple sw state variables.
12694
 */
12695
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12696
	if ((current_config->name != pipe_config->name) && \
12697
		(current_config->alt_name != pipe_config->name)) { \
6084 serge 12698
			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
5354 serge 12699
				  "(expected %i or %i, found %i)\n", \
12700
				  current_config->name, \
12701
				  current_config->alt_name, \
12702
				  pipe_config->name); \
6084 serge 12703
			ret = false; \
5354 serge 12704
	}
12705
 
4104 Serge 12706
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12707
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
6084 serge 12708
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
4104 Serge 12709
			  "(expected %i, found %i)\n", \
12710
			  current_config->name & (mask), \
12711
			  pipe_config->name & (mask)); \
6084 serge 12712
		ret = false; \
4104 Serge 12713
	}
12714
 
4560 Serge 12715
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12716
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6084 serge 12717
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
4560 Serge 12718
			  "(expected %i, found %i)\n", \
12719
			  current_config->name, \
12720
			  pipe_config->name); \
6084 serge 12721
		ret = false; \
4560 Serge 12722
	}
12723
 
4104 Serge 12724
#define PIPE_CONF_QUIRK(quirk)	\
12725
	((current_config->quirks | pipe_config->quirks) & (quirk))
12726
 
12727
	PIPE_CONF_CHECK_I(cpu_transcoder);
12728
 
12729
	PIPE_CONF_CHECK_I(has_pch_encoder);
12730
	PIPE_CONF_CHECK_I(fdi_lanes);
6084 serge 12731
	PIPE_CONF_CHECK_M_N(fdi_m_n);
4104 Serge 12732
 
4560 Serge 12733
	PIPE_CONF_CHECK_I(has_dp_encoder);
6084 serge 12734
	PIPE_CONF_CHECK_I(lane_count);
5354 serge 12735
 
12736
	if (INTEL_INFO(dev)->gen < 8) {
6084 serge 12737
		PIPE_CONF_CHECK_M_N(dp_m_n);
4560 Serge 12738
 
6084 serge 12739
		if (current_config->has_drrs)
12740
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12741
	} else
12742
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
5354 serge 12743
 
6937 serge 12744
	PIPE_CONF_CHECK_I(has_dsi_encoder);
12745
 
6084 serge 12746
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12747
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12748
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12749
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12750
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12751
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
4104 Serge 12752
 
6084 serge 12753
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12754
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12755
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12756
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12757
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12758
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
4104 Serge 12759
 
6084 serge 12760
	PIPE_CONF_CHECK_I(pixel_multiplier);
5060 serge 12761
	PIPE_CONF_CHECK_I(has_hdmi_sink);
12762
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
6937 serge 12763
	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5060 serge 12764
		PIPE_CONF_CHECK_I(limited_color_range);
5354 serge 12765
	PIPE_CONF_CHECK_I(has_infoframe);
4104 Serge 12766
 
5060 serge 12767
	PIPE_CONF_CHECK_I(has_audio);
12768
 
6084 serge 12769
	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12770
			      DRM_MODE_FLAG_INTERLACE);
12771
 
12772
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6084 serge 12773
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12774
				      DRM_MODE_FLAG_PHSYNC);
6084 serge 12775
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12776
				      DRM_MODE_FLAG_NHSYNC);
6084 serge 12777
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12778
				      DRM_MODE_FLAG_PVSYNC);
6084 serge 12779
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12780
				      DRM_MODE_FLAG_NVSYNC);
12781
	}
12782
 
6084 serge 12783
	PIPE_CONF_CHECK_X(gmch_pfit.control);
4104 Serge 12784
	/* pfit ratios are autocomputed by the hw on gen4+ */
12785
	if (INTEL_INFO(dev)->gen < 4)
12786
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
6084 serge 12787
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
5060 serge 12788
 
6084 serge 12789
	if (!adjust) {
12790
		PIPE_CONF_CHECK_I(pipe_src_w);
12791
		PIPE_CONF_CHECK_I(pipe_src_h);
12792
 
12793
		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12794
		if (current_config->pch_pfit.enabled) {
12795
			PIPE_CONF_CHECK_X(pch_pfit.pos);
12796
			PIPE_CONF_CHECK_X(pch_pfit.size);
12797
		}
12798
 
12799
		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
4104 Serge 12800
	}
12801
 
4560 Serge 12802
	/* BDW+ don't expose a synchronous way to read the state */
12803
	if (IS_HASWELL(dev))
6084 serge 12804
		PIPE_CONF_CHECK_I(ips_enabled);
4104 Serge 12805
 
4560 Serge 12806
	PIPE_CONF_CHECK_I(double_wide);
12807
 
5060 serge 12808
	PIPE_CONF_CHECK_X(ddi_pll_sel);
12809
 
4104 Serge 12810
	PIPE_CONF_CHECK_I(shared_dpll);
12811
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12812
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12813
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12814
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5060 serge 12815
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6084 serge 12816
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
5354 serge 12817
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12818
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12819
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
4104 Serge 12820
 
4280 Serge 12821
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12822
		PIPE_CONF_CHECK_I(pipe_bpp);
12823
 
6084 serge 12824
	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12825
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
4560 Serge 12826
 
4104 Serge 12827
#undef PIPE_CONF_CHECK_X
12828
#undef PIPE_CONF_CHECK_I
5354 serge 12829
#undef PIPE_CONF_CHECK_I_ALT
4104 Serge 12830
#undef PIPE_CONF_CHECK_FLAGS
4560 Serge 12831
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
4104 Serge 12832
#undef PIPE_CONF_QUIRK
6084 serge 12833
#undef INTEL_ERR_OR_DBG_KMS
4104 Serge 12834
 
6084 serge 12835
	return ret;
3746 Serge 12836
}
12837
 
5354 serge 12838
static void check_wm_state(struct drm_device *dev)
12839
{
12840
	struct drm_i915_private *dev_priv = dev->dev_private;
12841
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
12842
	struct intel_crtc *intel_crtc;
12843
	int plane;
12844
 
12845
	if (INTEL_INFO(dev)->gen < 9)
12846
		return;
12847
 
12848
	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12849
	sw_ddb = &dev_priv->wm.skl_hw.ddb;
12850
 
12851
	for_each_intel_crtc(dev, intel_crtc) {
12852
		struct skl_ddb_entry *hw_entry, *sw_entry;
12853
		const enum pipe pipe = intel_crtc->pipe;
12854
 
12855
		if (!intel_crtc->active)
12856
			continue;
12857
 
12858
		/* planes */
6084 serge 12859
		for_each_plane(dev_priv, pipe, plane) {
5354 serge 12860
			hw_entry = &hw_ddb.plane[pipe][plane];
12861
			sw_entry = &sw_ddb->plane[pipe][plane];
12862
 
12863
			if (skl_ddb_entry_equal(hw_entry, sw_entry))
12864
				continue;
12865
 
12866
			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12867
				  "(expected (%u,%u), found (%u,%u))\n",
12868
				  pipe_name(pipe), plane + 1,
12869
				  sw_entry->start, sw_entry->end,
12870
				  hw_entry->start, hw_entry->end);
12871
		}
12872
 
12873
		/* cursor */
6084 serge 12874
		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12875
		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
5354 serge 12876
 
12877
		if (skl_ddb_entry_equal(hw_entry, sw_entry))
12878
			continue;
12879
 
12880
		DRM_ERROR("mismatch in DDB state pipe %c cursor "
12881
			  "(expected (%u,%u), found (%u,%u))\n",
12882
			  pipe_name(pipe),
12883
			  sw_entry->start, sw_entry->end,
12884
			  hw_entry->start, hw_entry->end);
12885
	}
12886
}
12887
 
4104 Serge 12888
static void
6084 serge 12889
check_connector_state(struct drm_device *dev,
12890
		      struct drm_atomic_state *old_state)
3031 serge 12891
{
6084 serge 12892
	struct drm_connector_state *old_conn_state;
12893
	struct drm_connector *connector;
12894
	int i;
3031 serge 12895
 
6084 serge 12896
	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
12897
		struct drm_encoder *encoder = connector->encoder;
12898
		struct drm_connector_state *state = connector->state;
12899
 
3031 serge 12900
		/* This also checks the encoder/connector hw state with the
12901
		 * ->get_hw_state callbacks. */
6084 serge 12902
		intel_connector_check_state(to_intel_connector(connector));
3031 serge 12903
 
6084 serge 12904
		I915_STATE_WARN(state->best_encoder != encoder,
12905
		     "connector's atomic encoder doesn't match legacy encoder\n");
3031 serge 12906
	}
4104 Serge 12907
}
3031 serge 12908
 
4104 Serge 12909
static void
12910
check_encoder_state(struct drm_device *dev)
12911
{
12912
	struct intel_encoder *encoder;
12913
	struct intel_connector *connector;
12914
 
5354 serge 12915
	for_each_intel_encoder(dev, encoder) {
3031 serge 12916
		bool enabled = false;
6084 serge 12917
		enum pipe pipe;
3031 serge 12918
 
12919
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12920
			      encoder->base.base.id,
5060 serge 12921
			      encoder->base.name);
3031 serge 12922
 
6084 serge 12923
		for_each_intel_connector(dev, connector) {
12924
			if (connector->base.state->best_encoder != &encoder->base)
3031 serge 12925
				continue;
12926
			enabled = true;
6084 serge 12927
 
12928
			I915_STATE_WARN(connector->base.state->crtc !=
12929
					encoder->base.crtc,
12930
			     "connector's crtc doesn't match encoder crtc\n");
3031 serge 12931
		}
5060 serge 12932
 
6084 serge 12933
		I915_STATE_WARN(!!encoder->base.crtc != enabled,
3031 serge 12934
		     "encoder's enabled state mismatch "
12935
		     "(expected %i, found %i)\n",
12936
		     !!encoder->base.crtc, enabled);
12937
 
6084 serge 12938
		if (!encoder->base.crtc) {
12939
			bool active;
3031 serge 12940
 
6084 serge 12941
			active = encoder->get_hw_state(encoder, &pipe);
12942
			I915_STATE_WARN(active,
12943
			     "encoder detached but still enabled on pipe %c.\n",
12944
			     pipe_name(pipe));
12945
		}
3031 serge 12946
	}
4104 Serge 12947
}
3031 serge 12948
 
4104 Serge 12949
static void
6084 serge 12950
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
4104 Serge 12951
{
5060 serge 12952
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 12953
	struct intel_encoder *encoder;
6084 serge 12954
	struct drm_crtc_state *old_crtc_state;
12955
	struct drm_crtc *crtc;
12956
	int i;
4104 Serge 12957
 
6084 serge 12958
	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
12959
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12960
		struct intel_crtc_state *pipe_config, *sw_config;
12961
		bool active;
3031 serge 12962
 
6084 serge 12963
		if (!needs_modeset(crtc->state) &&
12964
		    !to_intel_crtc_state(crtc->state)->update_pipe)
12965
			continue;
4104 Serge 12966
 
6084 serge 12967
		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
12968
		pipe_config = to_intel_crtc_state(old_crtc_state);
12969
		memset(pipe_config, 0, sizeof(*pipe_config));
12970
		pipe_config->base.crtc = crtc;
12971
		pipe_config->base.state = old_state;
12972
 
3031 serge 12973
		DRM_DEBUG_KMS("[CRTC:%d]\n",
6084 serge 12974
			      crtc->base.id);
3031 serge 12975
 
6084 serge 12976
		active = dev_priv->display.get_pipe_config(intel_crtc,
12977
							   pipe_config);
3031 serge 12978
 
6084 serge 12979
		/* hw state is inconsistent with the pipe quirk */
12980
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
12981
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12982
			active = crtc->state->active;
4104 Serge 12983
 
6084 serge 12984
		I915_STATE_WARN(crtc->state->active != active,
12985
		     "crtc active state doesn't match with hw state "
12986
		     "(expected %i, found %i)\n", crtc->state->active, active);
3031 serge 12987
 
6084 serge 12988
		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
12989
		     "transitional active state does not match atomic hw state "
12990
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
3746 Serge 12991
 
6084 serge 12992
		for_each_encoder_on_crtc(dev, crtc, encoder) {
12993
			enum pipe pipe;
3746 Serge 12994
 
6084 serge 12995
			active = encoder->get_hw_state(encoder, &pipe);
12996
			I915_STATE_WARN(active != crtc->state->active,
12997
				"[ENCODER:%i] active %i with crtc active %i\n",
12998
				encoder->base.base.id, active, crtc->state->active);
12999
 
13000
			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
13001
					"Encoder connected to wrong pipe %c\n",
13002
					pipe_name(pipe));
13003
 
13004
			if (active)
13005
				encoder->get_config(encoder, pipe_config);
4104 Serge 13006
		}
13007
 
6084 serge 13008
		if (!crtc->state->active)
13009
			continue;
3746 Serge 13010
 
6084 serge 13011
		sw_config = to_intel_crtc_state(crtc->state);
13012
		if (!intel_pipe_config_compare(dev, sw_config,
13013
					       pipe_config, false)) {
13014
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
13015
			intel_dump_pipe_config(intel_crtc, pipe_config,
4104 Serge 13016
					       "[hw state]");
6084 serge 13017
			intel_dump_pipe_config(intel_crtc, sw_config,
4104 Serge 13018
					       "[sw state]");
13019
		}
3031 serge 13020
	}
13021
}
13022
 
4104 Serge 13023
static void
13024
check_shared_dpll_state(struct drm_device *dev)
13025
{
5060 serge 13026
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 13027
	struct intel_crtc *crtc;
13028
	struct intel_dpll_hw_state dpll_hw_state;
13029
	int i;
13030
 
13031
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13032
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13033
		int enabled_crtcs = 0, active_crtcs = 0;
13034
		bool active;
13035
 
13036
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13037
 
13038
		DRM_DEBUG_KMS("%s\n", pll->name);
13039
 
13040
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
13041
 
6084 serge 13042
		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
4104 Serge 13043
		     "more active pll users than references: %i vs %i\n",
5354 serge 13044
		     pll->active, hweight32(pll->config.crtc_mask));
6084 serge 13045
		I915_STATE_WARN(pll->active && !pll->on,
4104 Serge 13046
		     "pll in active use but not on in sw tracking\n");
6084 serge 13047
		I915_STATE_WARN(pll->on && !pll->active,
4104 Serge 13048
		     "pll in on but not on in use in sw tracking\n");
6084 serge 13049
		I915_STATE_WARN(pll->on != active,
4104 Serge 13050
		     "pll on state mismatch (expected %i, found %i)\n",
13051
		     pll->on, active);
13052
 
5060 serge 13053
		for_each_intel_crtc(dev, crtc) {
6084 serge 13054
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
4104 Serge 13055
				enabled_crtcs++;
13056
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
13057
				active_crtcs++;
13058
		}
6084 serge 13059
		I915_STATE_WARN(pll->active != active_crtcs,
4104 Serge 13060
		     "pll active crtcs mismatch (expected %i, found %i)\n",
13061
		     pll->active, active_crtcs);
6084 serge 13062
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
4104 Serge 13063
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
5354 serge 13064
		     hweight32(pll->config.crtc_mask), enabled_crtcs);
4104 Serge 13065
 
6084 serge 13066
		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
4104 Serge 13067
				       sizeof(dpll_hw_state)),
13068
		     "pll hw state mismatch\n");
13069
	}
13070
}
13071
 
6084 serge 13072
static void
13073
intel_modeset_check_state(struct drm_device *dev,
13074
			  struct drm_atomic_state *old_state)
4104 Serge 13075
{
5354 serge 13076
	check_wm_state(dev);
6084 serge 13077
	check_connector_state(dev, old_state);
4104 Serge 13078
	check_encoder_state(dev);
6084 serge 13079
	check_crtc_state(dev, old_state);
4104 Serge 13080
	check_shared_dpll_state(dev);
13081
}
13082
 
6084 serge 13083
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
4560 Serge 13084
				     int dotclock)
13085
{
13086
	/*
13087
	 * FDI already provided one idea for the dotclock.
13088
	 * Yell if the encoder disagrees.
13089
	 */
6084 serge 13090
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
4560 Serge 13091
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
6084 serge 13092
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
4560 Serge 13093
}
13094
 
5060 serge 13095
static void update_scanline_offset(struct intel_crtc *crtc)
13096
{
13097
	struct drm_device *dev = crtc->base.dev;
13098
 
13099
	/*
13100
	 * The scanline counter increments at the leading edge of hsync.
13101
	 *
13102
	 * On most platforms it starts counting from vtotal-1 on the
13103
	 * first active line. That means the scanline counter value is
13104
	 * always one less than what we would expect. Ie. just after
13105
	 * start of vblank, which also occurs at start of hsync (on the
13106
	 * last active line), the scanline counter will read vblank_start-1.
13107
	 *
13108
	 * On gen2 the scanline counter starts counting from 1 instead
13109
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13110
	 * to keep the value positive), instead of adding one.
13111
	 *
13112
	 * On HSW+ the behaviour of the scanline counter depends on the output
13113
	 * type. For DP ports it behaves like most other platforms, but on HDMI
13114
	 * there's an extra 1 line difference. So we need to add two instead of
13115
	 * one to the value.
13116
	 */
13117
	if (IS_GEN2(dev)) {
6084 serge 13118
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
5060 serge 13119
		int vtotal;
13120
 
6084 serge 13121
		vtotal = adjusted_mode->crtc_vtotal;
13122
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
5060 serge 13123
			vtotal /= 2;
13124
 
13125
		crtc->scanline_offset = vtotal - 1;
13126
	} else if (HAS_DDI(dev) &&
5354 serge 13127
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
5060 serge 13128
		crtc->scanline_offset = 2;
13129
	} else
13130
		crtc->scanline_offset = 1;
13131
}
13132
 
6084 serge 13133
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
5354 serge 13134
{
6084 serge 13135
	struct drm_device *dev = state->dev;
13136
	struct drm_i915_private *dev_priv = to_i915(dev);
13137
	struct intel_shared_dpll_config *shared_dpll = NULL;
13138
	struct intel_crtc *intel_crtc;
13139
	struct intel_crtc_state *intel_crtc_state;
13140
	struct drm_crtc *crtc;
13141
	struct drm_crtc_state *crtc_state;
13142
	int i;
5354 serge 13143
 
6084 serge 13144
	if (!dev_priv->display.crtc_compute_clock)
13145
		return;
5354 serge 13146
 
6084 serge 13147
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13148
		int dpll;
5354 serge 13149
 
6084 serge 13150
		intel_crtc = to_intel_crtc(crtc);
13151
		intel_crtc_state = to_intel_crtc_state(crtc_state);
13152
		dpll = intel_crtc_state->shared_dpll;
13153
 
13154
		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
13155
			continue;
13156
 
13157
		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
13158
 
13159
		if (!shared_dpll)
13160
			shared_dpll = intel_atomic_get_shared_dpll_state(state);
13161
 
13162
		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
5354 serge 13163
	}
13164
}
13165
 
6084 serge 13166
/*
13167
 * This implements the workaround described in the "notes" section of the mode
13168
 * set sequence documentation. When going from no pipes or single pipe to
13169
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13170
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13171
 */
13172
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
3031 serge 13173
{
6084 serge 13174
	struct drm_crtc_state *crtc_state;
3031 serge 13175
	struct intel_crtc *intel_crtc;
6084 serge 13176
	struct drm_crtc *crtc;
13177
	struct intel_crtc_state *first_crtc_state = NULL;
13178
	struct intel_crtc_state *other_crtc_state = NULL;
13179
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13180
	int i;
3031 serge 13181
 
6084 serge 13182
	/* look at all crtc's that are going to be enabled in during modeset */
13183
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13184
		intel_crtc = to_intel_crtc(crtc);
3480 Serge 13185
 
6084 serge 13186
		if (!crtc_state->active || !needs_modeset(crtc_state))
13187
			continue;
3031 serge 13188
 
6084 serge 13189
		if (first_crtc_state) {
13190
			other_crtc_state = to_intel_crtc_state(crtc_state);
13191
			break;
13192
		} else {
13193
			first_crtc_state = to_intel_crtc_state(crtc_state);
13194
			first_pipe = intel_crtc->pipe;
13195
		}
13196
	}
3031 serge 13197
 
6084 serge 13198
	/* No workaround needed? */
13199
	if (!first_crtc_state)
13200
		return 0;
4560 Serge 13201
 
6084 serge 13202
	/* w/a possibly needed, check how many crtc's are already enabled. */
13203
	for_each_intel_crtc(state->dev, intel_crtc) {
13204
		struct intel_crtc_state *pipe_config;
4560 Serge 13205
 
6084 serge 13206
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13207
		if (IS_ERR(pipe_config))
13208
			return PTR_ERR(pipe_config);
5354 serge 13209
 
6084 serge 13210
		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
5354 serge 13211
 
6084 serge 13212
		if (!pipe_config->base.active ||
13213
		    needs_modeset(&pipe_config->base))
13214
			continue;
5354 serge 13215
 
6084 serge 13216
		/* 2 or more enabled crtcs means no need for w/a */
13217
		if (enabled_pipe != INVALID_PIPE)
13218
			return 0;
3746 Serge 13219
 
6084 serge 13220
		enabled_pipe = intel_crtc->pipe;
3031 serge 13221
	}
13222
 
6084 serge 13223
	if (enabled_pipe != INVALID_PIPE)
13224
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13225
	else if (other_crtc_state)
13226
		other_crtc_state->hsw_workaround_pipe = first_pipe;
4560 Serge 13227
 
6084 serge 13228
	return 0;
13229
}
2327 Serge 13230
 
6084 serge 13231
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13232
{
13233
	struct drm_crtc *crtc;
13234
	struct drm_crtc_state *crtc_state;
13235
	int ret = 0;
3031 serge 13236
 
6084 serge 13237
	/* add all active pipes to the state */
13238
	for_each_crtc(state->dev, crtc) {
13239
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13240
		if (IS_ERR(crtc_state))
13241
			return PTR_ERR(crtc_state);
3243 Serge 13242
 
6084 serge 13243
		if (!crtc_state->active || needs_modeset(crtc_state))
13244
			continue;
5060 serge 13245
 
6084 serge 13246
		crtc_state->mode_changed = true;
5060 serge 13247
 
6084 serge 13248
		ret = drm_atomic_add_affected_connectors(state, crtc);
13249
		if (ret)
13250
			break;
3031 serge 13251
 
6084 serge 13252
		ret = drm_atomic_add_affected_planes(state, crtc);
13253
		if (ret)
13254
			break;
5060 serge 13255
	}
3031 serge 13256
 
13257
	return ret;
2330 Serge 13258
}
2327 Serge 13259
 
6084 serge 13260
static int intel_modeset_checks(struct drm_atomic_state *state)
3746 Serge 13261
{
6084 serge 13262
	struct drm_device *dev = state->dev;
13263
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 13264
	int ret;
13265
 
6084 serge 13266
	if (!check_digital_port_conflicts(state)) {
13267
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13268
		return -EINVAL;
13269
	}
3746 Serge 13270
 
6084 serge 13271
	/*
13272
	 * See if the config requires any additional preparation, e.g.
13273
	 * to adjust global state with pipes off.  We need to do this
13274
	 * here so we can get the modeset_pipe updated config for the new
13275
	 * mode set on this crtc.  For other crtcs we need to use the
13276
	 * adjusted_mode bits in the crtc directly.
13277
	 */
13278
	if (dev_priv->display.modeset_calc_cdclk) {
13279
		unsigned int cdclk;
3746 Serge 13280
 
6084 serge 13281
		ret = dev_priv->display.modeset_calc_cdclk(state);
3746 Serge 13282
 
6084 serge 13283
		cdclk = to_intel_atomic_state(state)->cdclk;
13284
		if (!ret && cdclk != dev_priv->cdclk_freq)
13285
			ret = intel_modeset_all_pipes(state);
5354 serge 13286
 
6084 serge 13287
		if (ret < 0)
13288
			return ret;
13289
	} else
13290
		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
5354 serge 13291
 
6084 serge 13292
	intel_modeset_clear_plls(state);
5354 serge 13293
 
6084 serge 13294
	if (IS_HASWELL(dev))
13295
		return haswell_mode_set_planes_workaround(state);
5354 serge 13296
 
6084 serge 13297
	return 0;
3480 Serge 13298
}
13299
 
6937 serge 13300
/*
13301
 * Handle calculation of various watermark data at the end of the atomic check
13302
 * phase.  The code here should be run after the per-crtc and per-plane 'check'
13303
 * handlers to ensure that all derived state has been updated.
13304
 */
13305
static void calc_watermark_data(struct drm_atomic_state *state)
13306
{
13307
	struct drm_device *dev = state->dev;
13308
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13309
	struct drm_crtc *crtc;
13310
	struct drm_crtc_state *cstate;
13311
	struct drm_plane *plane;
13312
	struct drm_plane_state *pstate;
13313
 
13314
	/*
13315
	 * Calculate watermark configuration details now that derived
13316
	 * plane/crtc state is all properly updated.
13317
	 */
13318
	drm_for_each_crtc(crtc, dev) {
13319
		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13320
			crtc->state;
13321
 
13322
		if (cstate->active)
13323
			intel_state->wm_config.num_pipes_active++;
13324
	}
13325
	drm_for_each_legacy_plane(plane, dev) {
13326
		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13327
			plane->state;
13328
 
13329
		if (!to_intel_plane_state(pstate)->visible)
13330
			continue;
13331
 
13332
		intel_state->wm_config.sprites_enabled = true;
13333
		if (pstate->crtc_w != pstate->src_w >> 16 ||
13334
		    pstate->crtc_h != pstate->src_h >> 16)
13335
			intel_state->wm_config.sprites_scaled = true;
13336
	}
13337
}
13338
 
6084 serge 13339
/**
13340
 * intel_atomic_check - validate state object
13341
 * @dev: drm device
13342
 * @state: state to validate
13343
 */
13344
static int intel_atomic_check(struct drm_device *dev,
13345
			      struct drm_atomic_state *state)
3031 serge 13346
{
6937 serge 13347
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
6084 serge 13348
	struct drm_crtc *crtc;
13349
	struct drm_crtc_state *crtc_state;
13350
	int ret, i;
13351
	bool any_ms = false;
3031 serge 13352
 
6084 serge 13353
	ret = drm_atomic_helper_check_modeset(dev, state);
13354
	if (ret)
13355
		return ret;
3031 serge 13356
 
6084 serge 13357
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13358
		struct intel_crtc_state *pipe_config =
13359
			to_intel_crtc_state(crtc_state);
3031 serge 13360
 
6084 serge 13361
		memset(&to_intel_crtc(crtc)->atomic, 0,
13362
		       sizeof(struct intel_crtc_atomic_commit));
5060 serge 13363
 
6084 serge 13364
		/* Catch I915_MODE_FLAG_INHERITED */
13365
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13366
			crtc_state->mode_changed = true;
3031 serge 13367
 
6084 serge 13368
		if (!crtc_state->enable) {
13369
			if (needs_modeset(crtc_state))
13370
				any_ms = true;
13371
			continue;
13372
		}
3031 serge 13373
 
6084 serge 13374
		if (!needs_modeset(crtc_state))
13375
			continue;
5060 serge 13376
 
6084 serge 13377
		/* FIXME: For only active_changed we shouldn't need to do any
13378
		 * state recomputation at all. */
3031 serge 13379
 
6084 serge 13380
		ret = drm_atomic_add_affected_connectors(state, crtc);
13381
		if (ret)
13382
			return ret;
3031 serge 13383
 
6084 serge 13384
		ret = intel_modeset_pipe_config(crtc, pipe_config);
13385
		if (ret)
13386
			return ret;
3031 serge 13387
 
6084 serge 13388
		if (i915.fastboot &&
13389
		    intel_pipe_config_compare(state->dev,
13390
					to_intel_crtc_state(crtc->state),
13391
					pipe_config, true)) {
13392
			crtc_state->mode_changed = false;
13393
			to_intel_crtc_state(crtc_state)->update_pipe = true;
13394
		}
3031 serge 13395
 
6084 serge 13396
		if (needs_modeset(crtc_state)) {
13397
			any_ms = true;
5060 serge 13398
 
6084 serge 13399
			ret = drm_atomic_add_affected_planes(state, crtc);
13400
			if (ret)
13401
				return ret;
13402
		}
5060 serge 13403
 
6084 serge 13404
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13405
				       needs_modeset(crtc_state) ?
13406
				       "[modeset]" : "[fastset]");
3031 serge 13407
	}
13408
 
6084 serge 13409
	if (any_ms) {
13410
		ret = intel_modeset_checks(state);
3031 serge 13411
 
6084 serge 13412
		if (ret)
13413
			return ret;
13414
	} else
6937 serge 13415
		intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
3746 Serge 13416
 
6937 serge 13417
	ret = drm_atomic_helper_check_planes(state->dev, state);
13418
	if (ret)
13419
		return ret;
13420
 
13421
	calc_watermark_data(state);
13422
 
13423
	return 0;
3746 Serge 13424
}
13425
 
6937 serge 13426
static int intel_atomic_prepare_commit(struct drm_device *dev,
13427
				       struct drm_atomic_state *state,
13428
				       bool async)
13429
{
13430
	struct drm_i915_private *dev_priv = dev->dev_private;
13431
	struct drm_plane_state *plane_state;
13432
	struct drm_crtc_state *crtc_state;
13433
	struct drm_plane *plane;
13434
	struct drm_crtc *crtc;
13435
	int i, ret;
13436
 
13437
	if (async) {
13438
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
13439
		return -EINVAL;
13440
	}
13441
 
13442
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13443
		if (state->legacy_cursor_update)
13444
			continue;
13445
 
13446
		ret = intel_crtc_wait_for_pending_flips(crtc);
13447
		if (ret)
13448
			return ret;
13449
 
13450
//		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13451
//			flush_workqueue(dev_priv->wq);
13452
	}
13453
 
13454
	ret = mutex_lock_interruptible(&dev->struct_mutex);
13455
	if (ret)
13456
		return ret;
13457
 
13458
	ret = drm_atomic_helper_prepare_planes(dev, state);
13459
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
13460
		u32 reset_counter;
13461
 
13462
		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
13463
		mutex_unlock(&dev->struct_mutex);
13464
 
13465
		for_each_plane_in_state(state, plane, plane_state, i) {
13466
			struct intel_plane_state *intel_plane_state =
13467
				to_intel_plane_state(plane_state);
13468
 
13469
			if (!intel_plane_state->wait_req)
13470
				continue;
13471
 
13472
			ret = __i915_wait_request(intel_plane_state->wait_req,
13473
						  reset_counter, true,
13474
						  NULL, NULL);
13475
 
13476
			/* Swallow -EIO errors to allow updates during hw lockup. */
13477
			if (ret == -EIO)
13478
				ret = 0;
13479
 
13480
			if (ret)
13481
				break;
13482
		}
13483
 
13484
		if (!ret)
13485
			return 0;
13486
 
13487
		mutex_lock(&dev->struct_mutex);
13488
		drm_atomic_helper_cleanup_planes(dev, state);
13489
	}
13490
 
13491
	mutex_unlock(&dev->struct_mutex);
13492
	return ret;
13493
}
13494
 
6084 serge 13495
/**
13496
 * intel_atomic_commit - commit validated state object
13497
 * @dev: DRM device
13498
 * @state: the top-level driver state object
13499
 * @async: asynchronous commit
13500
 *
13501
 * This function commits a top-level state object that has been validated
13502
 * with drm_atomic_helper_check().
13503
 *
13504
 * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13505
 * we can only handle plane-related operations and do not yet support
13506
 * asynchronous commit.
13507
 *
13508
 * RETURNS
13509
 * Zero for success or -errno.
13510
 */
13511
static int intel_atomic_commit(struct drm_device *dev,
13512
			       struct drm_atomic_state *state,
13513
			       bool async)
3031 serge 13514
{
6084 serge 13515
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 13516
	struct drm_crtc_state *crtc_state;
6084 serge 13517
	struct drm_crtc *crtc;
13518
	int ret = 0;
13519
	int i;
13520
	bool any_ms = false;
3031 serge 13521
 
6937 serge 13522
	ret = intel_atomic_prepare_commit(dev, state, async);
13523
	if (ret) {
13524
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13525
		return ret;
3031 serge 13526
	}
13527
 
6084 serge 13528
	drm_atomic_helper_swap_state(dev, state);
6937 serge 13529
	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
4104 Serge 13530
 
6084 serge 13531
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13532
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 13533
 
6084 serge 13534
		if (!needs_modeset(crtc->state))
13535
			continue;
3031 serge 13536
 
6084 serge 13537
		any_ms = true;
13538
		intel_pre_plane_update(intel_crtc);
3031 serge 13539
 
6084 serge 13540
		if (crtc_state->active) {
13541
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
13542
			dev_priv->display.crtc_disable(crtc);
13543
			intel_crtc->active = false;
13544
			intel_disable_shared_dpll(intel_crtc);
6937 serge 13545
 
13546
			/*
13547
			 * Underruns don't always raise
13548
			 * interrupts, so check manually.
13549
			 */
13550
			intel_check_cpu_fifo_underruns(dev_priv);
13551
			intel_check_pch_fifo_underruns(dev_priv);
13552
 
13553
			if (!crtc->state->active)
13554
				intel_update_watermarks(crtc);
3031 serge 13555
		}
6084 serge 13556
	}
3031 serge 13557
 
6084 serge 13558
	/* Only after disabling all output pipelines that will be changed can we
13559
	 * update the the output configuration. */
13560
	intel_modeset_update_crtc_state(state);
3031 serge 13561
 
6084 serge 13562
	if (any_ms) {
13563
		intel_shared_dpll_commit(state);
3031 serge 13564
 
6084 serge 13565
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13566
		modeset_update_crtc_power_domains(state);
3031 serge 13567
	}
13568
 
6084 serge 13569
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13570
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13571
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13572
		bool modeset = needs_modeset(crtc->state);
13573
		bool update_pipe = !modeset &&
13574
			to_intel_crtc_state(crtc->state)->update_pipe;
13575
		unsigned long put_domains = 0;
5060 serge 13576
 
6937 serge 13577
		if (modeset)
13578
			intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13579
 
6084 serge 13580
		if (modeset && crtc->state->active) {
13581
			update_scanline_offset(to_intel_crtc(crtc));
13582
			dev_priv->display.crtc_enable(crtc);
3031 serge 13583
		}
13584
 
6084 serge 13585
		if (update_pipe) {
13586
			put_domains = modeset_get_crtc_power_domains(crtc);
3031 serge 13587
 
6084 serge 13588
			/* make sure intel_modeset_check_state runs */
13589
			any_ms = true;
3031 serge 13590
		}
4560 Serge 13591
 
6084 serge 13592
		if (!modeset)
13593
			intel_pre_plane_update(intel_crtc);
4560 Serge 13594
 
6937 serge 13595
		if (crtc->state->active &&
13596
		    (crtc->state->planes_changed || update_pipe))
6084 serge 13597
		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
3031 serge 13598
 
6084 serge 13599
		if (put_domains)
13600
			modeset_put_power_domains(dev_priv, put_domains);
5060 serge 13601
 
6084 serge 13602
		intel_post_plane_update(intel_crtc);
6937 serge 13603
 
13604
		if (modeset)
13605
			intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
5060 serge 13606
	}
13607
 
6084 serge 13608
	/* FIXME: add subpixel order */
3031 serge 13609
 
6088 serge 13610
	drm_atomic_helper_wait_for_vblanks(dev, state);
6937 serge 13611
 
13612
	mutex_lock(&dev->struct_mutex);
6084 serge 13613
	drm_atomic_helper_cleanup_planes(dev, state);
6937 serge 13614
	mutex_unlock(&dev->struct_mutex);
5060 serge 13615
 
6084 serge 13616
	if (any_ms)
13617
		intel_modeset_check_state(dev, state);
5060 serge 13618
 
6084 serge 13619
	drm_atomic_state_free(state);
5060 serge 13620
 
6084 serge 13621
	return 0;
5060 serge 13622
}
13623
 
6084 serge 13624
void intel_crtc_restore_mode(struct drm_crtc *crtc)
3031 serge 13625
{
6084 serge 13626
	struct drm_device *dev = crtc->dev;
13627
	struct drm_atomic_state *state;
13628
	struct drm_crtc_state *crtc_state;
3031 serge 13629
	int ret;
13630
 
6084 serge 13631
	state = drm_atomic_state_alloc(dev);
13632
	if (!state) {
13633
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
13634
			      crtc->base.id);
13635
		return;
3031 serge 13636
	}
13637
 
6084 serge 13638
	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
3031 serge 13639
 
6084 serge 13640
retry:
13641
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
13642
	ret = PTR_ERR_OR_ZERO(crtc_state);
13643
	if (!ret) {
13644
		if (!crtc_state->active)
13645
			goto out;
3031 serge 13646
 
6084 serge 13647
		crtc_state->mode_changed = true;
13648
		ret = drm_atomic_commit(state);
5354 serge 13649
	}
13650
 
6084 serge 13651
	if (ret == -EDEADLK) {
13652
		drm_atomic_state_clear(state);
13653
		drm_modeset_backoff(state->acquire_ctx);
13654
		goto retry;
3031 serge 13655
	}
13656
 
6084 serge 13657
	if (ret)
13658
out:
13659
		drm_atomic_state_free(state);
13660
}
3031 serge 13661
 
6084 serge 13662
#undef for_each_intel_crtc_masked
5060 serge 13663
 
2330 Serge 13664
static const struct drm_crtc_funcs intel_crtc_funcs = {
13665
	.gamma_set = intel_crtc_gamma_set,
6084 serge 13666
	.set_config = drm_atomic_helper_set_config,
2330 Serge 13667
	.destroy = intel_crtc_destroy,
6320 serge 13668
	.page_flip = intel_crtc_page_flip,
6084 serge 13669
	.atomic_duplicate_state = intel_crtc_duplicate_state,
13670
	.atomic_destroy_state = intel_crtc_destroy_state,
2330 Serge 13671
};
2327 Serge 13672
 
4104 Serge 13673
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13674
				      struct intel_shared_dpll *pll,
13675
				      struct intel_dpll_hw_state *hw_state)
3031 serge 13676
{
4104 Serge 13677
	uint32_t val;
3031 serge 13678
 
6937 serge 13679
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
5060 serge 13680
		return false;
13681
 
4104 Serge 13682
	val = I915_READ(PCH_DPLL(pll->id));
13683
	hw_state->dpll = val;
13684
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13685
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13686
 
6937 serge 13687
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
13688
 
4104 Serge 13689
	return val & DPLL_VCO_ENABLE;
13690
}
13691
 
13692
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
13693
				  struct intel_shared_dpll *pll)
13694
{
5354 serge 13695
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
13696
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
4104 Serge 13697
}
13698
 
13699
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
13700
				struct intel_shared_dpll *pll)
13701
{
13702
	/* PCH refclock must be enabled first */
4560 Serge 13703
	ibx_assert_pch_refclk_enabled(dev_priv);
4104 Serge 13704
 
5354 serge 13705
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
4104 Serge 13706
 
13707
	/* Wait for the clocks to stabilize. */
13708
	POSTING_READ(PCH_DPLL(pll->id));
13709
	udelay(150);
13710
 
13711
	/* The pixel multiplier can only be updated once the
13712
	 * DPLL is enabled and the clocks are stable.
13713
	 *
13714
	 * So write it again.
13715
	 */
5354 serge 13716
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
4104 Serge 13717
	POSTING_READ(PCH_DPLL(pll->id));
13718
	udelay(200);
13719
}
13720
 
13721
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
13722
				 struct intel_shared_dpll *pll)
13723
{
13724
	struct drm_device *dev = dev_priv->dev;
13725
	struct intel_crtc *crtc;
13726
 
13727
	/* Make sure no transcoder isn't still depending on us. */
5060 serge 13728
	for_each_intel_crtc(dev, crtc) {
4104 Serge 13729
		if (intel_crtc_to_shared_dpll(crtc) == pll)
13730
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
3031 serge 13731
	}
13732
 
4104 Serge 13733
	I915_WRITE(PCH_DPLL(pll->id), 0);
13734
	POSTING_READ(PCH_DPLL(pll->id));
13735
	udelay(200);
13736
}
13737
 
13738
static char *ibx_pch_dpll_names[] = {
13739
	"PCH DPLL A",
13740
	"PCH DPLL B",
13741
};
13742
 
13743
static void ibx_pch_dpll_init(struct drm_device *dev)
13744
{
13745
	struct drm_i915_private *dev_priv = dev->dev_private;
13746
	int i;
13747
 
13748
	dev_priv->num_shared_dpll = 2;
13749
 
13750
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13751
		dev_priv->shared_dplls[i].id = i;
13752
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
13753
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
13754
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13755
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
13756
		dev_priv->shared_dplls[i].get_hw_state =
13757
			ibx_pch_dpll_get_hw_state;
3031 serge 13758
	}
13759
}
13760
 
4104 Serge 13761
static void intel_shared_dpll_init(struct drm_device *dev)
13762
{
13763
	struct drm_i915_private *dev_priv = dev->dev_private;
13764
 
5060 serge 13765
	if (HAS_DDI(dev))
13766
		intel_ddi_pll_init(dev);
13767
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4104 Serge 13768
		ibx_pch_dpll_init(dev);
13769
	else
13770
		dev_priv->num_shared_dpll = 0;
13771
 
13772
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
13773
}
13774
 
6084 serge 13775
/**
13776
 * intel_prepare_plane_fb - Prepare fb for usage on plane
13777
 * @plane: drm plane to prepare for
13778
 * @fb: framebuffer to prepare for presentation
13779
 *
13780
 * Prepares a framebuffer for usage on a display plane.  Generally this
13781
 * involves pinning the underlying object and updating the frontbuffer tracking
13782
 * bits.  Some older platforms need special physical address handling for
13783
 * cursor planes.
13784
 *
6937 serge 13785
 * Must be called with struct_mutex held.
13786
 *
6084 serge 13787
 * Returns 0 on success, negative error code on failure.
13788
 */
13789
int
13790
intel_prepare_plane_fb(struct drm_plane *plane,
13791
		       const struct drm_plane_state *new_state)
5060 serge 13792
{
13793
	struct drm_device *dev = plane->dev;
6084 serge 13794
	struct drm_framebuffer *fb = new_state->fb;
13795
	struct intel_plane *intel_plane = to_intel_plane(plane);
13796
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
6937 serge 13797
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
6084 serge 13798
	int ret = 0;
5060 serge 13799
 
6937 serge 13800
	if (!obj && !old_obj)
5060 serge 13801
		return 0;
13802
 
6937 serge 13803
	if (old_obj) {
13804
		struct drm_crtc_state *crtc_state =
13805
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
5060 serge 13806
 
6937 serge 13807
		/* Big Hammer, we also need to ensure that any pending
13808
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13809
		 * current scanout is retired before unpinning the old
13810
		 * framebuffer. Note that we rely on userspace rendering
13811
		 * into the buffer attached to the pipe they are waiting
13812
		 * on. If not, userspace generates a GPU hang with IPEHR
13813
		 * point to the MI_WAIT_FOR_EVENT.
13814
		 *
13815
		 * This should only fail upon a hung GPU, in which case we
13816
		 * can safely continue.
13817
		 */
13818
		if (needs_modeset(crtc_state))
13819
			ret = i915_gem_object_wait_rendering(old_obj, true);
13820
 
13821
		/* Swallow -EIO errors to allow updates during hw lockup. */
13822
		if (ret && ret != -EIO)
13823
			return ret;
13824
	}
13825
 
13826
	/* For framebuffer backed by dmabuf, wait for fence */
13827
 
13828
	if (!obj) {
13829
		ret = 0;
13830
	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
6084 serge 13831
	    INTEL_INFO(dev)->cursor_needs_physical) {
13832
		int align = IS_I830(dev) ? 16 * 1024 : 256;
6937 serge 13833
		ret = i915_gem_object_attach_phys(obj, align);
6084 serge 13834
		if (ret)
13835
			DRM_DEBUG_KMS("failed to attach phys object\n");
13836
	} else {
6937 serge 13837
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
6084 serge 13838
	}
5060 serge 13839
 
6937 serge 13840
	if (ret == 0) {
13841
		if (obj) {
13842
			struct intel_plane_state *plane_state =
13843
				to_intel_plane_state(new_state);
13844
 
13845
			i915_gem_request_assign(&plane_state->wait_req,
13846
						obj->last_write_req);
13847
	}
13848
 
6084 serge 13849
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
6937 serge 13850
	}
5060 serge 13851
 
6084 serge 13852
	return ret;
5060 serge 13853
}
13854
 
6084 serge 13855
/**
13856
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13857
 * @plane: drm plane to clean up for
13858
 * @fb: old framebuffer that was on plane
13859
 *
13860
 * Cleans up a framebuffer that has just been removed from a plane.
6937 serge 13861
 *
13862
 * Must be called with struct_mutex held.
6084 serge 13863
 */
13864
void
13865
intel_cleanup_plane_fb(struct drm_plane *plane,
13866
		       const struct drm_plane_state *old_state)
5060 serge 13867
{
6084 serge 13868
	struct drm_device *dev = plane->dev;
6937 serge 13869
	struct intel_plane *intel_plane = to_intel_plane(plane);
13870
	struct intel_plane_state *old_intel_state;
13871
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13872
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
5354 serge 13873
 
6937 serge 13874
	old_intel_state = to_intel_plane_state(old_state);
13875
 
13876
	if (!obj && !old_obj)
6084 serge 13877
		return;
13878
 
6937 serge 13879
	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13880
	    !INTEL_INFO(dev)->cursor_needs_physical))
6084 serge 13881
		intel_unpin_fb_obj(old_state->fb, old_state);
6937 serge 13882
 
13883
	/* prepare_fb aborted? */
13884
	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13885
	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13886
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13887
 
13888
	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13889
 
5354 serge 13890
}
13891
 
6084 serge 13892
int
13893
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
5354 serge 13894
{
6084 serge 13895
	int max_scale;
13896
	struct drm_device *dev;
13897
	struct drm_i915_private *dev_priv;
13898
	int crtc_clock, cdclk;
5060 serge 13899
 
6084 serge 13900
	if (!intel_crtc || !crtc_state)
13901
		return DRM_PLANE_HELPER_NO_SCALING;
5060 serge 13902
 
6084 serge 13903
	dev = intel_crtc->base.dev;
13904
	dev_priv = dev->dev_private;
13905
	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13906
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
5060 serge 13907
 
6937 serge 13908
	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
6084 serge 13909
		return DRM_PLANE_HELPER_NO_SCALING;
13910
 
13911
	/*
13912
	 * skl max scale is lower of:
13913
	 *    close to 3 but not 3, -1 is for that purpose
13914
	 *            or
13915
	 *    cdclk/crtc_clock
13916
	 */
13917
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13918
 
13919
	return max_scale;
13920
}
13921
 
13922
static int
13923
intel_check_primary_plane(struct drm_plane *plane,
13924
			  struct intel_crtc_state *crtc_state,
13925
			  struct intel_plane_state *state)
13926
{
13927
	struct drm_crtc *crtc = state->base.crtc;
13928
	struct drm_framebuffer *fb = state->base.fb;
13929
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13930
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13931
	bool can_position = false;
13932
 
6320 serge 13933
	if (INTEL_INFO(plane->dev)->gen >= 9) {
6084 serge 13934
	/* use scaler when colorkey is not required */
6320 serge 13935
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
6084 serge 13936
		min_scale = 1;
13937
		max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
6320 serge 13938
		}
6084 serge 13939
		can_position = true;
5354 serge 13940
	}
5060 serge 13941
 
6084 serge 13942
	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13943
					     &state->dst, &state->clip,
13944
					     min_scale, max_scale,
13945
					     can_position, true,
13946
					     &state->visible);
5354 serge 13947
}
13948
 
13949
static void
13950
intel_commit_primary_plane(struct drm_plane *plane,
13951
			   struct intel_plane_state *state)
13952
{
6084 serge 13953
	struct drm_crtc *crtc = state->base.crtc;
13954
	struct drm_framebuffer *fb = state->base.fb;
13955
	struct drm_device *dev = plane->dev;
5354 serge 13956
	struct drm_i915_private *dev_priv = dev->dev_private;
13957
 
6084 serge 13958
	crtc = crtc ? crtc : plane->crtc;
13959
 
13960
	dev_priv->display.update_primary_plane(crtc, fb,
13961
					       state->src.x1 >> 16,
13962
					       state->src.y1 >> 16);
13963
}
5060 serge 13964
 
6084 serge 13965
static void
13966
intel_disable_primary_plane(struct drm_plane *plane,
13967
			    struct drm_crtc *crtc)
13968
{
13969
	struct drm_device *dev = plane->dev;
13970
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 13971
 
6084 serge 13972
	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
13973
}
5060 serge 13974
 
6084 serge 13975
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13976
				    struct drm_crtc_state *old_crtc_state)
13977
{
13978
	struct drm_device *dev = crtc->dev;
13979
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13980
	struct intel_crtc_state *old_intel_state =
13981
		to_intel_crtc_state(old_crtc_state);
13982
	bool modeset = needs_modeset(crtc->state);
5060 serge 13983
 
6084 serge 13984
	/* Perform vblank evasion around commit operation */
13985
		intel_pipe_update_start(intel_crtc);
5354 serge 13986
 
6084 serge 13987
	if (modeset)
13988
		return;
5354 serge 13989
 
6084 serge 13990
	if (to_intel_crtc_state(crtc->state)->update_pipe)
13991
		intel_update_pipe_config(intel_crtc, old_intel_state);
13992
	else if (INTEL_INFO(dev)->gen >= 9)
13993
		skl_detach_scalers(intel_crtc);
5354 serge 13994
}
5060 serge 13995
 
6084 serge 13996
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13997
				     struct drm_crtc_state *old_crtc_state)
5354 serge 13998
{
13999
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 14000
 
6084 serge 14001
		intel_pipe_update_end(intel_crtc);
5060 serge 14002
}
14003
 
6084 serge 14004
/**
14005
 * intel_plane_destroy - destroy a plane
14006
 * @plane: plane to destroy
14007
 *
14008
 * Common destruction function for all types of planes (primary, cursor,
14009
 * sprite).
14010
 */
14011
void intel_plane_destroy(struct drm_plane *plane)
5060 serge 14012
{
14013
	struct intel_plane *intel_plane = to_intel_plane(plane);
14014
	drm_plane_cleanup(plane);
14015
	kfree(intel_plane);
14016
}
14017
 
6084 serge 14018
const struct drm_plane_funcs intel_plane_funcs = {
14019
	.update_plane = drm_atomic_helper_update_plane,
14020
	.disable_plane = drm_atomic_helper_disable_plane,
5060 serge 14021
	.destroy = intel_plane_destroy,
6084 serge 14022
	.set_property = drm_atomic_helper_plane_set_property,
14023
	.atomic_get_property = intel_plane_atomic_get_property,
14024
	.atomic_set_property = intel_plane_atomic_set_property,
14025
	.atomic_duplicate_state = intel_plane_duplicate_state,
14026
	.atomic_destroy_state = intel_plane_destroy_state,
14027
 
5060 serge 14028
};
14029
 
14030
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14031
						    int pipe)
14032
{
14033
	struct intel_plane *primary;
6084 serge 14034
	struct intel_plane_state *state;
5060 serge 14035
	const uint32_t *intel_primary_formats;
6084 serge 14036
	unsigned int num_formats;
5060 serge 14037
 
14038
	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
14039
	if (primary == NULL)
14040
		return NULL;
14041
 
6084 serge 14042
	state = intel_create_plane_state(&primary->base);
14043
	if (!state) {
14044
		kfree(primary);
14045
		return NULL;
14046
	}
14047
	primary->base.state = &state->base;
14048
 
5060 serge 14049
	primary->can_scale = false;
14050
	primary->max_downscale = 1;
6084 serge 14051
	if (INTEL_INFO(dev)->gen >= 9) {
14052
		primary->can_scale = true;
14053
		state->scaler_id = -1;
14054
	}
5060 serge 14055
	primary->pipe = pipe;
14056
	primary->plane = pipe;
6084 serge 14057
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
14058
	primary->check_plane = intel_check_primary_plane;
14059
	primary->commit_plane = intel_commit_primary_plane;
14060
	primary->disable_plane = intel_disable_primary_plane;
5060 serge 14061
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14062
		primary->plane = !pipe;
14063
 
6084 serge 14064
	if (INTEL_INFO(dev)->gen >= 9) {
14065
		intel_primary_formats = skl_primary_formats;
14066
		num_formats = ARRAY_SIZE(skl_primary_formats);
14067
	} else if (INTEL_INFO(dev)->gen >= 4) {
14068
		intel_primary_formats = i965_primary_formats;
14069
		num_formats = ARRAY_SIZE(i965_primary_formats);
5060 serge 14070
	} else {
6084 serge 14071
		intel_primary_formats = i8xx_primary_formats;
14072
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
5060 serge 14073
	}
14074
 
14075
	drm_universal_plane_init(dev, &primary->base, 0,
6084 serge 14076
				 &intel_plane_funcs,
5060 serge 14077
				 intel_primary_formats, num_formats,
6937 serge 14078
				 DRM_PLANE_TYPE_PRIMARY, NULL);
5354 serge 14079
 
6084 serge 14080
	if (INTEL_INFO(dev)->gen >= 4)
14081
		intel_create_rotation_property(dev, primary);
5354 serge 14082
 
6084 serge 14083
	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14084
 
5060 serge 14085
	return &primary->base;
14086
}
14087
 
6084 serge 14088
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
5060 serge 14089
{
6084 serge 14090
	if (!dev->mode_config.rotation_property) {
14091
		unsigned long flags = BIT(DRM_ROTATE_0) |
14092
			BIT(DRM_ROTATE_180);
5060 serge 14093
 
6084 serge 14094
		if (INTEL_INFO(dev)->gen >= 9)
14095
			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
5060 serge 14096
 
6084 serge 14097
		dev->mode_config.rotation_property =
14098
			drm_mode_create_rotation_property(dev, flags);
14099
	}
14100
	if (dev->mode_config.rotation_property)
14101
		drm_object_attach_property(&plane->base.base,
14102
				dev->mode_config.rotation_property,
14103
				plane->base.state->rotation);
5060 serge 14104
}
14105
 
14106
static int
5354 serge 14107
intel_check_cursor_plane(struct drm_plane *plane,
6084 serge 14108
			 struct intel_crtc_state *crtc_state,
5354 serge 14109
			 struct intel_plane_state *state)
5060 serge 14110
{
6084 serge 14111
	struct drm_crtc *crtc = crtc_state->base.crtc;
14112
	struct drm_framebuffer *fb = state->base.fb;
5354 serge 14113
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
6084 serge 14114
	enum pipe pipe = to_intel_plane(plane)->pipe;
5354 serge 14115
	unsigned stride;
5060 serge 14116
	int ret;
14117
 
6084 serge 14118
	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14119
					    &state->dst, &state->clip,
5060 serge 14120
					    DRM_PLANE_HELPER_NO_SCALING,
14121
					    DRM_PLANE_HELPER_NO_SCALING,
5354 serge 14122
					    true, true, &state->visible);
5060 serge 14123
	if (ret)
14124
		return ret;
14125
 
5354 serge 14126
	/* if we want to turn off the cursor ignore width and height */
14127
	if (!obj)
14128
		return 0;
14129
 
14130
	/* Check for which cursor types we support */
6084 serge 14131
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
14132
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
14133
			  state->base.crtc_w, state->base.crtc_h);
5354 serge 14134
		return -EINVAL;
14135
	}
14136
 
6084 serge 14137
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
14138
	if (obj->base.size < stride * state->base.crtc_h) {
5354 serge 14139
		DRM_DEBUG_KMS("buffer is too small\n");
14140
		return -ENOMEM;
14141
	}
14142
 
6084 serge 14143
	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
5354 serge 14144
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
6084 serge 14145
		return -EINVAL;
5354 serge 14146
	}
14147
 
6084 serge 14148
	/*
14149
	 * There's something wrong with the cursor on CHV pipe C.
14150
	 * If it straddles the left edge of the screen then
14151
	 * moving it away from the edge or disabling it often
14152
	 * results in a pipe underrun, and often that can lead to
14153
	 * dead pipe (constant underrun reported, and it scans
14154
	 * out just a solid color). To recover from that, the
14155
	 * display power well must be turned off and on again.
14156
	 * Refuse the put the cursor into that compromised position.
14157
	 */
14158
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
14159
	    state->visible && state->base.crtc_x < 0) {
14160
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
14161
		return -EINVAL;
14162
	}
14163
 
14164
	return 0;
5354 serge 14165
}
14166
 
6084 serge 14167
static void
14168
intel_disable_cursor_plane(struct drm_plane *plane,
14169
			   struct drm_crtc *crtc)
5354 serge 14170
{
6084 serge 14171
	intel_crtc_update_cursor(crtc, false);
5060 serge 14172
}
5354 serge 14173
 
6084 serge 14174
static void
14175
intel_commit_cursor_plane(struct drm_plane *plane,
14176
			  struct intel_plane_state *state)
5354 serge 14177
{
6084 serge 14178
	struct drm_crtc *crtc = state->base.crtc;
14179
	struct drm_device *dev = plane->dev;
14180
	struct intel_crtc *intel_crtc;
14181
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14182
	uint32_t addr;
5354 serge 14183
 
6084 serge 14184
	crtc = crtc ? crtc : plane->crtc;
14185
	intel_crtc = to_intel_crtc(crtc);
5354 serge 14186
 
6084 serge 14187
	if (!obj)
14188
		addr = 0;
14189
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
14190
		addr = i915_gem_obj_ggtt_offset(obj);
14191
	else
14192
		addr = obj->phys_handle->busaddr;
5354 serge 14193
 
6084 serge 14194
	intel_crtc->cursor_addr = addr;
5354 serge 14195
 
6084 serge 14196
	if (crtc->state->active)
14197
		intel_crtc_update_cursor(crtc, state->visible);
5354 serge 14198
}
14199
 
5060 serge 14200
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14201
						   int pipe)
14202
{
14203
	struct intel_plane *cursor;
6084 serge 14204
	struct intel_plane_state *state;
5060 serge 14205
 
14206
	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
14207
	if (cursor == NULL)
14208
		return NULL;
14209
 
6084 serge 14210
	state = intel_create_plane_state(&cursor->base);
14211
	if (!state) {
14212
		kfree(cursor);
14213
		return NULL;
14214
	}
14215
	cursor->base.state = &state->base;
14216
 
5060 serge 14217
	cursor->can_scale = false;
14218
	cursor->max_downscale = 1;
14219
	cursor->pipe = pipe;
14220
	cursor->plane = pipe;
6084 serge 14221
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
14222
	cursor->check_plane = intel_check_cursor_plane;
14223
	cursor->commit_plane = intel_commit_cursor_plane;
14224
	cursor->disable_plane = intel_disable_cursor_plane;
5060 serge 14225
 
14226
	drm_universal_plane_init(dev, &cursor->base, 0,
6084 serge 14227
				 &intel_plane_funcs,
5060 serge 14228
				 intel_cursor_formats,
14229
				 ARRAY_SIZE(intel_cursor_formats),
6937 serge 14230
				 DRM_PLANE_TYPE_CURSOR, NULL);
5354 serge 14231
 
14232
	if (INTEL_INFO(dev)->gen >= 4) {
14233
		if (!dev->mode_config.rotation_property)
14234
			dev->mode_config.rotation_property =
14235
				drm_mode_create_rotation_property(dev,
14236
							BIT(DRM_ROTATE_0) |
14237
							BIT(DRM_ROTATE_180));
14238
		if (dev->mode_config.rotation_property)
14239
			drm_object_attach_property(&cursor->base.base,
14240
				dev->mode_config.rotation_property,
6084 serge 14241
				state->base.rotation);
5354 serge 14242
	}
14243
 
6084 serge 14244
	if (INTEL_INFO(dev)->gen >=9)
14245
		state->scaler_id = -1;
14246
 
14247
	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14248
 
5060 serge 14249
	return &cursor->base;
14250
}
14251
 
6084 serge 14252
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14253
	struct intel_crtc_state *crtc_state)
14254
{
14255
	int i;
14256
	struct intel_scaler *intel_scaler;
14257
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14258
 
14259
	for (i = 0; i < intel_crtc->num_scalers; i++) {
14260
		intel_scaler = &scaler_state->scalers[i];
14261
		intel_scaler->in_use = 0;
14262
		intel_scaler->mode = PS_SCALER_MODE_DYN;
14263
	}
14264
 
14265
	scaler_state->scaler_id = -1;
14266
}
14267
 
2330 Serge 14268
static void intel_crtc_init(struct drm_device *dev, int pipe)
14269
{
5060 serge 14270
	struct drm_i915_private *dev_priv = dev->dev_private;
2330 Serge 14271
	struct intel_crtc *intel_crtc;
6084 serge 14272
	struct intel_crtc_state *crtc_state = NULL;
5060 serge 14273
	struct drm_plane *primary = NULL;
14274
	struct drm_plane *cursor = NULL;
14275
	int i, ret;
2327 Serge 14276
 
4560 Serge 14277
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
2330 Serge 14278
	if (intel_crtc == NULL)
14279
		return;
2327 Serge 14280
 
6084 serge 14281
	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14282
	if (!crtc_state)
14283
		goto fail;
14284
	intel_crtc->config = crtc_state;
14285
	intel_crtc->base.state = &crtc_state->base;
14286
	crtc_state->base.crtc = &intel_crtc->base;
14287
 
14288
	/* initialize shared scalers */
14289
	if (INTEL_INFO(dev)->gen >= 9) {
14290
		if (pipe == PIPE_C)
14291
			intel_crtc->num_scalers = 1;
14292
		else
14293
			intel_crtc->num_scalers = SKL_NUM_SCALERS;
14294
 
14295
		skl_init_scalers(dev, intel_crtc, crtc_state);
14296
	}
14297
 
5060 serge 14298
	primary = intel_primary_plane_create(dev, pipe);
14299
	if (!primary)
14300
		goto fail;
2327 Serge 14301
 
5060 serge 14302
	cursor = intel_cursor_plane_create(dev, pipe);
14303
	if (!cursor)
14304
		goto fail;
14305
 
14306
	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
6937 serge 14307
					cursor, &intel_crtc_funcs, NULL);
5060 serge 14308
	if (ret)
14309
		goto fail;
14310
 
2330 Serge 14311
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
14312
	for (i = 0; i < 256; i++) {
14313
		intel_crtc->lut_r[i] = i;
14314
		intel_crtc->lut_g[i] = i;
14315
		intel_crtc->lut_b[i] = i;
14316
	}
2327 Serge 14317
 
4560 Serge 14318
	/*
14319
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
5060 serge 14320
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
4560 Serge 14321
	 */
2330 Serge 14322
	intel_crtc->pipe = pipe;
14323
	intel_crtc->plane = pipe;
4560 Serge 14324
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
2330 Serge 14325
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
14326
		intel_crtc->plane = !pipe;
14327
	}
2327 Serge 14328
 
5060 serge 14329
	intel_crtc->cursor_base = ~0;
14330
	intel_crtc->cursor_cntl = ~0;
5354 serge 14331
	intel_crtc->cursor_size = ~0;
5060 serge 14332
 
6084 serge 14333
	intel_crtc->wm.cxsr_allowed = true;
14334
 
2330 Serge 14335
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14336
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14337
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14338
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
2327 Serge 14339
 
2330 Serge 14340
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
5060 serge 14341
 
14342
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14343
	return;
14344
 
14345
fail:
14346
	if (primary)
14347
		drm_plane_cleanup(primary);
14348
	if (cursor)
14349
		drm_plane_cleanup(cursor);
6084 serge 14350
	kfree(crtc_state);
5060 serge 14351
	kfree(intel_crtc);
2330 Serge 14352
}
2327 Serge 14353
 
4560 Serge 14354
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14355
{
14356
	struct drm_encoder *encoder = connector->base.encoder;
5060 serge 14357
	struct drm_device *dev = connector->base.dev;
4560 Serge 14358
 
5060 serge 14359
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4560 Serge 14360
 
5354 serge 14361
	if (!encoder || WARN_ON(!encoder->crtc))
4560 Serge 14362
		return INVALID_PIPE;
14363
 
14364
	return to_intel_crtc(encoder->crtc)->pipe;
14365
}
14366
 
3031 serge 14367
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14368
				struct drm_file *file)
14369
{
14370
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
5060 serge 14371
	struct drm_crtc *drmmode_crtc;
3031 serge 14372
	struct intel_crtc *crtc;
2327 Serge 14373
 
5060 serge 14374
	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
2327 Serge 14375
 
5060 serge 14376
	if (!drmmode_crtc) {
3031 serge 14377
		DRM_ERROR("no such CRTC id\n");
4560 Serge 14378
		return -ENOENT;
3031 serge 14379
	}
2327 Serge 14380
 
5060 serge 14381
	crtc = to_intel_crtc(drmmode_crtc);
3031 serge 14382
	pipe_from_crtc_id->pipe = crtc->pipe;
2327 Serge 14383
 
3031 serge 14384
	return 0;
14385
}
2327 Serge 14386
 
3031 serge 14387
static int intel_encoder_clones(struct intel_encoder *encoder)
2330 Serge 14388
{
3031 serge 14389
	struct drm_device *dev = encoder->base.dev;
14390
	struct intel_encoder *source_encoder;
2330 Serge 14391
	int index_mask = 0;
14392
	int entry = 0;
2327 Serge 14393
 
5354 serge 14394
	for_each_intel_encoder(dev, source_encoder) {
5060 serge 14395
		if (encoders_cloneable(encoder, source_encoder))
2330 Serge 14396
			index_mask |= (1 << entry);
3031 serge 14397
 
2330 Serge 14398
		entry++;
14399
	}
2327 Serge 14400
 
2330 Serge 14401
	return index_mask;
14402
}
2327 Serge 14403
 
2330 Serge 14404
static bool has_edp_a(struct drm_device *dev)
14405
{
14406
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 14407
 
2330 Serge 14408
	if (!IS_MOBILE(dev))
14409
		return false;
2327 Serge 14410
 
2330 Serge 14411
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14412
		return false;
2327 Serge 14413
 
5060 serge 14414
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
2330 Serge 14415
		return false;
2327 Serge 14416
 
2330 Serge 14417
	return true;
14418
}
2327 Serge 14419
 
5060 serge 14420
static bool intel_crt_present(struct drm_device *dev)
14421
{
14422
	struct drm_i915_private *dev_priv = dev->dev_private;
14423
 
5354 serge 14424
	if (INTEL_INFO(dev)->gen >= 9)
5060 serge 14425
		return false;
14426
 
5354 serge 14427
	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14428
		return false;
14429
 
5060 serge 14430
	if (IS_CHERRYVIEW(dev))
14431
		return false;
14432
 
6937 serge 14433
	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
5060 serge 14434
		return false;
14435
 
6937 serge 14436
	/* DDI E can't be used if DDI A requires 4 lanes */
14437
	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14438
		return false;
14439
 
14440
	if (!dev_priv->vbt.int_crt_support)
14441
		return false;
14442
 
5060 serge 14443
	return true;
14444
}
14445
 
2330 Serge 14446
/*
 * Probe and register every display output (encoder/connector pair) for
 * this platform: LVDS, CRT, then the platform-specific digital ports
 * (DDI on BXT/HSW+, PCH ports on ILK..IVB, VLV/CHV ports, or legacy
 * SDVO/HDMI/DP/DVO on older gens). Finally fills in each encoder's
 * possible_crtcs/possible_clones masks and reorders panel connectors.
 *
 * NOTE: the probe order below mirrors upstream i915 and matters — e.g.
 * SDVO must be tried before HDMI on shared pins, and eDP before DP.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D is eDP or HDMI, never both. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_B))
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DP_B, PORT_B);

		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_C))
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	/* TV-out probing is disabled in this KolibriOS port. */
//   if (SUPPORTS_TV(dev))
//       intel_tv_init(dev);

	intel_psr_init(dev);

	/* Now that all encoders exist, compute their CRTC/clone masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14611
 
6084 serge 14612
/*
 * .destroy hook for user-created framebuffers: tear down the DRM fb,
 * drop the framebuffer reference on the backing GEM object, and free
 * the intel_framebuffer wrapper. struct_mutex protects the refcount
 * and the object unreference.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	/* Refcount underflow would indicate an unbalanced init/destroy. */
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}
2330 Serge 14624
 
6084 serge 14625
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14626
						struct drm_file *file,
14627
						unsigned int *handle)
14628
{
14629
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14630
	struct drm_i915_gem_object *obj = intel_fb->obj;
14631
 
14632
	if (obj->userptr.mm) {
14633
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14634
		return -EINVAL;
14635
	}
14636
 
14637
	return drm_gem_handle_create(file, &obj->base, handle);
14638
}
14639
 
14640
/*
 * .dirty hook: userspace reports that part of the framebuffer changed.
 * We simply flush the whole backing object (frontbuffer tracking) under
 * struct_mutex; the clip rectangles are ignored. Always succeeds.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
14656
 
2335 Serge 14657
/* Framebuffer vtable installed by intel_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
2327 Serge 14662
 
6084 serge 14663
static
14664
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14665
			 uint32_t pixel_format)
14666
{
14667
	u32 gen = INTEL_INFO(dev)->gen;
14668
 
14669
	if (gen >= 9) {
14670
		/* "The stride in bytes must not exceed the of the size of 8K
14671
		 *  pixels and 32K bytes."
14672
		 */
14673
		 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
6937 serge 14674
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
6084 serge 14675
		return 32*1024;
14676
	} else if (gen >= 4) {
14677
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14678
			return 16*1024;
14679
		else
14680
			return 32*1024;
14681
	} else if (gen >= 3) {
14682
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14683
			return 8*1024;
14684
		else
14685
			return 16*1024;
14686
	} else {
14687
		/* XXX DSPC is limited to 4k tiled */
14688
		return 8*1024;
14689
	}
14690
}
14691
 
5060 serge 14692
/*
 * Validate a user-supplied mode_cmd against the GEM object and this
 * hardware's limits (tiling modifier, stride alignment, pitch limit,
 * pixel format, size), then initialize @intel_fb around @obj.
 *
 * Caller must hold dev->struct_mutex. Returns 0 on success or -EINVAL
 * on any validation failure; on success the object gains a framebuffer
 * reference that is dropped in intel_user_framebuffer_destroy().
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's tiling. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through - Y/Yf are valid on gen9+ */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Pitch must be a multiple of the tiling-dependent alignment. */
	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout requires the fb pitch to equal the fence stride. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
		    INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	intel_fb->obj->framebuffer_references++;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}
	/* KolibriOS-specific framebuffer bookkeeping. */
	kolibri_framebuffer_init(intel_fb);
	return 0;
}
2327 Serge 14840
 
6084 serge 14841
/*
 * .fb_create hook: look up the GEM object named by userspace's
 * mode_cmd handle and wrap it in an intel framebuffer. On failure the
 * looked-up object reference is dropped and an ERR_PTR is returned.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	/* Local copy: intel_framebuffer_create() may rewrite modifier[0]. */
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd.handles[0]));
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
14861
 
14862
#ifndef CONFIG_DRM_FBDEV_EMULATION
/* No fbdev emulation: the output-poll hook becomes a no-op stub. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
2327 Serge 14867
 
2360 Serge 14868
/* Mode-config vtable: fb creation, hotplug repoll, and atomic entry points. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
2327 Serge 14876
 
3031 serge 14877
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* DPLL divider search strategy per platform. */
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_CHERRYVIEW(dev))
		dev_priv->display.find_dpll = chv_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/* CRTC enable/disable, pipe readout and primary plane hooks. */
	if (INTEL_INFO(dev)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.update_primary_plane =
			skylake_update_primary_plane;
	} else if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev) || IS_G4X(dev))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	/* FDI link training and CDCLK reprogramming hooks. */
	if (IS_GEN5(dev)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
		if (IS_BROADWELL(dev)) {
			dev_priv->display.modeset_commit_cdclk =
				broadwell_modeset_commit_cdclk;
			dev_priv->display.modeset_calc_cdclk =
				broadwell_modeset_calc_cdclk;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			broxton_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broxton_modeset_calc_cdclk;
	}

	/* Page-flip submission path per generation. */
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}

	mutex_init(&dev_priv->pps_mutex);
}
15056
 
15057
/*
15058
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15059
 * resume, or other times.  This quirk makes sure that's the case for
15060
 * affected systems.
15061
 */
15062
/* Quirk hook: force pipe A to stay enabled (see comment above). */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}
2327 Serge 15069
 
5354 serge 15070
/* Quirk hook: force pipe B to stay enabled (830 companion to pipe A). */
static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}
15077
 
3031 serge 15078
/*
15079
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15080
 */
15081
/* Quirk hook: force spread-spectrum clocking off for LVDS. */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}
2327 Serge 15087
 
3031 serge 15088
/*
15089
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15090
 * brightness value
15091
 */
15092
/* Quirk hook: treat the panel backlight PWM value as inverted. */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}
2327 Serge 15098
 
5060 serge 15099
/* Some VBT's incorrectly indicate no backlight is present */
15100
/* Quirk hook: assume a backlight exists despite the VBT saying otherwise. */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}
15106
 
3031 serge 15107
/* PCI-ID keyed quirk entry; PCI_ANY_ID in a subsystem field is a wildcard. */
struct intel_quirk {
	int device;		/* PCI device ID */
	int subsystem_vendor;	/* board subsystem vendor, or PCI_ANY_ID */
	int subsystem_device;	/* board subsystem device, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied on match */
};
2327 Serge 15113
 
3031 serge 15114
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15115
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);		/* applied on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated match list */
};
2327 Serge 15119
 
3031 serge 15120
/* DMI callback: log the match; returning 1 stops further list scanning. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
2327 Serge 15125
 
3031 serge 15126
/* DMI-matched quirk table, consulted by intel_init_quirks(). */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
2327 Serge 15141
 
3031 serge 15142
/*
 * PCI-ID keyed quirk table, scanned by intel_init_quirks(). Note the
 * two 0x3577 (830) wildcard entries are both intended: that chip needs
 * pipe A *and* pipe B forced on.
 */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
2327 Serge 15203
 
3031 serge 15204
static void intel_init_quirks(struct drm_device *dev)
2330 Serge 15205
{
3031 serge 15206
	struct pci_dev *d = dev->pdev;
15207
	int i;
2327 Serge 15208
 
3031 serge 15209
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15210
		struct intel_quirk *q = &intel_quirks[i];
2327 Serge 15211
 
3031 serge 15212
		if (d->device == q->device &&
15213
		    (d->subsystem_vendor == q->subsystem_vendor ||
15214
		     q->subsystem_vendor == PCI_ANY_ID) &&
15215
		    (d->subsystem_device == q->subsystem_device ||
15216
		     q->subsystem_device == PCI_ANY_ID))
15217
			q->hook(dev);
15218
	}
5097 serge 15219
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15220
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15221
			intel_dmi_quirks[i].hook(dev);
15222
	}
2330 Serge 15223
}
2327 Serge 15224
 
3031 serge 15225
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	/* NOTE(port): legacy VGA IO arbitration is stubbed out in this
	 * KolibriOS port — upstream brackets the SR01 access with
	 * vga_get_uninterruptible()/vga_put(). */
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	/* Set SR01 bit 5 (screen off) before disabling the VGA plane. */
	outb(sr1 | 1<<5, VGA_SR_DATA);
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	/* Posting read flushes the write before we return. */
	POSTING_READ(vga_reg);
}
15243
 
3031 serge 15244
/*
 * Bring up the parts of the display hardware that need (re)programming on
 * both initial load and resume. Call order is deliberate: clocks first,
 * then DDI buffers, clock gating workarounds, and finally GT power saving.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_update_cdclk(dev);
	intel_prepare_ddi(dev);
	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev);
}
15251
 
3031 serge 15252
/*
 * One-time modesetting initialization: sets up the DRM mode_config limits,
 * applies quirks, creates crtcs/planes/outputs, reads out the BIOS-programmed
 * hardware state and reserves any BIOS framebuffer that is still scanned out.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Display-less variants (e.g. server SKUs): nothing more to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	intel_init_display(dev);

	/* Maximum framebuffer dimensions scale with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a crtc per pipe, plus its sprite planes; sprite init
	 * failure is non-fatal and only logged. */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}
}
15369
 
3031 serge 15370
static void intel_enable_pipe_a(struct drm_device *dev)
2330 Serge 15371
{
3031 serge 15372
	struct intel_connector *connector;
15373
	struct drm_connector *crt = NULL;
15374
	struct intel_load_detect_pipe load_detect_temp;
5060 serge 15375
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
2330 Serge 15376
 
3031 serge 15377
	/* We can't just switch on the pipe A, we need to set things up with a
15378
	 * proper mode and output configuration. As a gross hack, enable pipe A
15379
	 * by enabling the load detect pipe once. */
6084 serge 15380
	for_each_intel_connector(dev, connector) {
3031 serge 15381
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15382
			crt = &connector->base;
15383
			break;
2330 Serge 15384
		}
15385
	}
15386
 
3031 serge 15387
	if (!crt)
15388
		return;
2330 Serge 15389
 
5060 serge 15390
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
6084 serge 15391
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
2327 Serge 15392
}
15393
 
3031 serge 15394
static bool
15395
intel_check_plane_mapping(struct intel_crtc *crtc)
2327 Serge 15396
{
3746 Serge 15397
	struct drm_device *dev = crtc->base.dev;
15398
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 15399
	u32 val;
2327 Serge 15400
 
3746 Serge 15401
	if (INTEL_INFO(dev)->num_pipes == 1)
3031 serge 15402
		return true;
2327 Serge 15403
 
6084 serge 15404
	val = I915_READ(DSPCNTR(!crtc->plane));
2327 Serge 15405
 
3031 serge 15406
	if ((val & DISPLAY_PLANE_ENABLE) &&
15407
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15408
		return false;
2327 Serge 15409
 
3031 serge 15410
	return true;
2327 Serge 15411
}
15412
 
6084 serge 15413
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15414
{
15415
	struct drm_device *dev = crtc->base.dev;
15416
	struct intel_encoder *encoder;
15417
 
15418
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15419
		return true;
15420
 
15421
	return false;
15422
}
15423
 
3031 serge 15424
/*
 * Fix up inconsistencies between the crtc's read-out hardware state and the
 * software tracking: BIOS leftovers, crossed plane mappings on gen2/3, the
 * pipe A quirk, and encoder/connector links for deactivated pipes.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);

	/* Clear any frame start delays used for debugging left by the BIOS */
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (!intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active != crtc->base.state->active) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because of calls to intel_crtc_disable_noatomic,
		 * or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.state->enable ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		/* Sync the atomic state to the (now off) hardware state. */
		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;
		crtc->base.state->connector_mask = 0;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 *  actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
			encoder->base.crtc = NULL;
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
15528
 
3031 serge 15529
/*
 * Fix up an encoder whose software state (active connectors) disagrees with
 * the hardware state (no active pipe) — typically fallout from register
 * restore on resume. Such encoders are manually disabled and their
 * connector links clamped to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;
	bool active = false;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	/* Is any connector currently routed through this encoder? */
	for_each_intel_connector(dev, connector) {
		if (connector->base.encoder != &encoder->base)
			continue;

		active = true;
		break;
	}

	if (active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15581
 
5060 serge 15582
void i915_redisable_vga_power_on(struct drm_device *dev)
3746 Serge 15583
{
15584
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 15585
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
3746 Serge 15586
 
5060 serge 15587
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15588
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15589
		i915_disable_vga(dev);
15590
	}
15591
}
15592
 
15593
/*
 * Paranoid wrapper around i915_redisable_vga_power_on() that first takes a
 * VGA power-domain reference (only if the domain is already enabled) and
 * drops it again afterwards.
 */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	/* Balance the conditional get above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15611
 
6084 serge 15612
static bool primary_get_hw_state(struct intel_plane *plane)
5060 serge 15613
{
6084 serge 15614
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5060 serge 15615
 
6084 serge 15616
	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15617
}
5060 serge 15618
 
6084 serge 15619
/* FIXME read out full plane state for all planes */
15620
static void readout_plane_state(struct intel_crtc *crtc)
15621
{
15622
	struct drm_plane *primary = crtc->base.primary;
15623
	struct intel_plane_state *plane_state =
15624
		to_intel_plane_state(primary->state);
15625
 
6937 serge 15626
	plane_state->visible = crtc->active &&
6084 serge 15627
		primary_get_hw_state(to_intel_plane(primary));
15628
 
15629
	if (plane_state->visible)
15630
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
5060 serge 15631
}
15632
 
4104 Serge 15633
/*
 * Read the current hardware state into the software tracking structures:
 * crtc pipe configs, shared DPLLs, encoders, connectors, and finally the
 * per-crtc modes needed to keep the atomic core consistent.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	for_each_intel_crtc(dev, crtc) {
		/* Throw away any stale software state before the readout. */
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
		memset(crtc->config, 0, sizeof(*crtc->config));
		crtc->config->base.crtc = &crtc->base;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 crtc->config);

		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		/* Recompute refcount/mask from the crtcs using this pll. */
		pll->active = 0;
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}
	}
}
2332 Serge 15760
 
6084 serge 15761
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that was left on but is unused. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back watermarks with the platform-appropriate helper. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base);
		/* A non-zero result here indicates stale power-domain
		 * references; warn and release them. */
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);
}
3746 Serge 15817
 
6084 serge 15818
/*
 * Restore the display state on resume: snapshot the complete pre-suspend
 * atomic state (crtcs, planes, connectors, dplls), re-read and sanitize the
 * hardware, then commit the saved state back. On any failure the partially
 * built state is logged and freed.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
	struct intel_connector *conn;
	struct intel_plane *plane;
	struct drm_crtc *crtc;
	int ret;

	/* Allocation failure: silently skip the restore. */
	if (!state)
		return;

	state->acquire_ctx = dev->mode_config.acquire_ctx;

	/* preserve complete old state, including dpll */
	intel_atomic_get_shared_dpll_state(state);

	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}

	for_each_intel_plane(dev, plane) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
		if (ret)
			goto err;
	}

	for_each_intel_connector(dev, conn) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
		if (ret)
			goto err;
	}

	intel_modeset_setup_hw_state(dev);

	i915_redisable_vga(dev);
	ret = drm_atomic_commit(state);
	/* On success the commit consumed the state; don't free it. */
	if (!ret)
		return;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_free(state);
}
15869
 
3031 serge 15870
/*
 * GEM-dependent part of modeset init: bring up GT power saving and the
 * display hardware, then pin & fence any framebuffers allocated during
 * early init (e.g. the inherited BIOS fb).
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_init_hw(dev);

	/* NOTE(port): overlay setup is disabled in this KolibriOS port. */
//   intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced.  When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary,
						 c->primary->fb,
						 c->primary->state);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Pinning failed: drop the fb from this crtc so we
			 * don't scan out an unpinned buffer. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}
15912
 
5060 serge 15913
void intel_connector_unregister(struct intel_connector *intel_connector)
15914
{
15915
	struct drm_connector *connector = &intel_connector->base;
15916
 
15917
	intel_panel_destroy_backlight(connector);
15918
	drm_connector_unregister(connector);
15919
}
15920
 
3031 serge 15921
/*
 * Tear down the modesetting state. NOTE(port): the entire body is compiled
 * out (#if 0) in this KolibriOS port — the driver is never unloaded — but it
 * is kept for reference against the upstream teardown order.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	for_each_intel_connector(dev, connector)
		connector->unregister(connector);

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
#endif
}
15964
 
15965
/*
3031 serge 15966
 * Return which encoder is currently attached for connector.
2327 Serge 15967
 */
3031 serge 15968
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
2327 Serge 15969
{
3031 serge 15970
	return &intel_attached_encoder(connector)->base;
15971
}
2327 Serge 15972
 
3031 serge 15973
void intel_connector_attach_encoder(struct intel_connector *connector,
15974
				    struct intel_encoder *encoder)
15975
{
15976
	connector->encoder = encoder;
15977
	drm_mode_connector_attach_encoder(&connector->base,
15978
					  &encoder->base);
2327 Serge 15979
}
15980
 
15981
/*
3031 serge 15982
 * set vga decode state - true == enable VGA decode
2327 Serge 15983
 */
3031 serge 15984
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
2327 Serge 15985
{
2330 Serge 15986
	struct drm_i915_private *dev_priv = dev->dev_private;
4539 Serge 15987
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
3031 serge 15988
	u16 gmch_ctrl;
2327 Serge 15989
 
5060 serge 15990
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15991
		DRM_ERROR("failed to read control word\n");
15992
		return -EIO;
15993
	}
15994
 
15995
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15996
		return 0;
15997
 
3031 serge 15998
	if (state)
15999
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
2330 Serge 16000
	else
3031 serge 16001
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
5060 serge 16002
 
16003
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16004
		DRM_ERROR("failed to write control word\n");
16005
		return -EIO;
16006
	}
16007
 
3031 serge 16008
	return 0;
2330 Serge 16009
}
16010
 
3031 serge 16011
#ifdef CONFIG_DEBUG_FS
2327 Serge 16012
 
3031 serge 16013
/*
 * Snapshot of display-related registers captured at GPU error time,
 * filled in by intel_display_capture_error_state().
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_DRIVER (Haswell/Broadwell only). */
	u32 power_well_driver;

	/* Number of valid entries in transcoder[]. */
	int num_transcoders;

	/* Per-pipe hardware cursor registers. */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* False when the pipe's power domain was off at capture
		 * time; the remaining fields are then not read. */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	/* Per-transcoder timing registers (A, B, C, EDP). */
	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
2327 Serge 16056
 
3031 serge 16057
/**
 * intel_display_capture_error_state - snapshot display registers for an
 * error dump
 * @dev: drm device
 *
 * Reads per-pipe cursor/plane/pipe registers and per-transcoder timing
 * registers into a freshly allocated intel_display_error_state.  Pipes
 * and transcoders whose power domain is currently off are skipped: their
 * power_domain_on flag stays false and the register fields stay zero.
 *
 * May be called from atomic/error context, hence GFP_ATOMIC.  Returns
 * NULL if there are no pipes or the allocation fails; the caller owns
 * (and must kfree) the returned structure.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	/* Index i of the loop below maps 1:1 onto this table; the eDP
	 * entry is only reached when HAS_DDI bumped num_transcoders. */
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	/* Power-well driver control register exists on HSW/BDW only. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		/* Registers of a powered-down pipe are not readable. */
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			/* DSPSIZE/DSPPOS exist only on gen2/3. */
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		/* PIPESTAT is only read on GMCH-type display hardware. */
		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* One transcoder per pipe, plus the dedicated eDP transcoder on
	 * DDI platforms. */
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		/* Same rule as for pipes: skip powered-down transcoders. */
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));

		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
2327 Serge 16136
 
4104 Serge 16137
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/**
 * intel_display_print_error_state - format a captured display error state
 * @m: error-state buffer being built
 * @dev: drm device
 * @error: state returned by intel_display_capture_error_state(); may be
 *	   NULL, in which case nothing is printed
 *
 * Pretty-prints the pipe/plane/cursor and transcoder register snapshots
 * into @m, mirroring the capture routine above.  Fields that were skipped
 * at capture time because their power domain was off print as zero, with
 * the accompanying "Power: off" line explaining why.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Generation gates below match the capture-side gates, so
		 * only registers that were actually read get printed. */
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
3031 serge 16195
#endif
5354 serge 16196
 
16197
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
16198
{
16199
	struct intel_crtc *crtc;
16200
 
16201
	for_each_intel_crtc(dev, crtc) {
16202
		struct intel_unpin_work *work;
16203
 
16204
		spin_lock_irq(&dev->event_lock);
16205
 
16206
		work = crtc->unpin_work;
16207
 
16208
		if (work && work->event &&
16209
		    work->event->base.file_priv == file) {
16210
			kfree(work->event);
16211
			work->event = NULL;
16212
		}
16213
 
16214
		spin_unlock_irq(&dev->event_lock);
16215
	}
16216
}