Subversion Repositories Kolibri OS

Rev

Rev 6937 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
6084 serge 24
 *	Eric Anholt 
2327 Serge 25
 */
26
 
/*
 * NOTE(review): the include targets below were stripped by the HTML export
 * (angle-bracket contents lost). Reconstructed from the upstream Linux 4.4
 * i915 intel_display.c that this revision tracks — verify against the
 * actual repository checkout before relying on this list.
 */
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/dma-buf.h>
2327 Serge 49
 
5060 serge 50
/* Primary plane formats for gen <= 3 */
6084 serge 51
static const uint32_t i8xx_primary_formats[] = {
52
	DRM_FORMAT_C8,
53
	DRM_FORMAT_RGB565,
5060 serge 54
	DRM_FORMAT_XRGB1555,
6084 serge 55
	DRM_FORMAT_XRGB8888,
5060 serge 56
};
57
 
58
/* Primary plane formats for gen >= 4 */
6084 serge 59
static const uint32_t i965_primary_formats[] = {
60
	DRM_FORMAT_C8,
61
	DRM_FORMAT_RGB565,
62
	DRM_FORMAT_XRGB8888,
5060 serge 63
	DRM_FORMAT_XBGR8888,
6084 serge 64
	DRM_FORMAT_XRGB2101010,
65
	DRM_FORMAT_XBGR2101010,
66
};
67
 
68
static const uint32_t skl_primary_formats[] = {
69
	DRM_FORMAT_C8,
70
	DRM_FORMAT_RGB565,
71
	DRM_FORMAT_XRGB8888,
72
	DRM_FORMAT_XBGR8888,
73
	DRM_FORMAT_ARGB8888,
5060 serge 74
	DRM_FORMAT_ABGR8888,
75
	DRM_FORMAT_XRGB2101010,
76
	DRM_FORMAT_XBGR2101010,
6084 serge 77
	DRM_FORMAT_YUYV,
78
	DRM_FORMAT_YVYU,
79
	DRM_FORMAT_UYVY,
80
	DRM_FORMAT_VYUY,
5060 serge 81
};
82
 
83
/* Cursor formats */
84
static const uint32_t intel_cursor_formats[] = {
85
	DRM_FORMAT_ARGB8888,
86
};
87
 
4104 Serge 88
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
6084 serge 89
				struct intel_crtc_state *pipe_config);
4560 Serge 90
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
6084 serge 91
				   struct intel_crtc_state *pipe_config);
2327 Serge 92
 
5060 serge 93
static int intel_framebuffer_init(struct drm_device *dev,
94
				  struct intel_framebuffer *ifb,
95
				  struct drm_mode_fb_cmd2 *mode_cmd,
96
				  struct drm_i915_gem_object *obj);
97
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
98
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
99
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5354 serge 100
					 struct intel_link_m_n *m_n,
101
					 struct intel_link_m_n *m2_n2);
5060 serge 102
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
103
static void haswell_set_pipeconf(struct drm_crtc *crtc);
104
static void intel_set_pipe_csc(struct drm_crtc *crtc);
5354 serge 105
static void vlv_prepare_pll(struct intel_crtc *crtc,
6084 serge 106
			    const struct intel_crtc_state *pipe_config);
5354 serge 107
static void chv_prepare_pll(struct intel_crtc *crtc,
6084 serge 108
			    const struct intel_crtc_state *pipe_config);
109
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
110
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
111
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
112
	struct intel_crtc_state *crtc_state);
113
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
114
			   int num_connectors);
115
static void skylake_pfit_enable(struct intel_crtc *crtc);
116
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
117
static void ironlake_pfit_enable(struct intel_crtc *crtc);
118
static void intel_modeset_setup_hw_state(struct drm_device *dev);
119
static void intel_pre_disable_primary(struct drm_crtc *crtc);
4104 Serge 120
 
2327 Serge 121
/* Inclusive [min, max] range for a single PLL divider or derived clock. */
typedef struct {
	int	min, max;
} intel_range_t;

/* Post (p2) divider selection: dot clocks below dot_limit use p2_slow,
 * faster ones use p2_fast. */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;

/* Per-platform set of legal DPLL divider/clock ranges. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};
135
 
6084 serge 136
/* returns HPLL frequency in kHz */
137
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
138
{
139
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
140
 
141
	/* Obtain SKU information */
142
	mutex_lock(&dev_priv->sb_lock);
143
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
144
		CCK_FUSE_HPLL_FREQ_MASK;
145
	mutex_unlock(&dev_priv->sb_lock);
146
 
147
	return vco_freq[hpll_freq] * 1000;
148
}
149
 
150
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
151
				  const char *name, u32 reg)
152
{
153
	u32 val;
154
	int divider;
155
 
156
	if (dev_priv->hpll_freq == 0)
157
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
158
 
159
	mutex_lock(&dev_priv->sb_lock);
160
	val = vlv_cck_read(dev_priv, reg);
161
	mutex_unlock(&dev_priv->sb_lock);
162
 
163
	divider = val & CCK_FREQUENCY_VALUES;
164
 
165
	WARN((val & CCK_FREQUENCY_STATUS) !=
166
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
167
	     "%s change in progress\n", name);
168
 
169
	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
170
}
171
 
3243 Serge 172
int
173
intel_pch_rawclk(struct drm_device *dev)
174
{
175
	struct drm_i915_private *dev_priv = dev->dev_private;
176
 
177
	WARN_ON(!HAS_PCH_SPLIT(dev));
178
 
179
	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
180
}
181
 
6084 serge 182
/* hrawclock is 1/4 the FSB frequency */
183
int intel_hrawclk(struct drm_device *dev)
184
{
185
	struct drm_i915_private *dev_priv = dev->dev_private;
186
	uint32_t clkcfg;
187
 
188
	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
6937 serge 189
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6084 serge 190
		return 200;
191
 
192
	clkcfg = I915_READ(CLKCFG);
193
	switch (clkcfg & CLKCFG_FSB_MASK) {
194
	case CLKCFG_FSB_400:
195
		return 100;
196
	case CLKCFG_FSB_533:
197
		return 133;
198
	case CLKCFG_FSB_667:
199
		return 166;
200
	case CLKCFG_FSB_800:
201
		return 200;
202
	case CLKCFG_FSB_1067:
203
		return 266;
204
	case CLKCFG_FSB_1333:
205
		return 333;
206
	/* these two are just a guess; one of them might be right */
207
	case CLKCFG_FSB_1600:
208
	case CLKCFG_FSB_1600_ALT:
209
		return 400;
210
	default:
211
		return 133;
212
	}
213
}
214
 
215
static void intel_update_czclk(struct drm_i915_private *dev_priv)
216
{
6937 serge 217
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
6084 serge 218
		return;
219
 
220
	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
221
						      CCK_CZ_CLOCK_CONTROL);
222
 
223
	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
224
}
225
 
2327 Serge 226
static inline u32 /* units of 100MHz */
227
intel_fdi_link_freq(struct drm_device *dev)
228
{
229
	if (IS_GEN5(dev)) {
230
		struct drm_i915_private *dev_priv = dev->dev_private;
231
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
232
	} else
233
		return 27;
234
}
235
 
4104 Serge 236
static const intel_limit_t intel_limits_i8xx_dac = {
237
	.dot = { .min = 25000, .max = 350000 },
4560 Serge 238
	.vco = { .min = 908000, .max = 1512000 },
239
	.n = { .min = 2, .max = 16 },
4104 Serge 240
	.m = { .min = 96, .max = 140 },
241
	.m1 = { .min = 18, .max = 26 },
242
	.m2 = { .min = 6, .max = 16 },
243
	.p = { .min = 4, .max = 128 },
244
	.p1 = { .min = 2, .max = 33 },
245
	.p2 = { .dot_limit = 165000,
246
		.p2_slow = 4, .p2_fast = 2 },
247
};
248
 
2327 Serge 249
static const intel_limit_t intel_limits_i8xx_dvo = {
6084 serge 250
	.dot = { .min = 25000, .max = 350000 },
4560 Serge 251
	.vco = { .min = 908000, .max = 1512000 },
252
	.n = { .min = 2, .max = 16 },
6084 serge 253
	.m = { .min = 96, .max = 140 },
254
	.m1 = { .min = 18, .max = 26 },
255
	.m2 = { .min = 6, .max = 16 },
256
	.p = { .min = 4, .max = 128 },
257
	.p1 = { .min = 2, .max = 33 },
2327 Serge 258
	.p2 = { .dot_limit = 165000,
4104 Serge 259
		.p2_slow = 4, .p2_fast = 4 },
2327 Serge 260
};
261
 
262
static const intel_limit_t intel_limits_i8xx_lvds = {
6084 serge 263
	.dot = { .min = 25000, .max = 350000 },
4560 Serge 264
	.vco = { .min = 908000, .max = 1512000 },
265
	.n = { .min = 2, .max = 16 },
6084 serge 266
	.m = { .min = 96, .max = 140 },
267
	.m1 = { .min = 18, .max = 26 },
268
	.m2 = { .min = 6, .max = 16 },
269
	.p = { .min = 4, .max = 128 },
270
	.p1 = { .min = 1, .max = 6 },
2327 Serge 271
	.p2 = { .dot_limit = 165000,
272
		.p2_slow = 14, .p2_fast = 7 },
273
};
274
 
275
static const intel_limit_t intel_limits_i9xx_sdvo = {
6084 serge 276
	.dot = { .min = 20000, .max = 400000 },
277
	.vco = { .min = 1400000, .max = 2800000 },
278
	.n = { .min = 1, .max = 6 },
279
	.m = { .min = 70, .max = 120 },
3480 Serge 280
	.m1 = { .min = 8, .max = 18 },
281
	.m2 = { .min = 3, .max = 7 },
6084 serge 282
	.p = { .min = 5, .max = 80 },
283
	.p1 = { .min = 1, .max = 8 },
2327 Serge 284
	.p2 = { .dot_limit = 200000,
285
		.p2_slow = 10, .p2_fast = 5 },
286
};
287
 
288
static const intel_limit_t intel_limits_i9xx_lvds = {
6084 serge 289
	.dot = { .min = 20000, .max = 400000 },
290
	.vco = { .min = 1400000, .max = 2800000 },
291
	.n = { .min = 1, .max = 6 },
292
	.m = { .min = 70, .max = 120 },
3480 Serge 293
	.m1 = { .min = 8, .max = 18 },
294
	.m2 = { .min = 3, .max = 7 },
6084 serge 295
	.p = { .min = 7, .max = 98 },
296
	.p1 = { .min = 1, .max = 8 },
2327 Serge 297
	.p2 = { .dot_limit = 112000,
298
		.p2_slow = 14, .p2_fast = 7 },
299
};
300
 
301
 
302
static const intel_limit_t intel_limits_g4x_sdvo = {
303
	.dot = { .min = 25000, .max = 270000 },
304
	.vco = { .min = 1750000, .max = 3500000},
305
	.n = { .min = 1, .max = 4 },
306
	.m = { .min = 104, .max = 138 },
307
	.m1 = { .min = 17, .max = 23 },
308
	.m2 = { .min = 5, .max = 11 },
309
	.p = { .min = 10, .max = 30 },
310
	.p1 = { .min = 1, .max = 3},
311
	.p2 = { .dot_limit = 270000,
312
		.p2_slow = 10,
313
		.p2_fast = 10
314
	},
315
};
316
 
317
static const intel_limit_t intel_limits_g4x_hdmi = {
318
	.dot = { .min = 22000, .max = 400000 },
319
	.vco = { .min = 1750000, .max = 3500000},
320
	.n = { .min = 1, .max = 4 },
321
	.m = { .min = 104, .max = 138 },
322
	.m1 = { .min = 16, .max = 23 },
323
	.m2 = { .min = 5, .max = 11 },
324
	.p = { .min = 5, .max = 80 },
325
	.p1 = { .min = 1, .max = 8},
326
	.p2 = { .dot_limit = 165000,
327
		.p2_slow = 10, .p2_fast = 5 },
328
};
329
 
330
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
331
	.dot = { .min = 20000, .max = 115000 },
332
	.vco = { .min = 1750000, .max = 3500000 },
333
	.n = { .min = 1, .max = 3 },
334
	.m = { .min = 104, .max = 138 },
335
	.m1 = { .min = 17, .max = 23 },
336
	.m2 = { .min = 5, .max = 11 },
337
	.p = { .min = 28, .max = 112 },
338
	.p1 = { .min = 2, .max = 8 },
339
	.p2 = { .dot_limit = 0,
340
		.p2_slow = 14, .p2_fast = 14
341
	},
342
};
343
 
344
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
345
	.dot = { .min = 80000, .max = 224000 },
346
	.vco = { .min = 1750000, .max = 3500000 },
347
	.n = { .min = 1, .max = 3 },
348
	.m = { .min = 104, .max = 138 },
349
	.m1 = { .min = 17, .max = 23 },
350
	.m2 = { .min = 5, .max = 11 },
351
	.p = { .min = 14, .max = 42 },
352
	.p1 = { .min = 2, .max = 6 },
353
	.p2 = { .dot_limit = 0,
354
		.p2_slow = 7, .p2_fast = 7
355
	},
356
};
357
 
358
static const intel_limit_t intel_limits_pineview_sdvo = {
6084 serge 359
	.dot = { .min = 20000, .max = 400000},
360
	.vco = { .min = 1700000, .max = 3500000 },
2327 Serge 361
	/* Pineview's Ncounter is a ring counter */
6084 serge 362
	.n = { .min = 3, .max = 6 },
363
	.m = { .min = 2, .max = 256 },
2327 Serge 364
	/* Pineview only has one combined m divider, which we treat as m2. */
6084 serge 365
	.m1 = { .min = 0, .max = 0 },
366
	.m2 = { .min = 0, .max = 254 },
367
	.p = { .min = 5, .max = 80 },
368
	.p1 = { .min = 1, .max = 8 },
2327 Serge 369
	.p2 = { .dot_limit = 200000,
370
		.p2_slow = 10, .p2_fast = 5 },
371
};
372
 
373
static const intel_limit_t intel_limits_pineview_lvds = {
6084 serge 374
	.dot = { .min = 20000, .max = 400000 },
375
	.vco = { .min = 1700000, .max = 3500000 },
376
	.n = { .min = 3, .max = 6 },
377
	.m = { .min = 2, .max = 256 },
378
	.m1 = { .min = 0, .max = 0 },
379
	.m2 = { .min = 0, .max = 254 },
380
	.p = { .min = 7, .max = 112 },
381
	.p1 = { .min = 1, .max = 8 },
2327 Serge 382
	.p2 = { .dot_limit = 112000,
383
		.p2_slow = 14, .p2_fast = 14 },
384
};
385
 
386
/* Ironlake / Sandybridge
387
 *
388
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
389
 * the range value for them is (actual_value - 2).
390
 */
391
static const intel_limit_t intel_limits_ironlake_dac = {
392
	.dot = { .min = 25000, .max = 350000 },
393
	.vco = { .min = 1760000, .max = 3510000 },
394
	.n = { .min = 1, .max = 5 },
395
	.m = { .min = 79, .max = 127 },
396
	.m1 = { .min = 12, .max = 22 },
397
	.m2 = { .min = 5, .max = 9 },
398
	.p = { .min = 5, .max = 80 },
399
	.p1 = { .min = 1, .max = 8 },
400
	.p2 = { .dot_limit = 225000,
401
		.p2_slow = 10, .p2_fast = 5 },
402
};
403
 
404
static const intel_limit_t intel_limits_ironlake_single_lvds = {
405
	.dot = { .min = 25000, .max = 350000 },
406
	.vco = { .min = 1760000, .max = 3510000 },
407
	.n = { .min = 1, .max = 3 },
408
	.m = { .min = 79, .max = 118 },
409
	.m1 = { .min = 12, .max = 22 },
410
	.m2 = { .min = 5, .max = 9 },
411
	.p = { .min = 28, .max = 112 },
412
	.p1 = { .min = 2, .max = 8 },
413
	.p2 = { .dot_limit = 225000,
414
		.p2_slow = 14, .p2_fast = 14 },
415
};
416
 
417
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
418
	.dot = { .min = 25000, .max = 350000 },
419
	.vco = { .min = 1760000, .max = 3510000 },
420
	.n = { .min = 1, .max = 3 },
421
	.m = { .min = 79, .max = 127 },
422
	.m1 = { .min = 12, .max = 22 },
423
	.m2 = { .min = 5, .max = 9 },
424
	.p = { .min = 14, .max = 56 },
425
	.p1 = { .min = 2, .max = 8 },
426
	.p2 = { .dot_limit = 225000,
427
		.p2_slow = 7, .p2_fast = 7 },
428
};
429
 
430
/* LVDS 100mhz refclk limits. */
431
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
432
	.dot = { .min = 25000, .max = 350000 },
433
	.vco = { .min = 1760000, .max = 3510000 },
434
	.n = { .min = 1, .max = 2 },
435
	.m = { .min = 79, .max = 126 },
436
	.m1 = { .min = 12, .max = 22 },
437
	.m2 = { .min = 5, .max = 9 },
438
	.p = { .min = 28, .max = 112 },
2342 Serge 439
	.p1 = { .min = 2, .max = 8 },
2327 Serge 440
	.p2 = { .dot_limit = 225000,
441
		.p2_slow = 14, .p2_fast = 14 },
442
};
443
 
444
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
445
	.dot = { .min = 25000, .max = 350000 },
446
	.vco = { .min = 1760000, .max = 3510000 },
447
	.n = { .min = 1, .max = 3 },
448
	.m = { .min = 79, .max = 126 },
449
	.m1 = { .min = 12, .max = 22 },
450
	.m2 = { .min = 5, .max = 9 },
451
	.p = { .min = 14, .max = 42 },
2342 Serge 452
	.p1 = { .min = 2, .max = 6 },
2327 Serge 453
	.p2 = { .dot_limit = 225000,
454
		.p2_slow = 7, .p2_fast = 7 },
455
};
456
 
4560 Serge 457
static const intel_limit_t intel_limits_vlv = {
458
	 /*
459
	  * These are the data rate limits (measured in fast clocks)
460
	  * since those are the strictest limits we have. The fast
461
	  * clock and actual rate limits are more relaxed, so checking
462
	  * them would make no difference.
463
	  */
464
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
3031 serge 465
	.vco = { .min = 4000000, .max = 6000000 },
466
	.n = { .min = 1, .max = 7 },
467
	.m1 = { .min = 2, .max = 3 },
468
	.m2 = { .min = 11, .max = 156 },
469
	.p1 = { .min = 2, .max = 3 },
4560 Serge 470
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
3031 serge 471
};
472
 
5060 serge 473
static const intel_limit_t intel_limits_chv = {
474
	/*
475
	 * These are the data rate limits (measured in fast clocks)
476
	 * since those are the strictest limits we have.  The fast
477
	 * clock and actual rate limits are more relaxed, so checking
478
	 * them would make no difference.
479
	 */
480
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
6084 serge 481
	.vco = { .min = 4800000, .max = 6480000 },
5060 serge 482
	.n = { .min = 1, .max = 1 },
483
	.m1 = { .min = 2, .max = 2 },
484
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
485
	.p1 = { .min = 2, .max = 4 },
486
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
487
};
488
 
6084 serge 489
static const intel_limit_t intel_limits_bxt = {
490
	/* FIXME: find real dot limits */
491
	.dot = { .min = 0, .max = INT_MAX },
492
	.vco = { .min = 4800000, .max = 6700000 },
493
	.n = { .min = 1, .max = 1 },
494
	.m1 = { .min = 2, .max = 2 },
495
	/* FIXME: find real m2 limits */
496
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
497
	.p1 = { .min = 2, .max = 4 },
498
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
499
};
500
 
501
static bool
502
needs_modeset(struct drm_crtc_state *state)
4560 Serge 503
{
6084 serge 504
	return drm_atomic_crtc_needs_modeset(state);
4560 Serge 505
}
3031 serge 506
 
4560 Serge 507
/**
508
 * Returns whether any output on the specified pipe is of the specified type
509
 */
5354 serge 510
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
4560 Serge 511
{
5354 serge 512
	struct drm_device *dev = crtc->base.dev;
4560 Serge 513
	struct intel_encoder *encoder;
514
 
5354 serge 515
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4560 Serge 516
		if (encoder->type == type)
517
			return true;
518
 
519
	return false;
520
}
521
 
5354 serge 522
/**
523
 * Returns whether any output on the specified pipe will have the specified
524
 * type after a staged modeset is complete, i.e., the same as
525
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
526
 * encoder->crtc.
527
 */
6084 serge 528
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
529
				      int type)
5354 serge 530
{
6084 serge 531
	struct drm_atomic_state *state = crtc_state->base.state;
532
	struct drm_connector *connector;
533
	struct drm_connector_state *connector_state;
5354 serge 534
	struct intel_encoder *encoder;
6084 serge 535
	int i, num_connectors = 0;
5354 serge 536
 
6084 serge 537
	for_each_connector_in_state(state, connector, connector_state, i) {
538
		if (connector_state->crtc != crtc_state->base.crtc)
539
			continue;
540
 
541
		num_connectors++;
542
 
543
		encoder = to_intel_encoder(connector_state->best_encoder);
544
		if (encoder->type == type)
5354 serge 545
			return true;
6084 serge 546
	}
5354 serge 547
 
6084 serge 548
	WARN_ON(num_connectors == 0);
549
 
5354 serge 550
	return false;
551
}
552
 
6084 serge 553
static const intel_limit_t *
554
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
2327 Serge 555
{
6084 serge 556
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 557
	const intel_limit_t *limit;
558
 
6084 serge 559
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
3480 Serge 560
		if (intel_is_dual_link_lvds(dev)) {
2327 Serge 561
			if (refclk == 100000)
562
				limit = &intel_limits_ironlake_dual_lvds_100m;
563
			else
564
				limit = &intel_limits_ironlake_dual_lvds;
565
		} else {
566
			if (refclk == 100000)
567
				limit = &intel_limits_ironlake_single_lvds_100m;
568
			else
569
				limit = &intel_limits_ironlake_single_lvds;
570
		}
4104 Serge 571
	} else
2327 Serge 572
		limit = &intel_limits_ironlake_dac;
573
 
574
	return limit;
575
}
576
 
6084 serge 577
static const intel_limit_t *
578
intel_g4x_limit(struct intel_crtc_state *crtc_state)
2327 Serge 579
{
6084 serge 580
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 581
	const intel_limit_t *limit;
582
 
6084 serge 583
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
3480 Serge 584
		if (intel_is_dual_link_lvds(dev))
2327 Serge 585
			limit = &intel_limits_g4x_dual_channel_lvds;
586
		else
587
			limit = &intel_limits_g4x_single_channel_lvds;
6084 serge 588
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
589
		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
2327 Serge 590
		limit = &intel_limits_g4x_hdmi;
6084 serge 591
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
2327 Serge 592
		limit = &intel_limits_g4x_sdvo;
593
	} else /* The option is for other outputs */
594
		limit = &intel_limits_i9xx_sdvo;
595
 
596
	return limit;
597
}
598
 
6084 serge 599
static const intel_limit_t *
600
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
2327 Serge 601
{
6084 serge 602
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 603
	const intel_limit_t *limit;
604
 
6084 serge 605
	if (IS_BROXTON(dev))
606
		limit = &intel_limits_bxt;
607
	else if (HAS_PCH_SPLIT(dev))
608
		limit = intel_ironlake_limit(crtc_state, refclk);
2327 Serge 609
	else if (IS_G4X(dev)) {
6084 serge 610
		limit = intel_g4x_limit(crtc_state);
2327 Serge 611
	} else if (IS_PINEVIEW(dev)) {
6084 serge 612
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
2327 Serge 613
			limit = &intel_limits_pineview_lvds;
614
		else
615
			limit = &intel_limits_pineview_sdvo;
5060 serge 616
	} else if (IS_CHERRYVIEW(dev)) {
617
		limit = &intel_limits_chv;
3031 serge 618
	} else if (IS_VALLEYVIEW(dev)) {
4560 Serge 619
		limit = &intel_limits_vlv;
2327 Serge 620
	} else if (!IS_GEN2(dev)) {
6084 serge 621
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
2327 Serge 622
			limit = &intel_limits_i9xx_lvds;
623
		else
624
			limit = &intel_limits_i9xx_sdvo;
625
	} else {
6084 serge 626
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
2327 Serge 627
			limit = &intel_limits_i8xx_lvds;
6084 serge 628
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
4104 Serge 629
			limit = &intel_limits_i8xx_dvo;
2327 Serge 630
		else
4104 Serge 631
			limit = &intel_limits_i8xx_dac;
2327 Serge 632
	}
633
	return limit;
634
}
635
 
6084 serge 636
/*
637
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
638
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
639
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
640
 * The helpers' return value is the rate of the clock that is fed to the
641
 * display engine's pipe which can be the above fast dot clock rate or a
642
 * divided-down version of it.
643
 */
2327 Serge 644
/* m1 is reserved as 0 in Pineview, n is a ring counter */
6084 serge 645
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
2327 Serge 646
{
647
	clock->m = clock->m2 + 2;
648
	clock->p = clock->p1 * clock->p2;
4560 Serge 649
	if (WARN_ON(clock->n == 0 || clock->p == 0))
6084 serge 650
		return 0;
4560 Serge 651
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
652
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
6084 serge 653
 
654
	return clock->dot;
2327 Serge 655
}
656
 
4104 Serge 657
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
2327 Serge 658
{
4104 Serge 659
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
660
}
661
 
6084 serge 662
static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
4104 Serge 663
{
664
	clock->m = i9xx_dpll_compute_m(clock);
2327 Serge 665
	clock->p = clock->p1 * clock->p2;
4560 Serge 666
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
6084 serge 667
		return 0;
4560 Serge 668
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
669
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
6084 serge 670
 
671
	return clock->dot;
2327 Serge 672
}
673
 
6084 serge 674
static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
5060 serge 675
{
676
	clock->m = clock->m1 * clock->m2;
677
	clock->p = clock->p1 * clock->p2;
678
	if (WARN_ON(clock->n == 0 || clock->p == 0))
6084 serge 679
		return 0;
680
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
681
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
682
 
683
	return clock->dot / 5;
684
}
685
 
686
int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
687
{
688
	clock->m = clock->m1 * clock->m2;
689
	clock->p = clock->p1 * clock->p2;
690
	if (WARN_ON(clock->n == 0 || clock->p == 0))
691
		return 0;
5060 serge 692
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
693
			clock->n << 22);
694
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
6084 serge 695
 
696
	return clock->dot / 5;
5060 serge 697
}
698
 
2327 Serge 699
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
700
/**
701
 * Returns whether the given set of divisors are valid for a given refclk with
702
 * the given connectors.
703
 */
704
 
705
static bool intel_PLL_is_valid(struct drm_device *dev,
706
			       const intel_limit_t *limit,
707
			       const intel_clock_t *clock)
708
{
4560 Serge 709
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
710
		INTELPllInvalid("n out of range\n");
2327 Serge 711
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
2342 Serge 712
		INTELPllInvalid("p1 out of range\n");
2327 Serge 713
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
2342 Serge 714
		INTELPllInvalid("m2 out of range\n");
2327 Serge 715
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
2342 Serge 716
		INTELPllInvalid("m1 out of range\n");
4560 Serge 717
 
6937 serge 718
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
719
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
4560 Serge 720
		if (clock->m1 <= clock->m2)
6084 serge 721
			INTELPllInvalid("m1 <= m2\n");
4560 Serge 722
 
6937 serge 723
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
4560 Serge 724
		if (clock->p < limit->p.min || limit->p.max < clock->p)
725
			INTELPllInvalid("p out of range\n");
6084 serge 726
		if (clock->m < limit->m.min || limit->m.max < clock->m)
727
			INTELPllInvalid("m out of range\n");
4560 Serge 728
	}
729
 
2327 Serge 730
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
2342 Serge 731
		INTELPllInvalid("vco out of range\n");
2327 Serge 732
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
733
	 * connector, etc., rather than just a single range.
734
	 */
735
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
2342 Serge 736
		INTELPllInvalid("dot out of range\n");
2327 Serge 737
 
738
	return true;
739
}
740
 
6084 serge 741
static int
742
i9xx_select_p2_div(const intel_limit_t *limit,
743
		   const struct intel_crtc_state *crtc_state,
744
		   int target)
2327 Serge 745
{
6084 serge 746
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 747
 
6084 serge 748
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
2327 Serge 749
		/*
3480 Serge 750
		 * For LVDS just rely on its current settings for dual-channel.
751
		 * We haven't figured out how to reliably set up different
752
		 * single/dual channel state, if we even can.
2327 Serge 753
		 */
3480 Serge 754
		if (intel_is_dual_link_lvds(dev))
6084 serge 755
			return limit->p2.p2_fast;
2327 Serge 756
		else
6084 serge 757
			return limit->p2.p2_slow;
2327 Serge 758
	} else {
759
		if (target < limit->p2.dot_limit)
6084 serge 760
			return limit->p2.p2_slow;
2327 Serge 761
		else
6084 serge 762
			return limit->p2.p2_fast;
2327 Serge 763
	}
6084 serge 764
}
2327 Serge 765
 
6084 serge 766
static bool
767
i9xx_find_best_dpll(const intel_limit_t *limit,
768
		    struct intel_crtc_state *crtc_state,
769
		    int target, int refclk, intel_clock_t *match_clock,
770
		    intel_clock_t *best_clock)
771
{
772
	struct drm_device *dev = crtc_state->base.crtc->dev;
773
	intel_clock_t clock;
774
	int err = target;
775
 
2342 Serge 776
	memset(best_clock, 0, sizeof(*best_clock));
2327 Serge 777
 
6084 serge 778
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
779
 
2327 Serge 780
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
781
	     clock.m1++) {
782
		for (clock.m2 = limit->m2.min;
783
		     clock.m2 <= limit->m2.max; clock.m2++) {
4104 Serge 784
			if (clock.m2 >= clock.m1)
2327 Serge 785
				break;
786
			for (clock.n = limit->n.min;
787
			     clock.n <= limit->n.max; clock.n++) {
788
				for (clock.p1 = limit->p1.min;
789
					clock.p1 <= limit->p1.max; clock.p1++) {
790
					int this_err;
791
 
6084 serge 792
					i9xx_calc_dpll_params(refclk, &clock);
2327 Serge 793
					if (!intel_PLL_is_valid(dev, limit,
794
								&clock))
795
						continue;
3031 serge 796
					if (match_clock &&
797
					    clock.p != match_clock->p)
798
						continue;
2327 Serge 799
 
800
					this_err = abs(clock.dot - target);
801
					if (this_err < err) {
802
						*best_clock = clock;
803
						err = this_err;
804
					}
805
				}
806
			}
807
		}
808
	}
809
 
810
	return (err != target);
811
}
812
 
813
static bool
6084 serge 814
pnv_find_best_dpll(const intel_limit_t *limit,
815
		   struct intel_crtc_state *crtc_state,
4104 Serge 816
		   int target, int refclk, intel_clock_t *match_clock,
817
		   intel_clock_t *best_clock)
818
{
6084 serge 819
	struct drm_device *dev = crtc_state->base.crtc->dev;
4104 Serge 820
	intel_clock_t clock;
821
	int err = target;
822
 
823
	memset(best_clock, 0, sizeof(*best_clock));
824
 
6084 serge 825
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
826
 
4104 Serge 827
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
828
	     clock.m1++) {
829
		for (clock.m2 = limit->m2.min;
830
		     clock.m2 <= limit->m2.max; clock.m2++) {
831
			for (clock.n = limit->n.min;
832
			     clock.n <= limit->n.max; clock.n++) {
833
				for (clock.p1 = limit->p1.min;
834
					clock.p1 <= limit->p1.max; clock.p1++) {
835
					int this_err;
836
 
6084 serge 837
					pnv_calc_dpll_params(refclk, &clock);
4104 Serge 838
					if (!intel_PLL_is_valid(dev, limit,
839
								&clock))
840
						continue;
841
					if (match_clock &&
842
					    clock.p != match_clock->p)
843
						continue;
844
 
845
					this_err = abs(clock.dot - target);
846
					if (this_err < err) {
847
						*best_clock = clock;
848
						err = this_err;
849
					}
850
				}
851
			}
852
		}
853
	}
854
 
855
	return (err != target);
856
}
857
 
858
static bool
6084 serge 859
g4x_find_best_dpll(const intel_limit_t *limit,
860
		   struct intel_crtc_state *crtc_state,
861
		   int target, int refclk, intel_clock_t *match_clock,
862
		   intel_clock_t *best_clock)
2327 Serge 863
{
6084 serge 864
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 865
	intel_clock_t clock;
866
	int max_n;
6084 serge 867
	bool found = false;
2327 Serge 868
	/* approximately equals target * 0.00585 */
869
	int err_most = (target >> 8) + (target >> 9);
870
 
6084 serge 871
	memset(best_clock, 0, sizeof(*best_clock));
2327 Serge 872
 
6084 serge 873
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
874
 
2327 Serge 875
	max_n = limit->n.max;
876
	/* based on hardware requirement, prefer smaller n to precision */
877
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
878
		/* based on hardware requirement, prefere larger m1,m2 */
879
		for (clock.m1 = limit->m1.max;
880
		     clock.m1 >= limit->m1.min; clock.m1--) {
881
			for (clock.m2 = limit->m2.max;
882
			     clock.m2 >= limit->m2.min; clock.m2--) {
883
				for (clock.p1 = limit->p1.max;
884
				     clock.p1 >= limit->p1.min; clock.p1--) {
885
					int this_err;
886
 
6084 serge 887
					i9xx_calc_dpll_params(refclk, &clock);
2327 Serge 888
					if (!intel_PLL_is_valid(dev, limit,
889
								&clock))
890
						continue;
891
 
892
					this_err = abs(clock.dot - target);
893
					if (this_err < err_most) {
894
						*best_clock = clock;
895
						err_most = this_err;
896
						max_n = clock.n;
897
						found = true;
898
					}
899
				}
900
			}
901
		}
902
	}
903
	return found;
904
}
905
 
6084 serge 906
/*
907
 * Check if the calculated PLL configuration is more optimal compared to the
908
 * best configuration and error found so far. Return the calculated error.
909
 */
910
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
911
			       const intel_clock_t *calculated_clock,
912
			       const intel_clock_t *best_clock,
913
			       unsigned int best_error_ppm,
914
			       unsigned int *error_ppm)
915
{
916
	/*
917
	 * For CHV ignore the error and consider only the P value.
918
	 * Prefer a bigger P value based on HW requirements.
919
	 */
920
	if (IS_CHERRYVIEW(dev)) {
921
		*error_ppm = 0;
922
 
923
		return calculated_clock->p > best_clock->p;
924
	}
925
 
926
	if (WARN_ON_ONCE(!target_freq))
927
		return false;
928
 
929
	*error_ppm = div_u64(1000000ULL *
930
				abs(target_freq - calculated_clock->dot),
931
			     target_freq);
932
	/*
933
	 * Prefer a better P value over a better (smaller) error if the error
934
	 * is small. Ensure this preference for future configurations too by
935
	 * setting the error to 0.
936
	 */
937
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
938
		*error_ppm = 0;
939
 
940
		return true;
941
	}
942
 
943
	return *error_ppm + 10 < best_error_ppm;
944
}
945
 
2327 Serge 946
static bool
6084 serge 947
vlv_find_best_dpll(const intel_limit_t *limit,
948
		   struct intel_crtc_state *crtc_state,
949
		   int target, int refclk, intel_clock_t *match_clock,
950
		   intel_clock_t *best_clock)
3031 serge 951
{
6084 serge 952
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5354 serge 953
	struct drm_device *dev = crtc->base.dev;
4560 Serge 954
	intel_clock_t clock;
955
	unsigned int bestppm = 1000000;
956
	/* min update 19.2 MHz */
957
	int max_n = min(limit->n.max, refclk / 19200);
958
	bool found = false;
2327 Serge 959
 
4560 Serge 960
	target *= 5; /* fast clock */
3031 serge 961
 
4560 Serge 962
	memset(best_clock, 0, sizeof(*best_clock));
963
 
3031 serge 964
	/* based on hardware requirement, prefer smaller n to precision */
4560 Serge 965
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
966
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
967
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
968
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
969
				clock.p = clock.p1 * clock.p2;
3031 serge 970
				/* based on hardware requirement, prefer bigger m1,m2 values */
4560 Serge 971
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
6084 serge 972
					unsigned int ppm;
4560 Serge 973
 
974
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
975
								     refclk * clock.m1);
976
 
6084 serge 977
					vlv_calc_dpll_params(refclk, &clock);
4560 Serge 978
 
979
					if (!intel_PLL_is_valid(dev, limit,
980
								&clock))
981
						continue;
982
 
6084 serge 983
					if (!vlv_PLL_is_optimal(dev, target,
984
								&clock,
985
								best_clock,
986
								bestppm, &ppm))
987
						continue;
4560 Serge 988
 
6084 serge 989
					*best_clock = clock;
990
					bestppm = ppm;
991
					found = true;
3031 serge 992
				}
993
			}
6084 serge 994
		}
995
	}
3031 serge 996
 
4560 Serge 997
	return found;
3031 serge 998
}
999
 
5060 serge 1000
static bool
6084 serge 1001
chv_find_best_dpll(const intel_limit_t *limit,
1002
		   struct intel_crtc_state *crtc_state,
5060 serge 1003
		   int target, int refclk, intel_clock_t *match_clock,
1004
		   intel_clock_t *best_clock)
1005
{
6084 serge 1006
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5354 serge 1007
	struct drm_device *dev = crtc->base.dev;
6084 serge 1008
	unsigned int best_error_ppm;
5060 serge 1009
	intel_clock_t clock;
1010
	uint64_t m2;
1011
	int found = false;
1012
 
1013
	memset(best_clock, 0, sizeof(*best_clock));
6084 serge 1014
	best_error_ppm = 1000000;
5060 serge 1015
 
1016
	/*
1017
	 * Based on hardware doc, the n always set to 1, and m1 always
1018
	 * set to 2.  If requires to support 200Mhz refclk, we need to
1019
	 * revisit this because n may not 1 anymore.
1020
	 */
1021
	clock.n = 1, clock.m1 = 2;
1022
	target *= 5;	/* fast clock */
1023
 
1024
	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1025
		for (clock.p2 = limit->p2.p2_fast;
1026
				clock.p2 >= limit->p2.p2_slow;
1027
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
6084 serge 1028
			unsigned int error_ppm;
5060 serge 1029
 
1030
			clock.p = clock.p1 * clock.p2;
1031
 
1032
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1033
					clock.n) << 22, refclk * clock.m1);
1034
 
1035
			if (m2 > INT_MAX/clock.m1)
1036
				continue;
1037
 
1038
			clock.m2 = m2;
1039
 
6084 serge 1040
			chv_calc_dpll_params(refclk, &clock);
5060 serge 1041
 
1042
			if (!intel_PLL_is_valid(dev, limit, &clock))
1043
				continue;
1044
 
6084 serge 1045
			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1046
						best_error_ppm, &error_ppm))
1047
				continue;
1048
 
1049
			*best_clock = clock;
1050
			best_error_ppm = error_ppm;
1051
			found = true;
5060 serge 1052
		}
1053
	}
1054
 
1055
	return found;
1056
}
1057
 
6084 serge 1058
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1059
			intel_clock_t *best_clock)
1060
{
1061
	int refclk = i9xx_get_refclk(crtc_state, 0);
1062
 
1063
	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1064
				  target_clock, refclk, NULL, best_clock);
1065
}
1066
 
4560 Serge 1067
bool intel_crtc_active(struct drm_crtc *crtc)
1068
{
1069
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1070
 
1071
	/* Be paranoid as we can arrive here with only partial
1072
	 * state retrieved from the hardware during setup.
1073
	 *
1074
	 * We can ditch the adjusted_mode.crtc_clock check as soon
1075
	 * as Haswell has gained clock readout/fastboot support.
1076
	 *
5060 serge 1077
	 * We can ditch the crtc->primary->fb check as soon as we can
4560 Serge 1078
	 * properly reconstruct framebuffers.
6084 serge 1079
	 *
1080
	 * FIXME: The intel_crtc->active here should be switched to
1081
	 * crtc->state->active once we have proper CRTC states wired up
1082
	 * for atomic.
4560 Serge 1083
	 */
6084 serge 1084
	return intel_crtc->active && crtc->primary->state->fb &&
1085
		intel_crtc->config->base.adjusted_mode.crtc_clock;
4560 Serge 1086
}
1087
 
3243 Serge 1088
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1089
					     enum pipe pipe)
1090
{
1091
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1092
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1093
 
6084 serge 1094
	return intel_crtc->config->cpu_transcoder;
3243 Serge 1095
}
1096
 
4560 Serge 1097
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1098
{
1099
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 1100
	i915_reg_t reg = PIPEDSL(pipe);
4560 Serge 1101
	u32 line1, line2;
1102
	u32 line_mask;
1103
 
1104
	if (IS_GEN2(dev))
1105
		line_mask = DSL_LINEMASK_GEN2;
1106
	else
1107
		line_mask = DSL_LINEMASK_GEN3;
1108
 
1109
	line1 = I915_READ(reg) & line_mask;
6084 serge 1110
	msleep(5);
4560 Serge 1111
	line2 = I915_READ(reg) & line_mask;
1112
 
1113
	return line1 == line2;
1114
}
1115
 
2327 Serge 1116
/*
1117
 * intel_wait_for_pipe_off - wait for pipe to turn off
5354 serge 1118
 * @crtc: crtc whose pipe to wait for
2327 Serge 1119
 *
1120
 * After disabling a pipe, we can't wait for vblank in the usual way,
1121
 * spinning on the vblank interrupt status bit, since we won't actually
1122
 * see an interrupt when the pipe is disabled.
1123
 *
1124
 * On Gen4 and above:
1125
 *   wait for the pipe register state bit to turn off
1126
 *
1127
 * Otherwise:
1128
 *   wait for the display line value to settle (it usually
1129
 *   ends up stopping at the start of the next frame).
1130
 *
1131
 */
5354 serge 1132
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
2327 Serge 1133
{
5354 serge 1134
	struct drm_device *dev = crtc->base.dev;
2327 Serge 1135
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 1136
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
5354 serge 1137
	enum pipe pipe = crtc->pipe;
2327 Serge 1138
 
1139
	if (INTEL_INFO(dev)->gen >= 4) {
6937 serge 1140
		i915_reg_t reg = PIPECONF(cpu_transcoder);
2327 Serge 1141
 
1142
		/* Wait for the Pipe State to go off */
1143
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1144
			     100))
3031 serge 1145
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 1146
	} else {
1147
		/* Wait for the display line to settle */
4560 Serge 1148
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
3031 serge 1149
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 1150
	}
1151
}
1152
 
1153
/* Only for pre-ILK configs */
4104 Serge 1154
void assert_pll(struct drm_i915_private *dev_priv,
6084 serge 1155
		enum pipe pipe, bool state)
2327 Serge 1156
{
1157
	u32 val;
1158
	bool cur_state;
1159
 
6084 serge 1160
	val = I915_READ(DPLL(pipe));
2327 Serge 1161
	cur_state = !!(val & DPLL_VCO_ENABLE);
6084 serge 1162
	I915_STATE_WARN(cur_state != state,
2327 Serge 1163
	     "PLL state assertion failure (expected %s, current %s)\n",
7144 serge 1164
			onoff(state), onoff(cur_state));
2327 Serge 1165
}
1166
 
4560 Serge 1167
/* XXX: the dsi pll is shared between MIPI DSI ports */
1168
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1169
{
1170
	u32 val;
1171
	bool cur_state;
1172
 
6084 serge 1173
	mutex_lock(&dev_priv->sb_lock);
4560 Serge 1174
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
6084 serge 1175
	mutex_unlock(&dev_priv->sb_lock);
4560 Serge 1176
 
1177
	cur_state = val & DSI_PLL_VCO_EN;
6084 serge 1178
	I915_STATE_WARN(cur_state != state,
4560 Serge 1179
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
7144 serge 1180
			onoff(state), onoff(cur_state));
4560 Serge 1181
}
1182
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1183
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1184
 
4104 Serge 1185
struct intel_shared_dpll *
1186
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1187
{
1188
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1189
 
6084 serge 1190
	if (crtc->config->shared_dpll < 0)
4104 Serge 1191
		return NULL;
1192
 
6084 serge 1193
	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
4104 Serge 1194
}
1195
 
2327 Serge 1196
/* For ILK+ */
4104 Serge 1197
void assert_shared_dpll(struct drm_i915_private *dev_priv,
6084 serge 1198
			struct intel_shared_dpll *pll,
1199
			bool state)
2327 Serge 1200
{
1201
	bool cur_state;
4104 Serge 1202
	struct intel_dpll_hw_state hw_state;
2327 Serge 1203
 
7144 serge 1204
	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
3031 serge 1205
		return;
2342 Serge 1206
 
4104 Serge 1207
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
6084 serge 1208
	I915_STATE_WARN(cur_state != state,
4104 Serge 1209
	     "%s assertion failure (expected %s, current %s)\n",
7144 serge 1210
			pll->name, onoff(state), onoff(cur_state));
2327 Serge 1211
}
1212
 
1213
/* Assert the FDI TX enable state for @pipe matches @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1234
 
1235
/* Assert the FDI RX enable state for @pipe matches @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1249
 
1250
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1251
				      enum pipe pipe)
1252
{
1253
	u32 val;
1254
 
1255
	/* ILK FDI PLL is always enabled */
5060 serge 1256
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
2327 Serge 1257
		return;
1258
 
3031 serge 1259
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
3480 Serge 1260
	if (HAS_DDI(dev_priv->dev))
3031 serge 1261
		return;
1262
 
6084 serge 1263
	val = I915_READ(FDI_TX_CTL(pipe));
1264
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
2327 Serge 1265
}
1266
 
4104 Serge 1267
/* Assert the FDI RX PLL enable state for @pipe matches @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1279
 
5354 serge 1280
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
6084 serge 1281
			   enum pipe pipe)
2327 Serge 1282
{
5354 serge 1283
	struct drm_device *dev = dev_priv->dev;
6937 serge 1284
	i915_reg_t pp_reg;
2327 Serge 1285
	u32 val;
1286
	enum pipe panel_pipe = PIPE_A;
1287
	bool locked = true;
1288
 
5354 serge 1289
	if (WARN_ON(HAS_DDI(dev)))
1290
		return;
1291
 
1292
	if (HAS_PCH_SPLIT(dev)) {
1293
		u32 port_sel;
1294
 
2327 Serge 1295
		pp_reg = PCH_PP_CONTROL;
5354 serge 1296
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1297
 
1298
		if (port_sel == PANEL_PORT_SELECT_LVDS &&
1299
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1300
			panel_pipe = PIPE_B;
1301
		/* XXX: else fix for eDP */
6937 serge 1302
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5354 serge 1303
		/* presumably write lock depends on pipe, not port select */
1304
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1305
		panel_pipe = pipe;
2327 Serge 1306
	} else {
1307
		pp_reg = PP_CONTROL;
5354 serge 1308
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1309
			panel_pipe = PIPE_B;
2327 Serge 1310
	}
1311
 
1312
	val = I915_READ(pp_reg);
1313
	if (!(val & PANEL_POWER_ON) ||
5354 serge 1314
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
2327 Serge 1315
		locked = false;
1316
 
6084 serge 1317
	I915_STATE_WARN(panel_pipe == pipe && locked,
2327 Serge 1318
	     "panel assertion failure, pipe %c regs locked\n",
1319
	     pipe_name(pipe));
1320
}
1321
 
4560 Serge 1322
/* Assert the hardware cursor enable state on @pipe matches @state. */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1339
 
2342 Serge 1340
/* Assert the pipe enable state matches @state, honouring force quirks
 * and skipping the register read if the transcoder power well is off. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1367
 
3031 serge 1368
/* Assert the primary plane enable state matches @state. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
			plane_name(plane), onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1383
 
2327 Serge 1384
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1385
				   enum pipe pipe)
1386
{
4104 Serge 1387
	struct drm_device *dev = dev_priv->dev;
6084 serge 1388
	int i;
2327 Serge 1389
 
4104 Serge 1390
	/* Primary planes are fixed to pipes on gen4+ */
1391
	if (INTEL_INFO(dev)->gen >= 4) {
6084 serge 1392
		u32 val = I915_READ(DSPCNTR(pipe));
1393
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
3031 serge 1394
		     "plane %c assertion failure, should be disabled but not\n",
1395
		     plane_name(pipe));
2327 Serge 1396
		return;
3031 serge 1397
	}
2327 Serge 1398
 
1399
	/* Need to check both planes against the pipe */
5354 serge 1400
	for_each_pipe(dev_priv, i) {
6084 serge 1401
		u32 val = I915_READ(DSPCNTR(i));
1402
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
2327 Serge 1403
			DISPPLANE_SEL_PIPE_SHIFT;
6084 serge 1404
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
2327 Serge 1405
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1406
		     plane_name(i), pipe_name(pipe));
1407
	}
1408
}
1409
 
3746 Serge 1410
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1411
				    enum pipe pipe)
1412
{
4104 Serge 1413
	struct drm_device *dev = dev_priv->dev;
6084 serge 1414
	int sprite;
3746 Serge 1415
 
5354 serge 1416
	if (INTEL_INFO(dev)->gen >= 9) {
6084 serge 1417
		for_each_sprite(dev_priv, pipe, sprite) {
1418
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
1419
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
5354 serge 1420
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
1421
			     sprite, pipe_name(pipe));
1422
		}
6937 serge 1423
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
6084 serge 1424
		for_each_sprite(dev_priv, pipe, sprite) {
1425
			u32 val = I915_READ(SPCNTR(pipe, sprite));
1426
			I915_STATE_WARN(val & SP_ENABLE,
4104 Serge 1427
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
5060 serge 1428
			     sprite_name(pipe, sprite), pipe_name(pipe));
4104 Serge 1429
		}
1430
	} else if (INTEL_INFO(dev)->gen >= 7) {
6084 serge 1431
		u32 val = I915_READ(SPRCTL(pipe));
1432
		I915_STATE_WARN(val & SPRITE_ENABLE,
4104 Serge 1433
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1434
		     plane_name(pipe), pipe_name(pipe));
1435
	} else if (INTEL_INFO(dev)->gen >= 5) {
6084 serge 1436
		u32 val = I915_READ(DVSCNTR(pipe));
1437
		I915_STATE_WARN(val & DVS_ENABLE,
4104 Serge 1438
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1439
		     plane_name(pipe), pipe_name(pipe));
3746 Serge 1440
	}
1441
}
1442
 
5354 serge 1443
/* Warn if vblank interrupts are still enabled on @crtc (get succeeding
 * means they were on); drop the reference we just took. */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1448
 
4560 Serge 1449
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
2327 Serge 1450
{
1451
	u32 val;
1452
	bool enabled;
1453
 
6084 serge 1454
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
3031 serge 1455
 
2327 Serge 1456
	val = I915_READ(PCH_DREF_CONTROL);
1457
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1458
			    DREF_SUPERSPREAD_SOURCE_MASK));
6084 serge 1459
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
2327 Serge 1460
}
1461
 
4104 Serge 1462
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
6084 serge 1463
					   enum pipe pipe)
2327 Serge 1464
{
1465
	u32 val;
1466
	bool enabled;
1467
 
6084 serge 1468
	val = I915_READ(PCH_TRANSCONF(pipe));
2327 Serge 1469
	enabled = !!(val & TRANS_ENABLE);
6084 serge 1470
	I915_STATE_WARN(enabled,
2327 Serge 1471
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1472
	     pipe_name(pipe));
1473
}
1474
 
1475
/* Return true if a PCH DP port with control value @val drives @pipe. */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}
1494
 
1495
/* Return true if an SDVO/HDMI port with control value @val drives @pipe. */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}
1513
 
1514
/* Return true if an LVDS port with control value @val drives @pipe. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}
1529
 
1530
/* Return true if the analog (ADPA/VGA) port with value @val drives @pipe. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}
1544
 
1545
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
6937 serge 1546
				   enum pipe pipe, i915_reg_t reg,
1547
				   u32 port_sel)
2327 Serge 1548
{
1549
	u32 val = I915_READ(reg);
6084 serge 1550
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
2327 Serge 1551
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
6937 serge 1552
	     i915_mmio_reg_offset(reg), pipe_name(pipe));
3031 serge 1553
 
6084 serge 1554
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
3031 serge 1555
	     && (val & DP_PIPEB_SELECT),
1556
	     "IBX PCH dp port still using transcoder B\n");
2327 Serge 1557
}
1558
 
1559
/* Warn if the PCH HDMI port at @reg is enabled on transcoder @pipe. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
1571
 
1572
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1573
				      enum pipe pipe)
1574
{
1575
	u32 val;
1576
 
1577
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1578
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1579
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1580
 
6084 serge 1581
	val = I915_READ(PCH_ADPA);
1582
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1583
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1584
	     pipe_name(pipe));
1585
 
6084 serge 1586
	val = I915_READ(PCH_LVDS);
1587
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1588
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1589
	     pipe_name(pipe));
1590
 
3746 Serge 1591
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1592
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1593
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
2327 Serge 1594
}
1595
 
5354 serge 1596
static void vlv_enable_pll(struct intel_crtc *crtc,
6084 serge 1597
			   const struct intel_crtc_state *pipe_config)
4560 Serge 1598
{
4104 Serge 1599
	struct drm_device *dev = crtc->base.dev;
1600
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 1601
	i915_reg_t reg = DPLL(crtc->pipe);
5354 serge 1602
	u32 dpll = pipe_config->dpll_hw_state.dpll;
2327 Serge 1603
 
4104 Serge 1604
	assert_pipe_disabled(dev_priv, crtc->pipe);
1605
 
6084 serge 1606
	/* PLL is protected by panel, make sure we can write it */
5354 serge 1607
	if (IS_MOBILE(dev_priv->dev))
4104 Serge 1608
		assert_panel_unlocked(dev_priv, crtc->pipe);
2327 Serge 1609
 
4104 Serge 1610
	I915_WRITE(reg, dpll);
1611
	POSTING_READ(reg);
1612
	udelay(150);
2327 Serge 1613
 
4104 Serge 1614
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1615
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1616
 
5354 serge 1617
	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
4104 Serge 1618
	POSTING_READ(DPLL_MD(crtc->pipe));
1619
 
1620
	/* We do this three times for luck */
1621
	I915_WRITE(reg, dpll);
1622
	POSTING_READ(reg);
1623
	udelay(150); /* wait for warmup */
1624
	I915_WRITE(reg, dpll);
1625
	POSTING_READ(reg);
1626
	udelay(150); /* wait for warmup */
1627
	I915_WRITE(reg, dpll);
1628
	POSTING_READ(reg);
1629
	udelay(150); /* wait for warmup */
1630
}
1631
 
5354 serge 1632
static void chv_enable_pll(struct intel_crtc *crtc,
6084 serge 1633
			   const struct intel_crtc_state *pipe_config)
5060 serge 1634
{
1635
	struct drm_device *dev = crtc->base.dev;
1636
	struct drm_i915_private *dev_priv = dev->dev_private;
1637
	int pipe = crtc->pipe;
1638
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1639
	u32 tmp;
1640
 
1641
	assert_pipe_disabled(dev_priv, crtc->pipe);
1642
 
6084 serge 1643
	mutex_lock(&dev_priv->sb_lock);
5060 serge 1644
 
1645
	/* Enable back the 10bit clock to display controller */
1646
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1647
	tmp |= DPIO_DCLKP_EN;
1648
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1649
 
6084 serge 1650
	mutex_unlock(&dev_priv->sb_lock);
1651
 
5060 serge 1652
	/*
1653
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1654
	 */
1655
	udelay(1);
1656
 
1657
	/* Enable PLL */
5354 serge 1658
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
5060 serge 1659
 
1660
	/* Check PLL is locked */
1661
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1662
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1663
 
1664
	/* not sure when this should be written */
5354 serge 1665
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
5060 serge 1666
	POSTING_READ(DPLL_MD(pipe));
1667
}
1668
 
5354 serge 1669
static int intel_num_dvo_pipes(struct drm_device *dev)
1670
{
1671
	struct intel_crtc *crtc;
1672
	int count = 0;
1673
 
1674
	for_each_intel_crtc(dev, crtc)
6084 serge 1675
		count += crtc->base.state->active &&
5354 serge 1676
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1677
 
1678
	return count;
1679
}
1680
 
4104 Serge 1681
/*
 * Enable the pipe DPLL on gen2-4 (pre-ILK) hardware, following the
 * documented write/settle sequence. The pipe must be disabled on entry.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ has a separate register for the pixel multiplier */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1746
 
1747
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose pipe PLL to disable
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1783
 
4539 Serge 1784
/*
 * Disable the DPLL for a VLV pipe. The pipe must already be off.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	val = DPLL_VGA_MODE_DIS;
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

}
1802
 
5060 serge 1803
/*
 * Disable the DPLL for a CHV pipe and gate the 10-bit DPIO clock to the
 * display controller. The pipe must already be off.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* DPIO (sideband) accesses are serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1828
 
4560 Serge 1829
/*
 * Poll the per-port "ready" status bits until they match @expected_mask,
 * warning after a 1000ms timeout. Port B/C status lives in DPLL(0)
 * (port C's field is shifted by 4 bits); port D status is in
 * DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1858
 
5060 serge 1859
/*
 * Program (mode_set) @crtc's shared DPLL, but only when no other crtc is
 * actively using it yet (pll->active == 0); an already-active PLL must not
 * be reprogrammed under its users.
 */
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	/* the crtc should already be accounted for in the PLL's crtc_mask */
	WARN_ON(!pll->config.crtc_mask);
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}
1877
 
2327 Serge 1878
/**
 * intel_enable_shared_dpll - enable a crtc's shared DPLL
 * @crtc: crtc whose shared DPLL to enable
 *
 * Enable the shared DPLL used by @crtc, reference-counted across crtcs.
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->config.crtc_mask == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* already in use by another crtc: just take a reference */
	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	/* balanced by intel_disable_shared_dpll() when the last user goes */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}
1915
 
5354 serge 1916
/*
 * Drop @crtc's reference on its shared DPLL and turn the PLL off when the
 * last user goes away. No-op on pre-ILK hardware (no shared DPLLs).
 */
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	if (pll == NULL)
		return;

	/* this crtc must be listed among the PLL's users */
	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	/* other crtcs still reference the PLL: keep it running */
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;

	/* balances the get in intel_enable_shared_dpll() */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
1952
 
3243 Serge 1953
/*
 * Enable the PCH transcoder for @pipe on ILK-class PCHs, copying the
 * relevant BPC/interlace configuration from the CPU pipe. The shared DPLL
 * and FDI link must already be up.
 */
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev));

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv,
				   intel_crtc_to_shared_dpll(intel_crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
2013
 
3243 Serge 2014
/*
 * Enable the single LPT PCH transcoder, mirroring the interlace mode of
 * the given CPU transcoder. FDI must already be feeding us bits.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
2044
 
2045
/*
 * Disable the PCH transcoder for @pipe and wait for it to report off.
 * FDI and the PCH ports must be disabled first.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
2075
 
3243 Serge 2076
/*
 * Disable the single LPT PCH transcoder, wait for it to report off, and
 * clear the timing-override workaround bit set at enable time.
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
2092
 
2327 Serge 2093
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pch_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	/* nothing may be scanning out of a pipe that isn't running yet */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* LPT has a single PCH transcoder, always fed from transcoder A */
	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv->dev))
		if (crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* only the force-quirk pipes may legitimately already be on */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which messes up also the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on()
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}
2163
 
2164
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* already off: nothing to do */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* only wait for shutdown if we actually turned the pipe off */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
2213
 
6084 serge 2214
/*
 * Whether scanout buffers need the VT-d workaround (extra alignment and
 * PTE padding): true on gen6+ when the IOMMU is translating GPU accesses;
 * always false when the kernel lacks INTEL_IOMMU support.
 */
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}
2222
 
7144 serge 2223
/* Size in bytes of one tile: 2KiB on gen2, 4KiB on everything else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
2327 Serge 2227
 
7144 serge 2228
static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
2229
				     uint64_t fb_modifier, unsigned int cpp)
2230
{
2231
	switch (fb_modifier) {
6084 serge 2232
	case DRM_FORMAT_MOD_NONE:
7144 serge 2233
		return cpp;
6084 serge 2234
	case I915_FORMAT_MOD_X_TILED:
7144 serge 2235
		if (IS_GEN2(dev_priv))
2236
			return 128;
2237
		else
2238
			return 512;
6084 serge 2239
	case I915_FORMAT_MOD_Y_TILED:
7144 serge 2240
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2241
			return 128;
2242
		else
2243
			return 512;
6084 serge 2244
	case I915_FORMAT_MOD_Yf_TILED:
7144 serge 2245
		switch (cpp) {
6084 serge 2246
		case 1:
7144 serge 2247
			return 64;
6084 serge 2248
		case 2:
2249
		case 4:
7144 serge 2250
			return 128;
6084 serge 2251
		case 8:
2252
		case 16:
7144 serge 2253
			return 256;
2254
		default:
2255
			MISSING_CASE(cpp);
2256
			return cpp;
6084 serge 2257
		}
2258
		break;
2259
	default:
7144 serge 2260
		MISSING_CASE(fb_modifier);
2261
		return cpp;
6084 serge 2262
	}
7144 serge 2263
}
2327 Serge 2264
 
7144 serge 2265
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2266
			       uint64_t fb_modifier, unsigned int cpp)
2267
{
2268
	if (fb_modifier == DRM_FORMAT_MOD_NONE)
2269
		return 1;
2270
	else
2271
		return intel_tile_size(dev_priv) /
2272
			intel_tile_width(dev_priv, fb_modifier, cpp);
6084 serge 2273
}
4560 Serge 2274
 
6084 serge 2275
unsigned int
2276
intel_fb_align_height(struct drm_device *dev, unsigned int height,
7144 serge 2277
		      uint32_t pixel_format, uint64_t fb_modifier)
6084 serge 2278
{
7144 serge 2279
	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2280
	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2281
 
2282
	return ALIGN(height, tile_height);
2327 Serge 2283
}
2284
 
6937 serge 2285
/*
 * Fill in the GGTT view for scanning out @fb with @plane_state: the normal
 * view unless the plane is rotated 90/270 degrees, in which case the
 * rotated-view parameters (per-plane page geometry, including the NV12
 * chroma plane) are computed from the fb layout.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	struct intel_rotation_info *info = &view->params.rotated;
	unsigned int tile_size, tile_width, tile_height, cpp;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return;

	/* only 90/270 rotation needs the special rotated GGTT view */
	if (!intel_rotation_90_or_270(plane_state->rotation))
		return;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->uv_offset = fb->offsets[1];
	info->fb_modifier = fb->modifier[0];

	tile_size = intel_tile_size(dev_priv);

	/* plane 0 geometry, in tiles (== pages) */
	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
	tile_height = tile_size / tile_width;

	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
	info->size = info->width_pages * info->height_pages * tile_size;

	if (info->pixel_format == DRM_FORMAT_NV12) {
		/* NV12 chroma plane is half-height, with its own modifier */
		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
		tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
		tile_height = tile_size / tile_width;

		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
		info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
	}
}
2329
 
7144 serge 2330
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
5060 serge 2331
{
6084 serge 2332
	if (INTEL_INFO(dev_priv)->gen >= 9)
2333
		return 256 * 1024;
2334
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
6937 serge 2335
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6084 serge 2336
		return 128 * 1024;
2337
	else if (INTEL_INFO(dev_priv)->gen >= 4)
2338
		return 4 * 1024;
2339
	else
2340
		return 0;
5060 serge 2341
}
2342
 
7144 serge 2343
static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2344
					 uint64_t fb_modifier)
2345
{
2346
	switch (fb_modifier) {
2347
	case DRM_FORMAT_MOD_NONE:
2348
		return intel_linear_alignment(dev_priv);
2349
	case I915_FORMAT_MOD_X_TILED:
2350
		if (INTEL_INFO(dev_priv)->gen >= 9)
2351
			return 256 * 1024;
2352
		return 0;
2353
	case I915_FORMAT_MOD_Y_TILED:
2354
	case I915_FORMAT_MOD_Yf_TILED:
2355
		return 1 * 1024 * 1024;
2356
	default:
2357
		MISSING_CASE(fb_modifier);
2358
		return 0;
2359
	}
2360
}
2361
 
2335 Serge 2362
/*
 * Pin @fb's backing object into the GGTT for scanout (using the view
 * required by @plane_state) and, for the normal view, install a fence for
 * tiled scan-out. Caller must hold struct_mutex.
 *
 * Returns 0 on success or a negative error code; on error nothing remains
 * pinned.
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2327 Serge 2435
 
6084 serge 2436
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (normal view only)
 * and unpin the object from the display GGTT view. Caller must hold
 * struct_mutex.
 */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	/* recompute the same view used at pin time */
	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}
2451
 
2452
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2453
 * is assumed to be a power-of-two. */
7144 serge 2454
u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
2455
			      int *x, int *y,
2456
			      uint64_t fb_modifier,
2457
			      unsigned int cpp,
2458
			      unsigned int pitch)
3031 serge 2459
{
7144 serge 2460
	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
2461
		unsigned int tile_size, tile_width, tile_height;
3480 Serge 2462
		unsigned int tile_rows, tiles;
3031 serge 2463
 
7144 serge 2464
		tile_size = intel_tile_size(dev_priv);
2465
		tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
2466
		tile_height = tile_size / tile_width;
3031 serge 2467
 
7144 serge 2468
		tile_rows = *y / tile_height;
2469
		*y %= tile_height;
3480 Serge 2470
 
7144 serge 2471
		tiles = *x / (tile_width/cpp);
2472
		*x %= tile_width/cpp;
2473
 
2474
		return tile_rows * pitch * tile_height + tiles * tile_size;
3480 Serge 2475
	} else {
6084 serge 2476
		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
3480 Serge 2477
		unsigned int offset;
2478
 
2479
		offset = *y * pitch + *x * cpp;
6084 serge 2480
		*y = (offset & alignment) / pitch;
2481
		*x = ((offset & alignment) - *y * pitch) / cpp;
2482
		return offset & ~alignment;
3480 Serge 2483
	}
3031 serge 2484
}
2485
 
6084 serge 2486
static int i9xx_format_to_fourcc(int format)
2327 Serge 2487
{
5060 serge 2488
	switch (format) {
2489
	case DISPPLANE_8BPP:
2490
		return DRM_FORMAT_C8;
2491
	case DISPPLANE_BGRX555:
2492
		return DRM_FORMAT_XRGB1555;
2493
	case DISPPLANE_BGRX565:
2494
		return DRM_FORMAT_RGB565;
2495
	default:
2496
	case DISPPLANE_BGRX888:
2497
		return DRM_FORMAT_XRGB8888;
2498
	case DISPPLANE_RGBX888:
2499
		return DRM_FORMAT_XBGR8888;
2500
	case DISPPLANE_BGRX101010:
2501
		return DRM_FORMAT_XRGB2101010;
2502
	case DISPPLANE_RGBX101010:
2503
		return DRM_FORMAT_XBGR2101010;
2504
	}
2505
}
2506
 
6084 serge 2507
/*
 * Translate a SKL PLANE_CTL format field (plus byte-order and alpha flags)
 * to a DRM fourcc; unrecognized formats are treated as XRGB_8888.
 */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	if (format == PLANE_CTL_FORMAT_RGB_565)
		return DRM_FORMAT_RGB565;

	if (format == PLANE_CTL_FORMAT_XRGB_2101010)
		return rgb_order ? DRM_FORMAT_XBGR2101010 :
				   DRM_FORMAT_XRGB2101010;

	/* PLANE_CTL_FORMAT_XRGB_8888 and anything unrecognized */
	if (rgb_order)
		return alpha ? DRM_FORMAT_ABGR8888 : DRM_FORMAT_XBGR8888;

	return alpha ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888;
}
2532
 
2533
/*
 * Try to wrap the firmware/BIOS-programmed scanout buffer described by
 * @plane_config in a GEM stolen-memory object and an intel framebuffer.
 *
 * Returns true on success; on failure the caller is expected to fall back
 * to sharing an fb with another CRTC or disabling the plane.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* Page-align the range [base, base + size) reported by the BIOS. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	/* Preallocated by the BIOS: claim the existing stolen-memory range. */
	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	/* Inherit the tiling mode the firmware programmed. */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	/* Drop the stolen object reference before releasing struct_mutex. */
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2595
 
6084 serge 2596
/* Update plane->state->fb to match plane->fb after driver-internal updates */
2597
static void
2598
update_state_fb(struct drm_plane *plane)
5060 serge 2599
{
6084 serge 2600
	if (plane->fb == plane->state->fb)
2601
		return;
2602
 
2603
	if (plane->state->fb)
2604
		drm_framebuffer_unreference(plane->state->fb);
2605
	plane->state->fb = plane->fb;
2606
	if (plane->state->fb)
2607
		drm_framebuffer_reference(plane->state->fb);
2608
}
2609
 
2610
/*
 * Take over the firmware-programmed framebuffer for @intel_crtc's primary
 * plane. First try to wrap the BIOS buffer in a new fb; failing that,
 * share an fb from another active CRTC scanning out the same GGTT base;
 * failing that, disable the primary plane so the state stays consistent.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Full-screen scanout: source rect covers the whole fb (16.16 fixed
	 * point for src coordinates). */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	/* Mirror the legacy coordinates into the intel plane state rects. */
	intel_state->src.x1 = plane_state->src_x;
	intel_state->src.y1 = plane_state->src_y;
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
	intel_state->dst.x1 = plane_state->crtc_x;
	intel_state->dst.y1 = plane_state->crtc_y;
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

	obj = intel_fb_obj(fb);
	/* Keep the BIOS swizzle setting if the inherited fb is tiled. */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2705
 
7144 serge 2706
/*
 * Program the gen2-gen4 (and VLV/CHV) primary plane registers from the
 * given CRTC and plane state: control bits, stride, offsets and surface
 * base address.
 */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point. */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV plane B uses the PRIM* register set. */
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Translate the fb fourcc into DSPCNTR format bits. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ programs a tile-aligned surface base plus x/y
		 * offsets; intel_compute_tile_offset also adjusts x/y. */
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(dev_priv, &x, &y,
						  fb->modifier[0], cpp,
						  fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		data and adding to linear_offset*/
		linear_offset +=
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
			(crtc_state->pipe_src_w - 1) * cpp;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF write latches the other registers on gen4+. */
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2819
 
7144 serge 2820
static void i9xx_disable_primary_plane(struct drm_plane *primary,
2821
				       struct drm_crtc *crtc)
2327 Serge 2822
{
6084 serge 2823
	struct drm_device *dev = crtc->dev;
2824
	struct drm_i915_private *dev_priv = dev->dev_private;
2825
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2826
	int plane = intel_crtc->plane;
2327 Serge 2827
 
7144 serge 2828
	I915_WRITE(DSPCNTR(plane), 0);
2829
	if (INTEL_INFO(dev_priv)->gen >= 4)
5354 serge 2830
		I915_WRITE(DSPSURF(plane), 0);
7144 serge 2831
	else
2832
		I915_WRITE(DSPADDR(plane), 0);
2833
	POSTING_READ(DSPCNTR(plane));
2834
}
5354 serge 2835
 
7144 serge 2836
/*
 * Program the ILK..BDW primary plane registers from the given CRTC and
 * plane state: control bits, stride, tile/linear offsets and surface base.
 */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point. */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the fb fourcc into DSPCNTR format bits. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;
	/* Tile-aligned surface base; x/y are adjusted by the helper. */
	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(dev_priv, &x, &y,
					  fb->modifier[0], cpp,
					  fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW handle the offset adjustment in hardware. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			data and adding to linear_offset*/
			linear_offset +=
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
				(crtc_state->pipe_src_w - 1) * cpp;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2925
 
7144 serge 2926
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2927
			      uint64_t fb_modifier, uint32_t pixel_format)
6084 serge 2928
{
7144 serge 2929
	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
2930
		return 64;
2931
	} else {
2932
		int cpp = drm_format_plane_cpp(pixel_format, 0);
6084 serge 2933
 
7144 serge 2934
		return intel_tile_width(dev_priv, fb_modifier, cpp);
6084 serge 2935
	}
2936
}
2937
 
6660 serge 2938
/*
 * Return the 32-bit GGTT offset of @obj for scanout by @intel_plane.
 * For color plane 1 (NV12-style UV data) the rotated view's UV start page
 * is added. Returns (u32)-1 if no GGTT VMA exists for the display view.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	/* Build the GGTT view (normal or rotated) matching the plane state. */
	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display engine addresses are 32 bit; flag anything above 4GiB. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2965
 
2966
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2967
{
2968
	struct drm_device *dev = intel_crtc->base.dev;
2969
	struct drm_i915_private *dev_priv = dev->dev_private;
2970
 
2971
	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2972
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2973
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2974
}
2975
 
2976
/*
2977
 * This function detaches (aka. unbinds) unused scalers in hardware
2978
 */
2979
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2980
{
2981
	struct intel_crtc_scaler_state *scaler_state;
2982
	int i;
2983
 
2984
	scaler_state = &intel_crtc->config->scaler_state;
2985
 
2986
	/* loop through and disable scalers that aren't in use */
2987
	for (i = 0; i < intel_crtc->num_scalers; i++) {
2988
		if (!scaler_state->scalers[i].in_use)
2989
			skl_detach_scaler(intel_crtc, i);
2990
	}
2991
}
2992
 
2993
/*
 * Translate a DRM fourcc into the SKL+ PLANE_CTL format/order/alpha bits.
 * Unsupported formats hit MISSING_CASE and return 0.
 */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
		return 0;
	}
}
3033
 
3034
/*
 * Translate a framebuffer tiling modifier into SKL+ PLANE_CTL tiling bits.
 * Linear has no bits to set; unknown modifiers hit MISSING_CASE.
 */
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return 0;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
		return 0;
	}
}
3051
 
3052
u32 skl_plane_ctl_rotation(unsigned int rotation)
3053
{
3054
	switch (rotation) {
3055
	case BIT(DRM_ROTATE_0):
3056
		break;
3057
	/*
3058
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
3059
	 * while i915 HW rotation is clockwise, thats why this swapping.
3060
	 */
3061
	case BIT(DRM_ROTATE_90):
3062
		return PLANE_CTL_ROTATE_270;
3063
	case BIT(DRM_ROTATE_180):
3064
		return PLANE_CTL_ROTATE_180;
3065
	case BIT(DRM_ROTATE_270):
3066
		return PLANE_CTL_ROTATE_90;
3067
	default:
3068
		MISSING_CASE(rotation);
3069
	}
3070
 
3071
	return 0;
3072
}
3073
 
7144 serge 3074
/*
 * Program the SKL+ universal primary plane (plane 0) from the given CRTC
 * and plane state: PLANE_CTL bits, offset/size/stride, optional pipe
 * scaler setup, then the surface address which latches the update.
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation = plane_state->base.rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	int scaler_id = plane_state->scaler_id;
	/* src rect is 16.16 fixed point, dst rect is integer pixels. */
	int src_x = plane_state->src.x1 >> 16;
	int src_y = plane_state->src.y1 >> 16;
	int src_w = drm_rect_width(&plane_state->src) >> 16;
	int src_h = drm_rect_height(&plane_state->src) >> 16;
	int dst_x = plane_state->dst.x1;
	int dst_y = plane_state->dst.y1;
	int dst_w = drm_rect_width(&plane_state->dst);
	int dst_h = drm_rect_height(&plane_state->dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	if (intel_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* 90/270 scanout walks the fb sideways, so x/y and w/h swap. */
		x_offset = stride * tile_height - src_y - src_h;
		y_offset = src_x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = src_x;
		y_offset = src_y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		/* Route this plane through an assigned pipe scaler. */
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* Surface address write latches the whole plane update. */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3158
 
7144 serge 3159
static void skylake_disable_primary_plane(struct drm_plane *primary,
3160
					  struct drm_crtc *crtc)
3161
{
3162
	struct drm_device *dev = crtc->dev;
3163
	struct drm_i915_private *dev_priv = dev->dev_private;
3164
	int pipe = to_intel_crtc(crtc)->pipe;
3165
 
3166
	I915_WRITE(PLANE_CTL(pipe, 0), 0);
3167
	I915_WRITE(PLANE_SURF(pipe, 0), 0);
3168
	POSTING_READ(PLANE_SURF(pipe, 0));
3169
}
3170
 
2327 Serge 3171
/* Assume fb object is pinned & idle & fenced and just update base pointers */
3172
static int
3173
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3174
			   int x, int y, enum mode_set_atomic state)
3175
{
7144 serge 3176
	/* Support for kgdboc is disabled, this needs a major rework. */
3177
	DRM_ERROR("legacy panic handler not supported any more.\n");
3031 serge 3178
 
7144 serge 3179
	return -ENODEV;
3031 serge 3180
}
3181
 
5354 serge 3182
static void intel_complete_page_flips(struct drm_device *dev)
4104 Serge 3183
{
3184
	struct drm_crtc *crtc;
3185
 
5060 serge 3186
	for_each_crtc(dev, crtc) {
4104 Serge 3187
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3188
		enum plane plane = intel_crtc->plane;
3189
 
3190
		intel_prepare_page_flip(dev, plane);
3191
		intel_finish_page_flip_plane(dev, plane);
3192
	}
5354 serge 3193
}
4104 Serge 3194
 
5354 serge 3195
static void intel_update_primary_planes(struct drm_device *dev)
3196
{
3197
	struct drm_crtc *crtc;
3198
 
5060 serge 3199
	for_each_crtc(dev, crtc) {
6084 serge 3200
		struct intel_plane *plane = to_intel_plane(crtc->primary);
3201
		struct intel_plane_state *plane_state;
4104 Serge 3202
 
6084 serge 3203
		drm_modeset_lock_crtc(crtc, &plane->base);
3204
		plane_state = to_intel_plane_state(plane->base.state);
3205
 
7144 serge 3206
		if (plane_state->visible)
3207
			plane->update_plane(&plane->base,
3208
					    to_intel_crtc_state(crtc->state),
3209
					    plane_state);
6084 serge 3210
 
3211
		drm_modeset_unlock_crtc(crtc);
4104 Serge 3212
	}
3213
}
3214
 
5354 serge 3215
void intel_prepare_reset(struct drm_device *dev)
3216
{
3217
	/* no reset support for gen2 */
3218
	if (IS_GEN2(dev))
3219
		return;
3220
 
3221
	/* reset doesn't touch the display */
3222
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3223
		return;
3224
 
3225
	drm_modeset_lock_all(dev);
3226
	/*
3227
	 * Disabling the crtcs gracefully seems nicer. Also the
3228
	 * g33 docs say we should at least disable all the planes.
3229
	 */
6084 serge 3230
	intel_display_suspend(dev);
5354 serge 3231
}
3232
 
3233
/*
 * Bring the display back after a GPU reset: complete nuked page flips,
 * and either just restore the plane base addresses (when the reset did
 * not touch the display) or fully re-initialize display hardware state.
 * Counterpart of intel_prepare_reset(); releases the modeset locks it
 * took.
 */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	/* Re-arm hotplug detection under the irq lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev);

	intel_hpd_init(dev_priv);

	drm_modeset_unlock_all(dev);
}
3283
 
5060 serge 3284
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
4104 Serge 3285
{
3286
	struct drm_device *dev = crtc->dev;
5060 serge 3287
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3288
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 3289
	bool pending;
4104 Serge 3290
 
5060 serge 3291
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3292
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3293
		return false;
4104 Serge 3294
 
5354 serge 3295
	spin_lock_irq(&dev->event_lock);
5060 serge 3296
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
5354 serge 3297
	spin_unlock_irq(&dev->event_lock);
4104 Serge 3298
 
5060 serge 3299
	return pending;
4104 Serge 3300
}
2327 Serge 3301
 
6084 serge 3302
/*
 * Apply pipe-level config changes (pipe source size, CSC, panel fitter)
 * for a fastboot-style flip where no full modeset is performed.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	if (HAS_DDI(dev))
		intel_set_pipe_csc(&crtc->base);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3346
 
3347
/*
 * Switch the FDI TX/RX link of @crtc from training patterns to the normal
 * pixel-data mode, with enhanced framing, after link training completed.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a separate train-select field in FDI_TX_CTL. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT PCH uses its own pattern field encoding on the RX side. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3388
 
3389
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU-to-PCH FDI link for @crtc: run training pattern 1 until
 * bit lock is reported in FDI_RX_IIR, then pattern 2 until symbol lock.
 * Each phase polls up to 5 times; failure is only logged, not returned.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock (pattern 1 complete) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* write back the set bit to ack the interrupt */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock (pattern 2 complete) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3482
 
2342 Serge 3483
/*
 * FDI TX voltage-swing / pre-emphasis settings, tried in array order by
 * the SNB and IVB link-training loops (OR'ed into the
 * FDI_LINK_TRAIN_VOL_EMP field of FDI_TX_CTL after clearing the mask).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3489
 
3490
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link on SNB: for each of the two training patterns, step
 * through the snb_b_fdi_train_param vswing/pre-emphasis table, polling
 * FDI_RX_IIR up to 5 times per setting for bit lock (train 1) or symbol
 * lock (train 2). Failure is only logged, not returned.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Train 1: try each vswing/pre-emphasis level until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* ack the status bit by writing it back */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Train 2: step the levels again until symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3622
 
3623
/* Manual link training for Ivy Bridge A0 parts */
/*
 * IVB A0 cannot use hardware auto-training, so drive the full sequence
 * manually: for each vswing/pre-emphasis level (each tried twice), fully
 * disable TX/RX, re-enable with pattern 1, poll for bit lock, then switch
 * to pattern 2 and poll for symbol lock. Success jumps to train_done;
 * exhausting all levels is only logged.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* double-read: the lock bit may set between reads */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3742
 
3031 serge 3743
/*
 * Enable the FDI PLLs for @intel_crtc's pipe: bring up the PCH FDI RX
 * PLL (copying lane count and BPC from PIPECONF), switch the RX clock
 * from Rawclk to PCDclk, then enable the CPU FDI TX PLL if it is off.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC into bits 18:16 of FDI RX */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3779
 
3031 serge 3780
/*
 * Disable the FDI PLLs for @intel_crtc's pipe, reversing
 * ironlake_fdi_pll_enable(): switch RX back to Rawclk, then turn off
 * the TX PLL followed by the RX PLL, waiting for the clocks to settle.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3809
 
2327 Serge 3810
/*
 * Disable the FDI link for @crtc: turn off CPU FDI TX and PCH FDI RX,
 * apply the Ironlake clock-pointer workaround, and leave both ends
 * parked on training pattern 1 with BPC matching PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3862
 
5060 serge 3863
bool intel_has_pending_fb_unpin(struct drm_device *dev)
2327 Serge 3864
{
5060 serge 3865
	struct intel_crtc *crtc;
2327 Serge 3866
 
5060 serge 3867
	/* Note that we don't need to be called with mode_config.lock here
3868
	 * as our list of CRTC objects is static for the lifetime of the
3869
	 * device and so cannot disappear as we iterate. Similarly, we can
3870
	 * happily treat the predicates as racy, atomic checks as userspace
3871
	 * cannot claim and pin a new fb without at least acquring the
3872
	 * struct_mutex and so serialising with us.
3873
	 */
3874
	for_each_intel_crtc(dev, crtc) {
3875
		if (atomic_read(&crtc->unpin_work_count) == 0)
3876
			continue;
2327 Serge 3877
 
5060 serge 3878
		if (crtc->unpin_work)
3879
			intel_wait_for_vblank(dev, crtc->pipe);
3031 serge 3880
 
5060 serge 3881
		return true;
3882
	}
3883
 
3884
	return false;
2327 Serge 3885
}
3886
 
6283 serge 3887
/*
 * Finish a completed page flip on @intel_crtc: clear ->unpin_work, send
 * the userspace vblank event (if requested), drop the vblank reference,
 * wake waiters on pending_flip_queue, and queue the unpin work item.
 * Caller context: NOTE(review) — appears to rely on the caller holding
 * the appropriate event lock (see intel_crtc_wait_for_pending_flips),
 * not re-checked here.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
6320 serge 3909
 
6937 serge 3910
/*
 * Wait (interruptibly, up to 60 seconds) for all pending page flips on
 * @crtc to complete. On timeout, forcibly complete a stuck flip under
 * the event lock and warn. Returns 0 on success or a negative error if
 * the wait was interrupted.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		/* timed out: clean up the stuck flip ourselves */
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		spin_lock_irq(&dev->event_lock);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
6937 serge 3939
 
3940
/*
 * Gate the LPT iCLKIP clock: gate the pixel clock, then set the disable
 * bit in SBI_SSCCTL6 over the sideband interface (under sb_lock).
 */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
3954
 
3031 serge 3955
/* Program iCLKIP clock to the desired frequency */
/*
 * Compute the iCLKIP divider settings (auxdiv, divsel, phaseinc) from the
 * CRTC's adjusted pixel clock, program them through the sideband
 * interface, re-enable the modulator, and ungate the pixel clock.
 */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* iCLKIP must be disabled while it is reprogrammed */
	lpt_disable_iclkip(dev_priv);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in kHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4034
 
4104 Serge 4035
/*
 * Copy the CPU transcoder's H/V timing registers (total, blank, sync,
 * vsyncshift) into the PCH transcoder @pch_transcoder so both sides of
 * the FDI link run identical timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4058
 
6084 serge 4059
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * No-op if the bit already matches @enable; warns if FDI RX on pipe B
 * or C is still enabled, since the bit must only change while both are
 * off.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4079
 
4080
/*
 * Choose the FDI B/C bifurcation setting for @intel_crtc's pipe:
 * pipe A needs nothing; pipe B bifurcates unless it uses more than 2
 * FDI lanes; pipe C always requires bifurcation.
 */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}
4102
 
6937 serge 4103
/* Return which DP Port should be selected for Transcoder DP control */
4104
static enum port
4105
intel_trans_dp_port_sel(struct drm_crtc *crtc)
4106
{
4107
	struct drm_device *dev = crtc->dev;
4108
	struct intel_encoder *encoder;
4109
 
4110
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4111
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4112
		    encoder->type == INTEL_OUTPUT_EDP)
4113
			return enc_to_dig_port(&encoder->base)->port;
4114
	}
4115
 
4116
	return -1;
4117
}
4118
 
2327 Serge 4119
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* training is done, underrun reporting can be re-enabled */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* route the transcoder to whichever digital port drives DP */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4223
 
3243 Serge 4224
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the CPU
 * transcoder timings to the (single) PCH transcoder A, and enable it.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4240
 
6084 serge 4241
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4242
						struct intel_crtc_state *crtc_state)
3031 serge 4243
{
4104 Serge 4244
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
5354 serge 4245
	struct intel_shared_dpll *pll;
6084 serge 4246
	struct intel_shared_dpll_config *shared_dpll;
4104 Serge 4247
	enum intel_dpll_id i;
6084 serge 4248
	int max = dev_priv->num_shared_dpll;
3031 serge 4249
 
6084 serge 4250
	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4251
 
3031 serge 4252
	if (HAS_PCH_IBX(dev_priv->dev)) {
4253
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4104 Serge 4254
		i = (enum intel_dpll_id) crtc->pipe;
4255
		pll = &dev_priv->shared_dplls[i];
3031 serge 4256
 
4104 Serge 4257
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4258
			      crtc->base.base.id, pll->name);
3031 serge 4259
 
6084 serge 4260
		WARN_ON(shared_dpll[i].crtc_mask);
5060 serge 4261
 
3031 serge 4262
		goto found;
4263
	}
4264
 
6084 serge 4265
	if (IS_BROXTON(dev_priv->dev)) {
4266
		/* PLL is attached to port in bxt */
4267
		struct intel_encoder *encoder;
4268
		struct intel_digital_port *intel_dig_port;
4269
 
4270
		encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4271
		if (WARN_ON(!encoder))
4272
			return NULL;
4273
 
4274
		intel_dig_port = enc_to_dig_port(&encoder->base);
4275
		/* 1:1 mapping between ports and PLLs */
4276
		i = (enum intel_dpll_id)intel_dig_port->port;
4104 Serge 4277
		pll = &dev_priv->shared_dplls[i];
6084 serge 4278
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4279
			crtc->base.base.id, pll->name);
4280
		WARN_ON(shared_dpll[i].crtc_mask);
3031 serge 4281
 
6084 serge 4282
		goto found;
4283
	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4284
		/* Do not consider SPLL */
4285
		max = 2;
4286
 
4287
	for (i = 0; i < max; i++) {
4288
		pll = &dev_priv->shared_dplls[i];
4289
 
3031 serge 4290
		/* Only want to check enabled timings first */
6084 serge 4291
		if (shared_dpll[i].crtc_mask == 0)
3031 serge 4292
			continue;
4293
 
6084 serge 4294
		if (memcmp(&crtc_state->dpll_hw_state,
4295
			   &shared_dpll[i].hw_state,
4296
			   sizeof(crtc_state->dpll_hw_state)) == 0) {
5354 serge 4297
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4298
				      crtc->base.base.id, pll->name,
6084 serge 4299
				      shared_dpll[i].crtc_mask,
5354 serge 4300
				      pll->active);
3031 serge 4301
			goto found;
4302
		}
4303
	}
4304
 
4305
	/* Ok no matching timings, maybe there's a free one? */
4104 Serge 4306
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4307
		pll = &dev_priv->shared_dplls[i];
6084 serge 4308
		if (shared_dpll[i].crtc_mask == 0) {
4104 Serge 4309
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4310
				      crtc->base.base.id, pll->name);
3031 serge 4311
			goto found;
4312
		}
4313
	}
4314
 
4315
	return NULL;
4316
 
4317
found:
6084 serge 4318
	if (shared_dpll[i].crtc_mask == 0)
4319
		shared_dpll[i].hw_state =
4320
			crtc_state->dpll_hw_state;
5060 serge 4321
 
6084 serge 4322
	crtc_state->shared_dpll = i;
4104 Serge 4323
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4324
			 pipe_name(crtc->pipe));
4325
 
6084 serge 4326
	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
3031 serge 4327
 
4328
	return pll;
4329
}
4330
 
6084 serge 4331
/* Copy the shared-DPLL configuration staged in @state (by
 * intel_get_shared_dpll) into the live per-device pll->config,
 * making the atomic DPLL assignment take effect. No-op unless
 * the atomic state actually touched the DPLLs (dpll_set). */
static void intel_shared_dpll_commit(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_shared_dpll_config *shared_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (!to_intel_atomic_state(state)->dpll_set)
		return;

	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		pll = &dev_priv->shared_dplls[i];
		pll->config = shared_dpll[i];
	}
}
5354 serge 4347
 
6084 serge 4348
/* Sanity-check that @pipe is actually running after a CPT modeset:
 * sample the pipe's display scanline counter (PIPEDSL) and verify it
 * advances; if it stays frozen through two 5 ms waits, the pipe is stuck. */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* retry once before declaring the pipe dead */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
5354 serge 4361
 
6084 serge 4362
/*
 * Stage a scaler allocation/release in @crtc_state for one user
 * (the crtc itself or a plane, identified by @scaler_user).
 *
 * Only crtc_state->scaler_state bookkeeping is updated here; the
 * actual scaler registers are programmed later during plane/panel-fit
 * programming. Returns 0 on success, -EINVAL if the requested
 * source/destination size is outside the SKL scaler limits.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* 90/270 rotation swaps width and height before the compare. */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4422
 
6084 serge 4423
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc's scaler state
 *
 * Stages a panel-fitter scaler request for the crtc itself (SKL_CRTC_INDEX),
 * scaling from the pipe source size to the adjusted mode's active size.
 * A crtc with base.active == false is treated as a forced detach.
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);

	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
4445
 
6084 serge 4446
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Stages a scaler request for @plane_state (or a detach if the plane has
 * no fb / is invisible), then validates constraints the scaler hardware
 * imposes on planes: no color keying, and a supported pixel format.
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	bool force_detach = !fb || !plane_state->visible;

	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
		      intel_plane->base.base.id, intel_crtc->pipe,
		      drm_plane_index(&intel_plane->base));

	/* src coordinates are 16.16 fixed point, hence the >> 16 */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->src) >> 16,
				drm_rect_height(&plane_state->src) >> 16,
				drm_rect_width(&plane_state->dst),
				drm_rect_height(&plane_state->dst));

	/* done if the request failed or no scaler ended up assigned */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
			      intel_plane->base.base.id);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}
4513
 
6084 serge 4514
static void skylake_scaler_disable(struct intel_crtc *crtc)
2342 Serge 4515
{
6084 serge 4516
	int i;
2342 Serge 4517
 
6084 serge 4518
	for (i = 0; i < crtc->num_scalers; i++)
4519
		skl_detach_scaler(crtc, i);
2342 Serge 4520
}
4521
 
5354 serge 4522
/* Program the SKL pipe scaler that was reserved for panel fitting
 * (see skl_update_scaler_crtc): enable it with medium filtering and
 * set the window position/size from the staged pch_pfit config.
 * No-op when panel fitting is not enabled for this crtc. */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* A scaler must have been claimed during the atomic check phase. */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4549
 
4104 Serge 4550
/* Enable the ILK-style panel fitter for @crtc if the staged config
 * requests it, programming control, window position and window size. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			/* IVB/HSW select the target pipe in PF_CTL, not by MMIO offset */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4570
 
4560 Serge 4571
/* Enable Intermediate Pixel Storage (IPS) on HSW/BDW for @crtc, if the
 * staged config wants it. BDW goes through the pcode mailbox; HSW writes
 * IPS_CTL directly and waits for the hardware to latch the enable bit. */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4603
 
4604
/* Disable IPS on HSW/BDW for @crtc (no-op if the config never enabled it).
 * BDW uses the pcode mailbox and polls for completion; HSW clears IPS_CTL
 * directly. A trailing vblank wait is required before the plane may be
 * disabled. */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4628
 
4629
/** Loads the palette/gamma unit for the CRTC with the prepared values.
 *
 * Writes the 256-entry lut_r/g/b tables into the pipe's legacy palette
 * registers. The pipe clocks must be running; on GMCH platforms that is
 * asserted via the PLL, and on HSW a split-gamma/IPS workaround may
 * temporarily toggle IPS around the writes. */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	for (i = 0; i < 256; i++) {
		i915_reg_t palreg;

		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		/* pack 8-bit R/G/B into one 32-bit palette entry */
		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
4677
 
6084 serge 4678
/* Turn off the legacy video overlay attached to @intel_crtc, if any.
 * NOTE(review): the actual switch-off call is commented out in this
 * KolibriOS port — only the interruptible-mode toggling remains. */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
4695
 
6084 serge 4696
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
4736
 
6084 serge 4737
/**
 * intel_pre_disable_primary - Perform operations before disabling primary plane
 * @crtc: the CRTC whose primary plane is to be disabled
 *
 * Performs potentially sleeping operations that must be done before the
 * primary plane is disabled, such as updating FBC and IPS.  Note that this may
 * be called due to an explicit primary plane update, or due to an implicit
 * disable that is caused when a sprite plane completely hides the primary
 * plane.
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
5060 serge 4787
 
6084 serge 4788
/* Post-commit plane bookkeeping for @crtc: flush frontbuffer tracking,
 * re-allow CxSR, update watermarks if the commit deferred that to
 * post-update, finish FBC, and run post-enable-primary work. Clears the
 * one-shot crtc->atomic flags when done. */
static void intel_post_plane_update(struct intel_crtc *crtc)
{
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_device *dev = crtc->base.dev;

	intel_frontbuffer_flip(dev, atomic->fb_bits);

	crtc->wm.cxsr_allowed = true;

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(&crtc->base);

	if (atomic->update_fbc)
		intel_fbc_post_update(crtc);

	if (atomic->post_enable_primary)
		intel_post_enable_primary(&crtc->base);

	/* one-shot flags: reset for the next commit */
	memset(atomic, 0, sizeof(*atomic));
}
4810
 
7144 serge 4811
/* Pre-commit plane bookkeeping for the crtc whose old state is
 * @old_crtc_state: start FBC teardown, run pre-disable-primary work when
 * the primary plane is about to vanish (or a modeset happens), disable
 * CxSR if the new state forbids it, and update watermarks when the commit
 * wants that done pre-update (non-modeset only). */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);

	if (atomic->update_fbc)
		intel_fbc_pre_update(crtc);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		/* primary was visible and is going away (or full modeset) */
		if (old_primary_state->visible &&
		    (modeset || !primary_state->visible))
			intel_pre_disable_primary(&crtc->base);
	}

	if (pipe_config->disable_cxsr) {
		crtc->wm.cxsr_allowed = false;

		if (old_crtc_state->base.active)
			intel_set_memory_cxsr(dev_priv, false);
	}

	if (!needs_modeset(&pipe_config->base) && pipe_config->update_wm_pre)
		intel_update_watermarks(&crtc->base);
}
4849
 
4850
/* Disable every plane in @plane_mask on @crtc (plus the legacy overlay)
 * and report the whole pipe's frontbuffer bits as flipped. */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(p, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
4869
 
2327 Serge 4870
/* Full mode-set enable sequence for an ILK-class crtc. The ordering of
 * steps (DPLL prep, timings, FDI PLL before CPU pipe, LUT before pipe
 * start, PCH after pipe) is mandated by the hardware — do not reorder. */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	/* suppress spurious PCH underruns until the link is fully up */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev, pipe);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4945
 
4104 Serge 4946
/* IPS only exists on ULT machines and is tied to pipe A. */
4947
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4948
{
4949
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4950
}
4951
 
3243 Serge 4952
/* Full mode-set enable sequence for a HSW/DDI-class crtc (also used by
 * gen9+ and BXT DSI paths). Step ordering — shared DPLL, timings,
 * encoder pre_enable, FDI training, pipe clock, pfit, LUT, transcoder
 * func, pipe, LPT PCH — is dictated by hardware; do not reorder. */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	if (WARN_ON(intel_crtc->active))
		return;

	/* suppress spurious PCH underruns while the link comes up */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	/* DSI drives its own pipe clock */
	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* re-arm underrun reporting after the link has settled */
	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev, pipe);
		intel_wait_for_vblank(dev, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}
5057
 
6084 serge 5058
/*
 * Disable the panel fitter (pfit) for @crtc by zeroing its control,
 * window position and window size registers.
 *
 * @force: disable unconditionally, even if the cached state says the
 *         pfit is not enabled.
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}
5072
 
2327 Serge 5073
/*
 * Full modeset disable sequence for an ILK-style (PCH based) CRTC.
 *
 * The ordering is hardware-mandated: encoders are disabled first, then
 * vblanks are shut off, then the pipe, the pfit, the FDI link and finally
 * the PCH transcoder and its clock routing. Underrun reporting is toggled
 * around the steps that are known to generate spurious underruns.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/* Silence PCH-side underruns while the transcoder goes down. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_fdi_disable(crtc);
		/* FDI RX/TX now off: CPU underrun reporting is safe again. */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Everything is quiesced; re-arm PCH underrun reporting. */
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5137
 
3243 Serge 5138
/*
 * Full modeset disable sequence for a HSW+ (DDI based) CRTC.
 *
 * Mirrors the enable path in reverse: encoders, vblank, pipe, MST VC
 * payload, DDI transcoder, scaler/pfit, pipe clock, then (if present) the
 * LPT PCH transcoder and FDI. DSI pipes skip the DDI transcoder/clock
 * steps since they don't go through the DDI block.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* LPT-H only has transcoder A, hence TRANSCODER_A here. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	/* gen9+ has per-pipe scalers instead of the pfit block. */
	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		lpt_disable_iclkip(dev_priv);
		intel_ddi_fdi_disable(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}
}
5187
 
4104 Serge 5188
/*
 * Program and enable the GMCH panel fitter for @crtc from the values
 * precomputed in crtc->config->gmch_pfit. No-op if the computed control
 * word says the fitter isn't needed.
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
5211
 
5060 serge 5212
/*
 * Map a DDI port to the power domain that feeds its lanes.
 * Unknown ports fall back to POWER_DOMAIN_PORT_OTHER (with a warning).
 */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5230
 
6084 serge 5231
/*
 * Map a DDI port to the power domain needed for its AUX channel.
 * Unknown ports fall back to POWER_DOMAIN_AUX_A (with a warning).
 */
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_AUX_A;
	}
}
5250
 
5060 serge 5251
/*
 * Return the power domain an encoder's output port depends on.
 * DDI/DP/HDMI/eDP ports map through port_to_power_domain(); analog and
 * DSI have dedicated domains; everything else is POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through - treat UNKNOWN like a digital port */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5277
 
6084 serge 5278
/*
 * Return the power domain an encoder's AUX channel depends on.
 * Used for DP AUX transactions (DPCD access, link training, detection).
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through - resolve via the digital port like DP/eDP */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}
5307
 
7144 serge 5308
/*
 * Compute the bitmask of power domains @crtc needs for @crtc_state:
 * the pipe itself, its transcoder, the panel fitter (if enabled or
 * forced-through), and one domain per encoder attached in the state.
 * Returns 0 for an inactive CRTC.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
					    struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT(intel_display_port_power_domain(intel_encoder));
	}

	return mask;
}
5335
 
7144 serge 5336
/*
 * Grab the power domains newly required by @crtc_state and record them in
 * the crtc. Returns the mask of domains that were held before but are no
 * longer needed; the caller must release those (after the modeset) via
 * modeset_put_power_domains().
 */
static unsigned long
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains, new_domains, old_domains;

	old_domains = intel_crtc->enabled_power_domains;
	intel_crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc, crtc_state);

	/* Acquire only the domains we didn't already hold. */
	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	/* Domains to be dropped by the caller once it is safe to do so. */
	return old_domains & ~new_domains;
}
5060 serge 5356
 
6084 serge 5357
/*
 * Release every power domain set in @domains; counterpart to the get
 * side in modeset_get_crtc_power_domains().
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      unsigned long domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
5060 serge 5365
 
6084 serge 5366
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5060 serge 5367
{
6084 serge 5368
	int max_cdclk_freq = dev_priv->max_cdclk_freq;
4560 Serge 5369
 
6084 serge 5370
	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5371
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5372
		return max_cdclk_freq;
5373
	else if (IS_CHERRYVIEW(dev_priv))
5374
		return max_cdclk_freq*95/100;
5375
	else if (INTEL_INFO(dev_priv)->gen < 4)
5376
		return 2*max_cdclk_freq*90/100;
5377
	else
5378
		return max_cdclk_freq*90/100;
5379
}
4560 Serge 5380
 
6084 serge 5381
/*
 * Determine and cache the platform's maximum cdclk (and the derived
 * maximum dotclock) in dev_priv. On SKL/KBL the limit is read from the
 * SKL_DFSM fuse register; on BDW it depends on the SKU; VLV/CHV have
 * fixed caps; otherwise cdclk is assumed fixed at the current value.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5428
 
6084 serge 5429
/*
 * Re-read the current cdclk from hardware into dev_priv->cdclk_freq,
 * reprogram the VLV/CHV gmbus divider to match, and (first time only)
 * establish the max cdclk/dotclk limits.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->cdclk_freq);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/*
		 * Program the gmbus_freq based on the cdclk frequency.
		 * BSpec erroneously claims we should aim for 4MHz, but
		 * in fact 1MHz is the correct frequency.
		 */
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
	}

	/* Lazily initialize the max cdclk/dotclk caps on first call. */
	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);
}
5454
 
6084 serge 5455
static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5456
{
5457
	struct drm_i915_private *dev_priv = dev->dev_private;
5458
	uint32_t divider;
5459
	uint32_t ratio;
5460
	uint32_t current_freq;
5461
	int ret;
5462
 
5463
	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5464
	switch (frequency) {
5465
	case 144000:
5466
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5467
		ratio = BXT_DE_PLL_RATIO(60);
5468
		break;
5469
	case 288000:
5470
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5471
		ratio = BXT_DE_PLL_RATIO(60);
5472
		break;
5473
	case 384000:
5474
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5475
		ratio = BXT_DE_PLL_RATIO(60);
5476
		break;
5477
	case 576000:
5478
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5479
		ratio = BXT_DE_PLL_RATIO(60);
5480
		break;
5481
	case 624000:
5482
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5483
		ratio = BXT_DE_PLL_RATIO(65);
5484
		break;
5485
	case 19200:
5486
		/*
5487
		 * Bypass frequency with DE PLL disabled. Init ratio, divider
5488
		 * to suppress GCC warning.
5489
		 */
5490
		ratio = 0;
5491
		divider = 0;
5492
		break;
5493
	default:
5494
		DRM_ERROR("unsupported CDCLK freq %d", frequency);
5495
 
5496
		return;
5497
	}
5498
 
5499
	mutex_lock(&dev_priv->rps.hw_lock);
5500
	/* Inform power controller of upcoming frequency change */
5501
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5502
				      0x80000000);
5503
	mutex_unlock(&dev_priv->rps.hw_lock);
5504
 
5505
	if (ret) {
5506
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5507
			  ret, frequency);
5508
		return;
5509
	}
5510
 
5511
	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5512
	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5513
	current_freq = current_freq * 500 + 1000;
5514
 
5515
	/*
5516
	 * DE PLL has to be disabled when
5517
	 * - setting to 19.2MHz (bypass, PLL isn't used)
5518
	 * - before setting to 624MHz (PLL needs toggling)
5519
	 * - before setting to any frequency from 624MHz (PLL needs toggling)
5520
	 */
5521
	if (frequency == 19200 || frequency == 624000 ||
5522
	    current_freq == 624000) {
5523
		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5524
		/* Timeout 200us */
5525
		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5526
			     1))
5527
			DRM_ERROR("timout waiting for DE PLL unlock\n");
5528
	}
5529
 
5530
	if (frequency != 19200) {
5531
		uint32_t val;
5532
 
5533
		val = I915_READ(BXT_DE_PLL_CTL);
5534
		val &= ~BXT_DE_PLL_RATIO_MASK;
5535
		val |= ratio;
5536
		I915_WRITE(BXT_DE_PLL_CTL, val);
5537
 
5538
		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5539
		/* Timeout 200us */
5540
		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5541
			DRM_ERROR("timeout waiting for DE PLL lock\n");
5542
 
5543
		val = I915_READ(CDCLK_CTL);
5544
		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5545
		val |= divider;
5546
		/*
5547
		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5548
		 * enable otherwise.
5549
		 */
5550
		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5551
		if (frequency >= 500000)
5552
			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5553
 
5554
		val &= ~CDCLK_FREQ_DECIMAL_MASK;
5555
		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5556
		val |= (frequency - 1000) / 500;
5557
		I915_WRITE(CDCLK_CTL, val);
5558
	}
5559
 
5560
	mutex_lock(&dev_priv->rps.hw_lock);
5561
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5562
				      DIV_ROUND_UP(frequency, 25000));
5563
	mutex_unlock(&dev_priv->rps.hw_lock);
5564
 
5565
	if (ret) {
5566
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5567
			  ret, frequency);
5568
		return;
5569
	}
5570
 
5571
	intel_update_cdclk(dev);
5572
}
5573
 
5574
/*
 * One-time BXT display clock bring-up: disable the PCH reset handshake
 * (there is no PCH on BXT), power up PG1, program an initial cdclk if the
 * DE PLL isn't already running, and enable DBUF power.
 */
void broxton_init_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val;

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 for cdclk */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* check if cd clock is enabled */
	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
		DRM_DEBUG_KMS("Display already initialized\n");
		return;
	}

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 * - check if setting the max (or any) cdclk freq is really necessary
	 *   here, it belongs to modeset time
	 */
	broxton_set_cdclk(dev, 624000);

	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout!\n");
}
5615
 
5616
/*
 * BXT display clock teardown: power down DBUF, drop cdclk to the 19.2 MHz
 * bypass (which turns off the DE PLL) and release the PG1 power reference
 * taken by broxton_init_cdclk().
 */
void broxton_uninit_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");

	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
	broxton_set_cdclk(dev, 19200);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5633
 
5634
/*
 * Valid SKL cdclk frequencies (kHz) and the DPLL0 VCO (MHz) each one
 * requires; consumed by skl_cdclk_get_vco().
 */
static const struct skl_cdclk_entry {
	unsigned int freq;	/* cdclk in kHz */
	unsigned int vco;	/* required DPLL0 VCO in MHz */
} skl_cdclk_frequencies[] = {
	{ .freq = 308570, .vco = 8640 },
	{ .freq = 337500, .vco = 8100 },
	{ .freq = 432000, .vco = 8640 },
	{ .freq = 450000, .vco = 8100 },
	{ .freq = 540000, .vco = 8100 },
	{ .freq = 617140, .vco = 8640 },
	{ .freq = 675000, .vco = 8100 },
};
5646
 
5647
/*
 * Convert a cdclk frequency in kHz to the CDCLK_CTL decimal field
 * encoding: .1 fixpoint MHz with a -1 MHz offset.
 */
static unsigned int skl_cdclk_decimal(unsigned int freq)
{
	unsigned int offset_khz = freq - 1000;

	return offset_khz / 500;
}
5651
 
5652
static unsigned int skl_cdclk_get_vco(unsigned int freq)
5653
{
5654
	unsigned int i;
5655
 
5656
	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5657
		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5658
 
5659
		if (e->freq == freq)
5660
			return e->vco;
5661
	}
5662
 
5663
	return 8100;
5664
}
5665
 
5666
static void
5667
skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5668
{
5669
	unsigned int min_freq;
5670
	u32 val;
5671
 
5672
	/* select the minimum CDCLK before enabling DPLL 0 */
5673
	val = I915_READ(CDCLK_CTL);
5674
	val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5675
	val |= CDCLK_FREQ_337_308;
5676
 
5677
	if (required_vco == 8640)
5678
		min_freq = 308570;
5679
	else
5680
		min_freq = 337500;
5681
 
5682
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5683
 
5684
	I915_WRITE(CDCLK_CTL, val);
5685
	POSTING_READ(CDCLK_CTL);
5686
 
5687
	/*
5688
	 * We always enable DPLL0 with the lowest link rate possible, but still
5689
	 * taking into account the VCO required to operate the eDP panel at the
5690
	 * desired frequency. The usual DP link rates operate with a VCO of
5691
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5692
	 * The modeset code is responsible for the selection of the exact link
5693
	 * rate later on, with the constraint of choosing a frequency that
5694
	 * works with required_vco.
5695
	 */
5696
	val = I915_READ(DPLL_CTRL1);
5697
 
5698
	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5699
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5700
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5701
	if (required_vco == 8640)
5702
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5703
					    SKL_DPLL0);
5704
	else
5705
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5706
					    SKL_DPLL0);
5707
 
5708
	I915_WRITE(DPLL_CTRL1, val);
5709
	POSTING_READ(DPLL_CTRL1);
5710
 
5711
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5712
 
5713
	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5714
		DRM_ERROR("DPLL0 not locked\n");
5715
}
5716
 
5717
/*
 * Ask the PCU (via pcode) whether it is ready for a cdclk change.
 * Returns true only if the mailbox read succeeded and the ready bit is
 * set in the response.
 */
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* inform PCU we want to change CDCLK */
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}
5730
 
5731
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5732
{
5733
	unsigned int i;
5734
 
5735
	for (i = 0; i < 15; i++) {
5736
		if (skl_cdclk_pcu_ready(dev_priv))
5737
			return true;
5738
		udelay(10);
5739
	}
5740
 
5741
	return false;
5742
}
5743
 
5744
/*
 * Program the SKL cdclk to @freq (kHz): wait for PCU readiness, write the
 * frequency select + decimal encoding into CDCLK_CTL, then acknowledge
 * the change to the PCU with the matching voltage/frequency level.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
	struct drm_device *dev = dev_priv->dev;
	u32 freq_select, pcu_ack;

	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch(freq) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308570:
	case 337500:
	default:
		/*
		 * NOTE: default sits mid-switch on purpose - any unknown
		 * frequency is treated like the lowest (337.5/308.57) bin.
		 */
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617140:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5790
 
5791
/*
 * SKL display clock teardown: power down DBUF, then disable DPLL0 and
 * wait for it to report unlocked.
 */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	/* disable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");

	/* disable DPLL0 */
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
		DRM_ERROR("Couldn't disable DPLL0\n");
}
6084 serge 5807
 
5808
/*
 * SKL display clock bring-up: enable DPLL0 if the BIOS left it off,
 * set cdclk to the BIOS-chosen boot frequency, and power up DBUF.
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	unsigned int required_vco;

	/* DPLL0 not enabled (happens on early BIOS versions) */
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
		/* enable DPLL0 */
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
		skl_dpll0_enable(dev_priv, required_vco);
	}

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}
5831
 
6937 serge 5832
/*
 * Verify the cdclk state left behind by the pre-OS firmware and
 * reinitialize it if anything is off (PLL not locked, decimal field
 * mis-programmed, or display never initialized).
 *
 * Returns a boolean carried in an int: true (1) if sanitization was
 * performed, false (0) if the BIOS state was already good.
 */
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	int freq = dev_priv->skl_boot_cdclk;

	/*
	 * check if the pre-os intialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	/* Is PLL enabled and locked ? */
	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
		/* All well; nothing to sanitize */
		return false;
sanitize:
	/*
	 * As of now initialize with max cdclk till
	 * we get dynamic cdclk support
	 * */
	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
	skl_init_cdclk(dev_priv);

	/* we did have to sanitize */
	return true;
}
5870
 
4560 Serge 5871
/* Adjust CDclk dividers to allow high res or save power if possible */
/*
 * VLV cdclk change: request the matching voltage level from the Punit,
 * reprogram the CCK display clock divider (only needed for 400 MHz,
 * which requires dividing hpll*2), and adjust the BUnit self-refresh
 * exit latency for the new bandwidth.
 */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the new voltage level and wait for the Punit to apply it. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}
5936
 
5354 serge 5937
/*
 * CHV cdclk change. Unlike VLV, the Punit handles the divider itself:
 * we just write the desired CCK divider value into the Punit frequency
 * request field and wait for it to take effect.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	/* Only the four known CHV cdclk bins are programmable. */
	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5977
 
4560 Serge 5978
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5979
				 int max_pixclk)
5980
{
5354 serge 5981
	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
6084 serge 5982
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
4560 Serge 5983
 
5984
	/*
5985
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
5986
	 *   200MHz
5987
	 *   267MHz
5060 serge 5988
	 *   320/333MHz (depends on HPLL freq)
6084 serge 5989
	 *   400MHz (VLV only)
5990
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5991
	 * of the lower bin and adjust if needed.
5060 serge 5992
	 *
5993
	 * We seem to get an unstable or solid color picture at 200MHz.
5994
	 * Not sure what's wrong. For now use 200MHz only when all pipes
5995
	 * are off.
4560 Serge 5996
	 */
6084 serge 5997
	if (!IS_CHERRYVIEW(dev_priv) &&
5998
	    max_pixclk > freq_320*limit/100)
5060 serge 5999
		return 400000;
6084 serge 6000
	else if (max_pixclk > 266667*limit/100)
5060 serge 6001
		return freq_320;
6002
	else if (max_pixclk > 0)
6003
		return 266667;
6004
	else
6005
		return 200000;
4560 Serge 6006
}
6007
 
6084 serge 6008
/*
 * Pick the smallest BXT cdclk (kHz) whose 90% guardband still covers the
 * requested max pixel clock. @dev_priv is unused but kept for signature
 * parity with the other *_calc_cdclk() helpers.
 * FIXME:
 * - remove the guardband, it's not needed on BXT
 * - set 19.2MHz bypass frequency if there are no active pipes
 */
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
			      int max_pixclk)
{
	static const int bxt_cdclks[] = { 144000, 288000, 384000, 576000 };
	int i;

	for (i = 0; i < (int)(sizeof(bxt_cdclks) / sizeof(bxt_cdclks[0])); i++) {
		if (max_pixclk <= bxt_cdclks[i] * 9 / 10)
			return bxt_cdclks[i];
	}

	return 624000;
}
6027
 
7144 serge 6028
/* Compute the max pixel clock for new configuration. */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned max_pixclk = 0, i;
	enum pipe pipe;

	/* Start from the currently committed per-pipe pixel clocks... */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	/* ...and overwrite the entries for every crtc touched by this state. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		int pixclk = 0;

		/* A crtc being disabled contributes nothing. */
		if (crtc_state->enable)
			pixclk = crtc_state->adjusted_mode.crtc_clock;

		intel_state->min_pixclk[i] = pixclk;
	}

	/* The answer is the maximum over all pipes, not just the updated ones. */
	for_each_pipe(dev_priv, pipe)
		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);

	return max_pixclk;
}
6056
 
6084 serge 6057
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
4560 Serge 6058
{
6084 serge 6059
	struct drm_device *dev = state->dev;
4560 Serge 6060
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 6061
	int max_pixclk = intel_mode_max_pixclk(dev, state);
7144 serge 6062
	struct intel_atomic_state *intel_state =
6063
		to_intel_atomic_state(state);
4560 Serge 6064
 
6084 serge 6065
	if (max_pixclk < 0)
6066
		return max_pixclk;
4560 Serge 6067
 
7144 serge 6068
	intel_state->cdclk = intel_state->dev_cdclk =
6084 serge 6069
		valleyview_calc_cdclk(dev_priv, max_pixclk);
6070
 
7144 serge 6071
	if (!intel_state->active_crtcs)
6072
		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
6073
 
6084 serge 6074
	return 0;
4560 Serge 6075
}
6076
 
6084 serge 6077
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
4560 Serge 6078
{
6084 serge 6079
	struct drm_device *dev = state->dev;
4560 Serge 6080
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 6081
	int max_pixclk = intel_mode_max_pixclk(dev, state);
7144 serge 6082
	struct intel_atomic_state *intel_state =
6083
		to_intel_atomic_state(state);
4560 Serge 6084
 
6084 serge 6085
	if (max_pixclk < 0)
6086
		return max_pixclk;
5354 serge 6087
 
7144 serge 6088
	intel_state->cdclk = intel_state->dev_cdclk =
6084 serge 6089
		broxton_calc_cdclk(dev_priv, max_pixclk);
6090
 
7144 serge 6091
	if (!intel_state->active_crtcs)
6092
		intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
6093
 
6084 serge 6094
	return 0;
6095
}
6096
 
6097
/*
 * Program the PFI credits for VLV/CHV after a cdclk change. Higher credits
 * are used when cdclk >= czclk; otherwise the platform default applies.
 * Note the workaround: default credits are written first, then the real
 * value together with the resend bit.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	/* Now the real value, with the resend bit to latch it. */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6132
 
6133
/*
 * Commit-phase cdclk change for VLV/CHV: program the cdclk computed during
 * the check phase (dev_cdclk) and then update the PFI credits to match.
 * The whole sequence is bracketed by a PIPE-A power domain reference so the
 * required HW blocks are powered (see FIXME below).
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	/* Credits depend on the new cdclk/czclk ratio. */
	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6161
 
4104 Serge 6162
/*
 * Full crtc enable sequence for VLV/CHV. The ordering below is
 * hardware-mandated: timings/pipeconf first, then encoder pre-pll hooks,
 * PLL enable (skipped for DSI, which drives its own PLL), encoder
 * pre-enable hooks, pfit/LUT, pipe enable, vblank, and finally the
 * encoder enable hooks.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	/* CHV pipe B: force legacy blending and a zeroed canvas color. */
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	/* DSI encoders own their PLL; everything else uses the pipe PLL. */
	if (!intel_crtc->config->has_dsi_encoder) {
		if (IS_CHERRYVIEW(dev)) {
			chv_prepare_pll(intel_crtc, intel_crtc->config);
			chv_enable_pll(intel_crtc, intel_crtc->config);
		} else {
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
			vlv_enable_pll(intel_crtc, intel_crtc->config);
		}
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	/* Watermarks must be valid before the pipe starts fetching. */
	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6222
 
5060 serge 6223
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6224
{
6225
	struct drm_device *dev = crtc->base.dev;
6226
	struct drm_i915_private *dev_priv = dev->dev_private;
6227
 
6084 serge 6228
	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6229
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
5060 serge 6230
}
6231
 
2327 Serge 6232
/*
 * Full crtc enable sequence for gen2-gen4 (non-VLV) platforms. Ordering is
 * hardware-mandated: PLL dividers and timings first, then encoder
 * pre-enable hooks, PLL enable, pfit/LUT, pipe enable, vblank, and finally
 * the encoder enable hooks.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	/* Watermarks must be valid before the pipe starts fetching. */
	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6276
 
3746 Serge 6277
static void i9xx_pfit_disable(struct intel_crtc *crtc)
6278
{
6279
	struct drm_device *dev = crtc->base.dev;
6280
	struct drm_i915_private *dev_priv = dev->dev_private;
6281
 
6084 serge 6282
	if (!crtc->config->gmch_pfit.control)
4104 Serge 6283
		return;
6284
 
3746 Serge 6285
	assert_pipe_disabled(dev_priv, crtc->pipe);
6286
 
4104 Serge 6287
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6288
			 I915_READ(PFIT_CONTROL));
6084 serge 6289
	I915_WRITE(PFIT_CONTROL, 0);
3746 Serge 6290
}
6291
 
2327 Serge 6292
/*
 * Full crtc disable sequence for gen2-gen4/VLV/CHV: wait for plane
 * shutdown, encoder disable hooks, vblank off, pipe off, pfit off,
 * encoder post-disable hooks, PLL off (not for DSI, which owns its PLL),
 * encoder post-pll-disable hooks, and finally underrun reporting off.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI encoders own their PLL; leave it alone here. */
	if (!intel_crtc->config->has_dsi_encoder) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6338
 
6084 serge 6339
/*
 * Disable a crtc outside of a full atomic commit (e.g. during HW state
 * sanitization): tear down the primary plane, call the platform crtc
 * disable hook, drop the crtc's power domain references and clear the
 * per-pipe bookkeeping in dev_priv.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (!intel_crtc->active)
		return;

	if (to_intel_plane_state(crtc->primary->state)->visible) {
		/* No page flip should be pending at this point. */
		WARN_ON(intel_crtc->unpin_work);

		intel_pre_disable_primary(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->active = false;
	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Release every power domain this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}
2327 Serge 6372
 
6084 serge 6373
/*
6374
 * turn all crtc's off, but do not adjust state
6375
 * This has to be paired with a call to intel_modeset_setup_hw_state.
3031 serge 6376
 */
6084 serge 6377
int intel_display_suspend(struct drm_device *dev)
3031 serge 6378
{
7144 serge 6379
	struct drm_i915_private *dev_priv = to_i915(dev);
6084 serge 6380
	struct drm_atomic_state *state;
7144 serge 6381
	int ret;
3031 serge 6382
 
7144 serge 6383
	state = drm_atomic_helper_suspend(dev);
6384
	ret = PTR_ERR_OR_ZERO(state);
6084 serge 6385
	if (ret)
6386
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7144 serge 6387
	else
6388
		dev_priv->modeset_restore_state = state;
6084 serge 6389
	return ret;
2330 Serge 6390
}
2327 Serge 6391
 
3031 serge 6392
void intel_encoder_destroy(struct drm_encoder *encoder)
2330 Serge 6393
{
3031 serge 6394
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6395
 
6396
	drm_encoder_cleanup(encoder);
6397
	kfree(intel_encoder);
2330 Serge 6398
}
2327 Serge 6399
 
3031 serge 6400
/* Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency): connector <-> encoder <-> crtc links must all
 * agree with what the hardware reports. */
static void intel_connector_check_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* HW says enabled: the SW links must be fully populated. */
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST connectors share encoders; the checks below don't apply. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* HW says disabled: no active crtc/encoder may remain linked. */
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
2327 Serge 6438
 
6084 serge 6439
int intel_connector_init(struct intel_connector *connector)
2330 Serge 6440
{
6937 serge 6441
	drm_atomic_helper_connector_reset(&connector->base);
2342 Serge 6442
 
6937 serge 6443
	if (!connector->base.state)
6084 serge 6444
		return -ENOMEM;
3031 serge 6445
 
6084 serge 6446
	return 0;
6447
}
3031 serge 6448
 
6084 serge 6449
struct intel_connector *intel_connector_alloc(void)
6450
{
6451
	struct intel_connector *connector;
3031 serge 6452
 
6084 serge 6453
	connector = kzalloc(sizeof *connector, GFP_KERNEL);
6454
	if (!connector)
6455
		return NULL;
6456
 
6457
	if (intel_connector_init(connector) < 0) {
6458
		kfree(connector);
6459
		return NULL;
6460
	}
6461
 
6462
	return connector;
2330 Serge 6463
}
2327 Serge 6464
 
3031 serge 6465
/* Simple connector->get_hw_state implementation for encoders that support only
6466
 * one connector and no cloning and hence the encoder state determines the state
6467
 * of the connector. */
6468
bool intel_connector_get_hw_state(struct intel_connector *connector)
2330 Serge 6469
{
3031 serge 6470
	enum pipe pipe = 0;
6471
	struct intel_encoder *encoder = connector->encoder;
2330 Serge 6472
 
3031 serge 6473
	return encoder->get_hw_state(encoder, &pipe);
2330 Serge 6474
}
6475
 
6084 serge 6476
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
4104 Serge 6477
{
6084 serge 6478
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6479
		return crtc_state->fdi_lanes;
4104 Serge 6480
 
6084 serge 6481
	return 0;
6482
}
6483
 
6484
/*
 * Validate the FDI lane count for @pipe against per-platform limits and
 * against the lanes consumed by the sibling pipe (IVB pipes B and C share
 * FDI bandwidth). Returns 0 if OK, -EINVAL when the config cannot work,
 * or a PTR_ERR from fetching the sibling crtc state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute hardware maximum. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW (LPT FDI) only supports 2 lanes, on any pipe. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no lane sharing to worry about. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		/* Pipe A has dedicated lanes. */
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B only works if pipe C uses no FDI lanes. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C can never use more than 2 lanes... */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* ...and only if pipe B is not hogging them. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6554
 
6555
/* Sentinel return value: caller must redo the whole pipe config computation. */
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	/*
	 * If the lane config is invalid, try again with a lower bpp
	 * (down to 18bpp = 6*3) before giving up.
	 */
	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
				       intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* bpp changed: the caller must recompute everything with the new bpp. */
	if (needs_recompute)
		return RETRY;

	return ret;
}
6601
 
6084 serge 6602
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6603
				     struct intel_crtc_state *pipe_config)
6604
{
6605
	if (pipe_config->pipe_bpp > 24)
6606
		return false;
6607
 
6608
	/* HSW can handle pixel rate up to cdclk? */
6609
	if (IS_HASWELL(dev_priv->dev))
6610
		return true;
6611
 
6612
	/*
6613
	 * We compare against max which means we must take
6614
	 * the increased cdclk requirement into account when
6615
	 * calculating the new cdclk.
6616
	 *
6617
	 * Should measure whether using a lower cdclk w/o IPS
6618
	 */
6619
	return ilk_pipe_pixel_rate(pipe_config) <=
6620
		dev_priv->max_cdclk_freq * 95 / 100;
6621
}
6622
 
4104 Serge 6623
static void hsw_compute_ips_config(struct intel_crtc *crtc,
6084 serge 6624
				   struct intel_crtc_state *pipe_config)
4104 Serge 6625
{
6084 serge 6626
	struct drm_device *dev = crtc->base.dev;
6627
	struct drm_i915_private *dev_priv = dev->dev_private;
6628
 
5060 serge 6629
	pipe_config->ips_enabled = i915.enable_ips &&
6084 serge 6630
		hsw_crtc_supports_ips(crtc) &&
6631
		pipe_config_supports_ips(dev_priv, pipe_config);
4104 Serge 6632
}
6633
 
6937 serge 6634
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6635
{
6636
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6637
 
6638
	/* GDG double wide on either pipe, otherwise pipe A only */
6639
	return INTEL_INFO(dev_priv)->gen < 4 &&
6640
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6641
}
6642
 
4104 Serge 6643
/*
 * Platform-independent pipe config fixups and validation: pixel clock /
 * double wide limits on gen2/3, horizontal size parity constraints, the
 * hsync-offset workaround, IPS, and FDI configuration for PCH encoders.
 * Returns 0, -EINVAL for an impossible mode, or RETRY from the FDI code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		/* Even double wide may not be enough. */
		if (adjusted_mode->crtc_clock > clock_limit) {
			DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
				      adjusted_mode->crtc_clock, clock_limit,
				      yesno(pipe_config->double_wide));
			return -EINVAL;
		}
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	/* PCH encoders need a valid FDI link configuration too. */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6697
 
6084 serge 6698
/*
 * Read back the current SKL cdclk (kHz) from LCPLL1/CDCLK_CTL. The meaning
 * of the CDCLK_FREQ field depends on whether DPLL0 runs off the 8640 or
 * 8100 VCO, which is inferred from the DPLL0 link rate.
 */
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t linkrate;

	/* With DPLL0 off we run on the 24 MHz bypass clock. */
	if (!(lcpll1 & LCPLL_PLL_ENABLE))
		return 24000; /* 24MHz is the cd freq with NSSC ref */

	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
		return 540000;

	linkrate = (I915_READ(DPLL_CTRL1) &
		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;

	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
		/* vco 8640 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308570;
		case CDCLK_FREQ_675_617:
			return 617140;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	} else {
		/* vco 8100 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	}

	/* error case, do as if DPLL0 isn't enabled */
	return 24000;
}
5060 serge 6744
 
6084 serge 6745
/*
 * Read back the current BXT cdclk (kHz): 19.2 MHz bypass when the DE PLL
 * is off, otherwise the PLL frequency (19.2 MHz * ratio / 2) divided by
 * the CD2X divider from CDCLK_CTL.
 */
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
	int cdclk;

	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
		return 19200;

	cdclk = 19200 * pll_ratio / 2;

	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		return cdclk;  /* 576MHz or 624MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		return cdclk * 2 / 3; /* 384MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		return cdclk / 2; /* 288MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		return cdclk / 4; /* 144MHz */
	}

	/* error case, do as if DE PLL isn't enabled */
	return 19200;
}
6772
 
6084 serge 6773
static int broadwell_get_display_clock_speed(struct drm_device *dev)
6774
{
6775
	struct drm_i915_private *dev_priv = dev->dev_private;
6776
	uint32_t lcpll = I915_READ(LCPLL_CTL);
6777
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6778
 
6779
	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6780
		return 800000;
6781
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6782
		return 450000;
6783
	else if (freq == LCPLL_CLK_FREQ_450)
6784
		return 450000;
6785
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6786
		return 540000;
6787
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6788
		return 337500;
6789
	else
6790
		return 675000;
6791
}
6792
 
6793
static int haswell_get_display_clock_speed(struct drm_device *dev)
6794
{
6795
	struct drm_i915_private *dev_priv = dev->dev_private;
6796
	uint32_t lcpll = I915_READ(LCPLL_CTL);
6797
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6798
 
6799
	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6800
		return 800000;
6801
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6802
		return 450000;
6803
	else if (freq == LCPLL_CLK_FREQ_450)
6804
		return 450000;
6805
	else if (IS_HSW_ULT(dev))
6806
		return 337500;
6807
	else
6808
		return 540000;
6809
}
6810
 
6811
static int valleyview_get_display_clock_speed(struct drm_device *dev)
6812
{
6813
	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6814
				      CCK_DISPLAY_CLOCK_CONTROL);
6815
}
6816
 
6817
/* ILK display core clock is a fixed 450 MHz. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}
6821
 
2327 Serge 6822
/* 945 display core clock is a fixed 400 MHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
6826
 
6827
/* 915 display core clock is a fixed 333.333 MHz. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}
6831
 
6832
/* Fallback for the remaining i9xx variants: fixed 200 MHz core clock. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
6836
 
4104 Serge 6837
/*
 * Read the Pineview display core clock (kHz) from the GCFGC PCI config
 * register. Unknown field values are logged and treated as 133 MHz.
 */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through: treat unknown values as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}
6860
 
2327 Serge 6861
static int i915gm_get_display_clock_speed(struct drm_device *dev)
6862
{
6863
	u16 gcfgc = 0;
6864
 
6865
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6866
 
6867
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6084 serge 6868
		return 133333;
2327 Serge 6869
	else {
6870
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6871
		case GC_DISPLAY_CLOCK_333_MHZ:
6084 serge 6872
			return 333333;
2327 Serge 6873
		default:
6874
		case GC_DISPLAY_CLOCK_190_200_MHZ:
6875
			return 190000;
6876
		}
6877
	}
6878
}
6879
 
6880
/* i865: the display core clock is a fixed 266.667 MHz. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	const int cdclk_khz = 266667;

	return cdclk_khz;
}
6884
 
6084 serge 6885
/* Determine the 852/855 display clock (kHz) from the HPLL clock control. */
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (dev->pdev->revision == 0x1)
		return 133333;

	/*
	 * NOTE(review): the config-space read below is stubbed out in this
	 * port, so hpllcc stays 0 and the switch always sees value 0 —
	 * i.e. the result is whatever case encodes 0 (presumably one of the
	 * 200 MHz cases). Confirm against the KolibriOS PCI layer before
	 * re-enabling the read.
	 */
//   pci_bus_read_config_word(dev->pdev->bus,
//                PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		return 266667;
	}

	/* Shouldn't happen */
	return 0;
}
6921
 
6922
/* i830: the display core clock is a fixed 133.333 MHz. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	const int cdclk_khz = 133333;

	return cdclk_khz;
}
6926
 
6084 serge 6927
/*
 * Look up the HPLL VCO frequency (kHz) for the current chipset from the
 * HPLLVCO strap register. Returns 0 (with an error logged) for unknown
 * strap encodings, and 0 silently for unsupported chipsets.
 */
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Per-chipset VCO tables, indexed by the 3-bit HPLLVCO strap value.
	 * Unlisted indices are implicitly 0 and treated as invalid below. */
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	uint8_t tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev))
		vco_table = ctg_vco;
	else if (IS_G4X(dev))
		vco_table = elk_vco;
	else if (IS_CRESTLINE(dev))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev))
		vco_table = pnv_vco;
	else if (IS_G33(dev))
		vco_table = blb_vco;
	else
		return 0;

	/* Mobile parts expose the strap in a different register. */
	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
	else
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);

	return vco;
}
6995
 
6996
static int gm45_get_display_clock_speed(struct drm_device *dev)
6997
{
6998
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6999
	uint16_t tmp = 0;
7000
 
7001
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7002
 
7003
	cdclk_sel = (tmp >> 12) & 0x1;
7004
 
7005
	switch (vco) {
7006
	case 2666667:
7007
	case 4000000:
7008
	case 5333333:
7009
		return cdclk_sel ? 333333 : 222222;
7010
	case 3200000:
7011
		return cdclk_sel ? 320000 : 228571;
7012
	default:
7013
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7014
		return 222222;
7015
	}
7016
}
7017
 
7018
static int i965gm_get_display_clock_speed(struct drm_device *dev)
7019
{
7020
	static const uint8_t div_3200[] = { 16, 10,  8 };
7021
	static const uint8_t div_4000[] = { 20, 12, 10 };
7022
	static const uint8_t div_5333[] = { 24, 16, 14 };
7023
	const uint8_t *div_table;
7024
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7025
	uint16_t tmp = 0;
7026
 
7027
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7028
 
7029
	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
7030
 
7031
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7032
		goto fail;
7033
 
7034
	switch (vco) {
7035
	case 3200000:
7036
		div_table = div_3200;
7037
		break;
7038
	case 4000000:
7039
		div_table = div_4000;
7040
		break;
7041
	case 5333333:
7042
		div_table = div_5333;
7043
		break;
7044
	default:
7045
		goto fail;
7046
	}
7047
 
7048
	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7049
 
7050
fail:
7051
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7052
	return 200000;
7053
}
7054
 
7055
static int g33_get_display_clock_speed(struct drm_device *dev)
7056
{
7057
	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7058
	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7059
	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7060
	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7061
	const uint8_t *div_table;
7062
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7063
	uint16_t tmp = 0;
7064
 
7065
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7066
 
7067
	cdclk_sel = (tmp >> 4) & 0x7;
7068
 
7069
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7070
		goto fail;
7071
 
7072
	switch (vco) {
7073
	case 3200000:
7074
		div_table = div_3200;
7075
		break;
7076
	case 4000000:
7077
		div_table = div_4000;
7078
		break;
7079
	case 4800000:
7080
		div_table = div_4800;
7081
		break;
7082
	case 5333333:
7083
		div_table = div_5333;
7084
		break;
7085
	default:
7086
		goto fail;
7087
	}
7088
 
7089
	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7090
 
7091
fail:
7092
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7093
	return 190476;
7094
}
7095
 
2327 Serge 7096
static void
3746 Serge 7097
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2327 Serge 7098
{
3746 Serge 7099
	while (*num > DATA_LINK_M_N_MASK ||
7100
	       *den > DATA_LINK_M_N_MASK) {
2327 Serge 7101
		*num >>= 1;
7102
		*den >>= 1;
7103
	}
7104
}
7105
 
3746 Serge 7106
static void compute_m_n(unsigned int m, unsigned int n,
7107
			uint32_t *ret_m, uint32_t *ret_n)
7108
{
7109
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7110
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
7111
	intel_reduce_m_n_ratio(ret_m, ret_n);
7112
}
7113
 
3480 Serge 7114
void
7115
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7116
		       int pixel_clock, int link_clock,
7117
		       struct intel_link_m_n *m_n)
2327 Serge 7118
{
3480 Serge 7119
	m_n->tu = 64;
3746 Serge 7120
 
7121
	compute_m_n(bits_per_pixel * pixel_clock,
7122
		    link_clock * nlanes * 8,
7123
		    &m_n->gmch_m, &m_n->gmch_n);
7124
 
7125
	compute_m_n(pixel_clock, link_clock,
7126
		    &m_n->link_m, &m_n->link_n);
2327 Serge 7127
}
7128
 
7129
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7130
{
5060 serge 7131
	if (i915.panel_use_ssc >= 0)
7132
		return i915.panel_use_ssc != 0;
4104 Serge 7133
	return dev_priv->vbt.lvds_use_ssc
2327 Serge 7134
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7135
}
7136
 
6084 serge 7137
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
7138
			   int num_connectors)
3031 serge 7139
{
6084 serge 7140
	struct drm_device *dev = crtc_state->base.crtc->dev;
3031 serge 7141
	struct drm_i915_private *dev_priv = dev->dev_private;
7142
	int refclk;
2327 Serge 7143
 
6084 serge 7144
	WARN_ON(!crtc_state->base.state);
7145
 
6937 serge 7146
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
4560 Serge 7147
		refclk = 100000;
6084 serge 7148
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
3031 serge 7149
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 7150
		refclk = dev_priv->vbt.lvds_ssc_freq;
7151
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
3031 serge 7152
	} else if (!IS_GEN2(dev)) {
7153
		refclk = 96000;
7154
	} else {
7155
		refclk = 48000;
7156
	}
2327 Serge 7157
 
3031 serge 7158
	return refclk;
7159
}
2327 Serge 7160
 
4104 Serge 7161
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
3031 serge 7162
{
4104 Serge 7163
	return (1 << dpll->n) << 16 | dpll->m2;
7164
}
3746 Serge 7165
 
4104 Serge 7166
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7167
{
7168
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
3031 serge 7169
}
2327 Serge 7170
 
3746 Serge 7171
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6084 serge 7172
				     struct intel_crtc_state *crtc_state,
3031 serge 7173
				     intel_clock_t *reduced_clock)
7174
{
3746 Serge 7175
	struct drm_device *dev = crtc->base.dev;
3031 serge 7176
	u32 fp, fp2 = 0;
2327 Serge 7177
 
3031 serge 7178
	if (IS_PINEVIEW(dev)) {
6084 serge 7179
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
3031 serge 7180
		if (reduced_clock)
4104 Serge 7181
			fp2 = pnv_dpll_compute_fp(reduced_clock);
3031 serge 7182
	} else {
6084 serge 7183
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
3031 serge 7184
		if (reduced_clock)
4104 Serge 7185
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
3031 serge 7186
	}
2327 Serge 7187
 
6084 serge 7188
	crtc_state->dpll_hw_state.fp0 = fp;
2327 Serge 7189
 
3746 Serge 7190
	crtc->lowfreq_avail = false;
6084 serge 7191
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7192
	    reduced_clock) {
7193
		crtc_state->dpll_hw_state.fp1 = fp2;
3746 Serge 7194
		crtc->lowfreq_avail = true;
3031 serge 7195
	} else {
6084 serge 7196
		crtc_state->dpll_hw_state.fp1 = fp;
3031 serge 7197
	}
7198
}
2327 Serge 7199
 
4560 Serge 7200
/*
 * Work around PLLB opamp calibration by forcing a sane value through the
 * DPIO sideband. Caller must hold the sideband lock. The DW9/REF_DW13
 * values are magic from the VLV programming notes — do not reorder the
 * read-modify-write sequence.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): plain '=' discards the masked read value; this
	 * matches the upstream sequence — confirm intentional. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7228
 
7229
/*
 * Program the data (gmch) and link M/N values into the PCH transcoder
 * registers for this crtc's pipe. The TU size is folded into DATA_M1.
 */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7241
 
7242
/*
 * Program the data/link M/N values into the CPU transcoder. On gen5+ the
 * registers are per-transcoder; older parts use per-pipe G4X registers.
 * @m2_n2 is the optional second set used for DRRS (may be NULL).
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
			crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7275
 
6084 serge 7276
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
3031 serge 7277
{
6084 serge 7278
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7279
 
7280
	if (m_n == M1_N1) {
7281
		dp_m_n = &crtc->config->dp_m_n;
7282
		dp_m2_n2 = &crtc->config->dp_m2_n2;
7283
	} else if (m_n == M2_N2) {
7284
 
7285
		/*
7286
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
7287
		 * needs to be programmed into M1_N1.
7288
		 */
7289
		dp_m_n = &crtc->config->dp_m2_n2;
7290
	} else {
7291
		DRM_ERROR("Unsupported divider value\n");
7292
		return;
7293
	}
7294
 
7295
	if (crtc->config->has_pch_encoder)
7296
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
3746 Serge 7297
	else
6084 serge 7298
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
3746 Serge 7299
}
7300
 
6084 serge 7301
/*
 * Compute the VLV DPLL control and MD register values and stash them in
 * the crtc state. No hardware is touched here.
 */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	u32 dpll, dpll_md;

	/*
	 * Enable DPIO clock input. We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
	 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (crtc->pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	pipe_config->dpll_hw_state.dpll = dpll;

	/* The pixel multiplier is stored minus one in the MD register. */
	dpll_md = (pipe_config->pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
}
7323
 
5354 serge 7324
/*
 * Program the VLV PLL dividers and analog tuning values through the DPIO
 * sideband, prior to enabling the PLL. The sequence and the magic values
 * come from the eDP/HDMI DPIO programming notes; do not reorder.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* All sideband (DPIO) accesses below require the sb lock. */
	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers are written once without, then once with, calibration. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock tuning; DP/eDP get an extra bit set. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
4104 Serge 7414
 
6084 serge 7415
/*
 * Compute the CHV DPLL control and MD register values and stash them in
 * the crtc state. No hardware is touched here.
 */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
		DPLL_VCO_ENABLE;
	/* Pipes other than A also carry the integrated CRI clock enable. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* The pixel multiplier is stored minus one in the MD register. */
	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
7427
 
7428
/*
 * Program the CHV PLL dividers, loop filter and lock-detect settings via
 * the DPIO sideband, prior to enabling the PLL. The register sequence and
 * magic values must not be reordered.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	i915_reg_t dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* M2 is split: low 22 bits are the fraction, the rest the integer. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* All sideband (DPIO) accesses below require the sb lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	/* Coefficients and tribuf count are banded by VCO frequency. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7531
 
5354 serge 7532
/**
7533
 * vlv_force_pll_on - forcibly enable just the PLL
7534
 * @dev_priv: i915 private structure
7535
 * @pipe: pipe PLL to enable
7536
 * @dpll: PLL configuration
7537
 *
7538
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7539
 * in cases where we need the PLL enabled even when @pipe is not going to
7540
 * be enabled.
7541
 */
7144 serge 7542
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7543
		     const struct dpll *dpll)
5354 serge 7544
{
7545
	struct intel_crtc *crtc =
7546
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7144 serge 7547
	struct intel_crtc_state *pipe_config;
5354 serge 7548
 
7144 serge 7549
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7550
	if (!pipe_config)
7551
		return -ENOMEM;
7552
 
7553
	pipe_config->base.crtc = &crtc->base;
7554
	pipe_config->pixel_multiplier = 1;
7555
	pipe_config->dpll = *dpll;
7556
 
5354 serge 7557
	if (IS_CHERRYVIEW(dev)) {
7144 serge 7558
		chv_compute_dpll(crtc, pipe_config);
7559
		chv_prepare_pll(crtc, pipe_config);
7560
		chv_enable_pll(crtc, pipe_config);
5354 serge 7561
	} else {
7144 serge 7562
		vlv_compute_dpll(crtc, pipe_config);
7563
		vlv_prepare_pll(crtc, pipe_config);
7564
		vlv_enable_pll(crtc, pipe_config);
5354 serge 7565
	}
7144 serge 7566
 
7567
	kfree(pipe_config);
7568
 
7569
	return 0;
5354 serge 7570
}
7571
 
7572
/**
7573
 * vlv_force_pll_off - forcibly disable just the PLL
7574
 * @dev_priv: i915 private structure
7575
 * @pipe: pipe PLL to disable
7576
 *
7577
 * Disable the PLL for @pipe. To be used in cases where we need
7578
 * the PLL enabled even when @pipe is not going to be enabled.
7579
 */
7580
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7581
{
7582
	if (IS_CHERRYVIEW(dev))
7583
		chv_disable_pll(to_i915(dev), pipe);
7584
	else
7585
		vlv_disable_pll(to_i915(dev), pipe);
7586
}
7587
 
6084 serge 7588
/*
 * Assemble the DPLL (and, on gen4+, DPLL_MD) control words for i9xx-class
 * hardware and store them in the crtc state. Also computes the FP dividers
 * via i9xx_update_pll_dividers(). No hardware is touched here.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	/* SDVO and HDMI share the same high-speed clocking requirements. */
	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These parts encode the pixel multiplier in the DPLL itself. */
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also needs the high-speed clock enable. */
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* G4X additionally programs the reduced-clock P1. */
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference input: TV clock, SSC (sole LVDS panel), or default. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* Gen4+ carries the pixel multiplier in DPLL_MD (stored minus 1). */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
2327 Serge 7664
 
6084 serge 7665
static void i8xx_compute_dpll(struct intel_crtc *crtc,
7666
			      struct intel_crtc_state *crtc_state,
7667
			      intel_clock_t *reduced_clock,
7668
			      int num_connectors)
3031 serge 7669
{
3746 Serge 7670
	struct drm_device *dev = crtc->base.dev;
3031 serge 7671
	struct drm_i915_private *dev_priv = dev->dev_private;
7672
	u32 dpll;
6084 serge 7673
	struct dpll *clock = &crtc_state->dpll;
2327 Serge 7674
 
6084 serge 7675
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
3243 Serge 7676
 
3031 serge 7677
	dpll = DPLL_VGA_MODE_DIS;
2327 Serge 7678
 
6084 serge 7679
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
3031 serge 7680
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7681
	} else {
7682
		if (clock->p1 == 2)
7683
			dpll |= PLL_P1_DIVIDE_BY_TWO;
7684
		else
7685
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7686
		if (clock->p2 == 4)
7687
			dpll |= PLL_P2_DIVIDE_BY_4;
7688
	}
2327 Serge 7689
 
6084 serge 7690
	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
4104 Serge 7691
		dpll |= DPLL_DVO_2X_MODE;
7692
 
6084 serge 7693
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
3031 serge 7694
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7695
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7696
	else
7697
		dpll |= PLL_REF_INPUT_DREFCLK;
7698
 
7699
	dpll |= DPLL_VCO_ENABLE;
6084 serge 7700
	crtc_state->dpll_hw_state.dpll = dpll;
3031 serge 7701
}
7702
 
4104 Serge 7703
/*
 * Write the mode timings (H/V total, blank, sync, vsyncshift and pipe
 * source size) from the committed crtc config into the transcoder's
 * timing registers.  All register values are programmed as N-1.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+. */
	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}
7770
 
4104 Serge 7771
/*
 * Read back the transcoder timing registers into pipe_config.  This is the
 * inverse of intel_set_pipe_timings(): hardware stores each value as N-1,
 * so every field is read out with +1 applied.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		/* Undo the halfline adjustment applied on programming. */
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
7812
 
5060 serge 7813
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
6084 serge 7814
				 struct intel_crtc_state *pipe_config)
4104 Serge 7815
{
6084 serge 7816
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7817
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7818
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7819
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
4104 Serge 7820
 
6084 serge 7821
	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7822
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7823
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7824
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
4104 Serge 7825
 
6084 serge 7826
	mode->flags = pipe_config->base.adjusted_mode.flags;
7827
	mode->type = DRM_MODE_TYPE_DRIVER;
4104 Serge 7828
 
6084 serge 7829
	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7830
	mode->flags |= pipe_config->base.adjusted_mode.flags;
7831
 
7832
	mode->hsync = drm_mode_hsync(mode);
7833
	mode->vrefresh = drm_mode_vrefresh(mode);
7834
	drm_mode_set_name(mode);
4104 Serge 7835
}
7836
 
3746 Serge 7837
/*
 * Build and write the PIPECONF register for a gen2-gen4/VLV/CHV pipe from
 * the committed crtc config: enable-quirk carry-over, double-wide, bpc and
 * dithering (g4x+), CxSR downclocking, interlace mode and (VLV/CHV)
 * limited color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* Keep the pipe force-enabled on quirked machines. */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Pre-gen4 and SDVO need the field-indication interlace mode. */
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7900
 
6084 serge 7901
/*
 * Compute the PLL state for a gen2-gen4/VLV/CHV crtc during atomic check.
 * Finds divider values for the target port clock (unless userspace already
 * fixed them via clock_set) and dispatches to the per-platform DPLL
 * register computation.  Returns 0 on success or -EINVAL if no divider
 * combination can produce the requested clock.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock;
	bool ok;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* DSI has its own dedicated PLL; nothing to compute here. */
	if (crtc_state->has_dsi_encoder)
		return 0;

	/* Count connectors attached to this crtc in the atomic state. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc == &crtc->base)
			num_connectors++;
	}

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.  The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	if (IS_GEN2(dev)) {
		i8xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_compute_dpll(crtc, crtc_state);
	} else {
		i9xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	}

	return 0;
}
7966
 
4104 Serge 7967
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6084 serge 7968
				 struct intel_crtc_state *pipe_config)
4104 Serge 7969
{
7970
	struct drm_device *dev = crtc->base.dev;
7971
	struct drm_i915_private *dev_priv = dev->dev_private;
7972
	uint32_t tmp;
7973
 
4560 Serge 7974
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
7975
		return;
7976
 
4104 Serge 7977
	tmp = I915_READ(PFIT_CONTROL);
7978
	if (!(tmp & PFIT_ENABLE))
7979
		return;
7980
 
7981
	/* Check whether the pfit is attached to our pipe. */
7982
	if (INTEL_INFO(dev)->gen < 4) {
7983
		if (crtc->pipe != PIPE_B)
7984
			return;
7985
	} else {
7986
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7987
			return;
7988
	}
7989
 
7990
	pipe_config->gmch_pfit.control = tmp;
7991
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
7992
}
7993
 
4398 Serge 7994
/*
 * Read the VLV DPLL dividers back over the DPIO sideband and compute the
 * resulting port clock into pipe_config->port_clock (refclk fixed at
 * 100 MHz on this platform).
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	/* Sideband accesses must hold sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the divider fields from the single DW3 dword. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8020
 
6084 serge 8021
/*
 * Reconstruct the framebuffer configuration for a primary plane that the
 * BIOS/firmware left enabled, so the boot framebuffer can be inherited.
 * Allocates an intel_framebuffer (ownership passes to plane_config->fb);
 * returns silently if the plane is off or allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling is only readable from the plane control reg on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* PIPESRC holds (width-1) << 16 | (height-1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8089
 
8090
/*
 * Read the CHV PLL dividers back over the DPIO sideband and compute the
 * resulting port clock into pipe_config->port_clock (refclk fixed at
 * 100 MHz).  M2 is a 22-bit fractional value when the fractional divider
 * is enabled.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* Sideband accesses must hold sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* Integer part of M2 in the top bits, fraction in the low 22. */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8119
 
3746 Serge 8120
/*
 * Read back the full hardware state of a gen2-gen4/VLV/CHV pipe into
 * pipe_config.  Takes a power-domain reference for the pipe (bailing out
 * if the domain is powered down) and releases it before returning.
 * Returns true iff the pipe is enabled and the config was filled in.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x/VLV/CHV encode bpc in PIPECONF. */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
8227
 
3243 Serge 8228
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class PCHs.  Scans the encoders to decide whether SSC and the
 * CPU eDP source output are needed, computes the desired final register
 * value, and if it differs from the current one walks through the
 * mandated slow enable/disable sequence (each step followed by a posting
 * read and a 200us delay).
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		/* On IBX an external CK505 clock chip may provide SSC. */
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC alive for any DPLL already referencing it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The sequence above must end up exactly at the precomputed state. */
	BUG_ON(val != final);
}
8395
 
4104 Serge 8396
/*
 * Pulse the FDI mPHY reset line via SOUTH_CHICKEN2: assert the reset bit,
 * wait (up to 100us) for the status bit to confirm, then de-assert and
 * wait for it to clear.  Timeouts are only logged, not returned.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
3243 Serge 8416
 
4104 Serge 8417
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the SBI sideband.  The
 * addresses and values are a fixed workaround sequence from the Bspec
 * (registers come in per-lane pairs, e.g. 0x2xxx/0x21xx); they are not
 * otherwise documented here, so treat them as opaque magic.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
3243 Serge 8491
 
4104 Serge 8492
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * All SBI sideband traffic is done under sb_lock.  Invalid parameter
 * combinations are coerced (with a WARN) rather than rejected.
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable bit lives in a different SBI reg on LPT-LP. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8536
 
4104 Serge 8537
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* LP PCH exposes the buffer-enable bit in SBI_GEN0 instead. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Park the path on PATHALT before gating the SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8563
 
6937 serge 8564
/* Map a bend amount in steps (-50..50, multiples of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SBI_SSCDIVINTPHASE low-word values, indexed by BEND_IDX(steps). */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	/* Reject out-of-range or non-multiple-of-5 step values. */
	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* Odd multiples of 5 steps need the dither phase pattern. */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Only the low 16 bits of SSCDIVINTPHASE come from the table. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
8624
 
4104 Serge 8625
static void lpt_init_pch_refclk(struct drm_device *dev)
8626
{
8627
	struct intel_encoder *encoder;
8628
	bool has_vga = false;
8629
 
5354 serge 8630
	for_each_intel_encoder(dev, encoder) {
4104 Serge 8631
		switch (encoder->type) {
8632
		case INTEL_OUTPUT_ANALOG:
8633
			has_vga = true;
8634
			break;
5354 serge 8635
		default:
8636
			break;
4104 Serge 8637
		}
8638
	}
8639
 
6937 serge 8640
	if (has_vga) {
8641
		lpt_bend_clkout_dp(to_i915(dev), 0);
4104 Serge 8642
		lpt_enable_clkout_dp(dev, true, true);
6937 serge 8643
	} else {
4104 Serge 8644
		lpt_disable_clkout_dp(dev);
7144 serge 8645
	}
4104 Serge 8646
}
8647
 
3243 Serge 8648
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	/* Dispatch on PCH generation; other PCH types need no refclk setup here. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
8658
 
6084 serge 8659
/*
 * Pick the reference clock (in kHz) for an Ironlake-class pipe: the VBT SSC
 * frequency when a lone LVDS output wants SSC, otherwise the fixed 120 MHz
 * reference.
 */
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int num_connectors = 0, i;
	bool is_lvds = false;

	/* Count connectors on this CRTC and note whether one of them is LVDS. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		default:
			break;
		}
		num_connectors++;
	}

	/* SSC refclk is only usable when LVDS is the sole output on the pipe. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}
8694
 
4104 Serge 8695
/*
 * Program PIPECONF for an Ironlake-class pipe from the staged CRTC config:
 * bits-per-color, dithering, interlace mode and color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/* Limited (16-235) RGB range selection lives in PIPECONF on ILK. */
	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
8736
 
3480 Serge 8737
/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	/* Scale full-range [0,255] down to limited-range [16,235]. */
	if (intel_crtc->config->limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	/* Gen > 6 has dedicated post-offset registers; older parts use mode bits. */
	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}
8800
 
4104 Serge 8801
/*
 * Program PIPECONF (on the CPU transcoder) plus gamma mode for HSW+, and
 * PIPEMISC dithering/bpc for BDW and gen9+.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	uint32_t val;

	val = 0;

	/* Dither bits live in PIPECONF only on Haswell itself. */
	if (IS_HASWELL(dev) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	/* BDW/gen9+ moved bpc and dithering into the PIPEMISC register. */
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}
8853
 
3031 serge 8854
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6084 serge 8855
				    struct intel_crtc_state *crtc_state,
3031 serge 8856
				    intel_clock_t *clock,
8857
				    bool *has_reduced_clock,
8858
				    intel_clock_t *reduced_clock)
8859
{
8860
	struct drm_device *dev = crtc->dev;
8861
	struct drm_i915_private *dev_priv = dev->dev_private;
8862
	int refclk;
8863
	const intel_limit_t *limit;
6084 serge 8864
	bool ret;
3031 serge 8865
 
6084 serge 8866
	refclk = ironlake_get_refclk(crtc_state);
3031 serge 8867
 
8868
	/*
8869
	 * Returns a set of divisors for the desired target clock with the given
8870
	 * refclk, or FALSE.  The returned values represent the clock equation:
8871
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8872
	 */
6084 serge 8873
	limit = intel_limit(crtc_state, refclk);
8874
	ret = dev_priv->display.find_dpll(limit, crtc_state,
8875
					  crtc_state->port_clock,
4104 Serge 8876
					  refclk, NULL, clock);
3031 serge 8877
	if (!ret)
8878
		return false;
8879
 
8880
	return true;
8881
}
8882
 
3243 Serge 8883
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8884
{
8885
	/*
8886
	 * Account for spread spectrum to avoid
8887
	 * oversubscribing the link. Max center spread
8888
	 * is 2.5%; use 5% for safety's sake.
8889
	 */
8890
	u32 bps = target_clock * bpp * 21 / 20;
5060 serge 8891
	return DIV_ROUND_UP(bps, link_bw * 8);
3243 Serge 8892
}
8893
 
4104 Serge 8894
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
2327 Serge 8895
{
4104 Serge 8896
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
3746 Serge 8897
}
8898
 
3243 Serge 8899
/*
 * Assemble the DPLL control register value for an Ironlake-class PCH PLL
 * from the staged divisors in @crtc_state->dpll. As a side effect, ORs
 * FP_CB_TUNE into *@fp (and *@fp2 when the reduced clock needs it too).
 * Returns the DPLL value with DPLL_VCO_ENABLE set.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct intel_crtc_state *crtc_state,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	uint32_t dpll;
	int factor, num_connectors = 0, i;
	bool is_lvds = false, is_sdvo = false;

	/* Classify the outputs attached to this CRTC. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		*fp |= FP_CB_TUNE;

	/* Same M/N check as above, applied to the reduced (downclock) PLL. */
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	/* Both SDVO/HDMI and DP encoders use the high-speed clock mode. */
	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC refclk only for a lone LVDS output; see ironlake_get_refclk(). */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}
8994
 
6084 serge 8995
/*
 * Compute and stage the full clock configuration for an Ironlake-class
 * CRTC: find divisors, fill crtc_state->dpll_hw_state and reserve a shared
 * PCH DPLL when a PCH encoder is involved. Returns 0 or -EINVAL.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_shared_dpll *pll;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	/* Divisor search may fail; that is only fatal if no clock was preset. */
	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !crtc_state->clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!crtc_state->clock_set) {
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (crtc_state->has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(crtc, crtc_state,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		crtc_state->dpll_hw_state.dpll = dpll;
		crtc_state->dpll_hw_state.fp0 = fp;
		/* Without a downclock, FP1 mirrors FP0. */
		if (has_reduced_clock)
			crtc_state->dpll_hw_state.fp1 = fp2;
		else
			crtc_state->dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(crtc, crtc_state);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	/* LVDS downclocking is possible only when a reduced clock was found. */
	if (is_lvds && has_reduced_clock)
		crtc->lowfreq_avail = true;
	else
		crtc->lowfreq_avail = false;

	return 0;
}
3243 Serge 9060
 
4560 Serge 9061
/* Read back the PCH transcoder link M1/N1 and data M1/N1 (plus TU size)
 * for this CRTC's pipe into @m_n. */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* TU size shares the DATA_M1 register with gmch_m. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9076
 
9077
/*
 * Read back the CPU transcoder link/data M/N values into @m_n, and when
 * requested (and supported) the second M2/N2 set into @m2_n2. Pre-gen5
 * parts use the per-pipe G4X register layout instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-ILK: per-pipe registers, no transcoder indirection. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9118
 
4560 Serge 9119
void intel_dp_get_m_n(struct intel_crtc *crtc,
6084 serge 9120
		      struct intel_crtc_state *pipe_config)
4560 Serge 9121
{
6084 serge 9122
	if (pipe_config->has_pch_encoder)
4560 Serge 9123
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9124
	else
9125
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5354 serge 9126
					     &pipe_config->dp_m_n,
9127
					     &pipe_config->dp_m2_n2);
4560 Serge 9128
}
9129
 
9130
/* Read back the FDI link M/N values from the CPU transcoder; there is no
 * second M2/N2 set for FDI. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9136
 
5354 serge 9137
/*
 * Read back the SKL panel fitter state: find the scaler bound to this pipe
 * (enabled and not assigned to a plane), record its position/size, and
 * update the scaler bookkeeping accordingly.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	uint32_t ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		/* A scaler with no plane selected is doing pipe (pfit) scaling. */
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	/* id == -1 means no scaler is in pfit use; clear the CRTC's claim. */
	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}
9166
 
6084 serge 9167
/*
 * Reconstruct the firmware/BIOS framebuffer configuration for a SKL+ pipe
 * by reading back the primary plane registers. On success, fills
 * @plane_config (base, tiling, size) and attaches a freshly allocated
 * intel_framebuffer describing it; on any failure the fb is freed and
 * @plane_config->fb stays NULL.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Nothing to take over if the BIOS left the plane disabled. */
	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	/* PLANE_SIZE encodes height-1 in [27:16] and width-1 in [12:0]. */
	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in tile units; convert to bytes. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(fb);
}
9250
 
4104 Serge 9251
/* Read back the ILK-style panel fitter (PF) state for this pipe into
 * @pipe_config->pch_pfit. */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
9274
 
6084 serge 9275
/*
 * Reconstruct the firmware/BIOS framebuffer configuration for an ILK-class
 * pipe by reading back the primary plane registers, mirroring
 * skylake_get_initial_plane_config() for the older register layout.
 * Returns silently (fb stays NULL) when the plane is disabled or the
 * wrapper allocation fails.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	/* HSW/BDW have a dedicated offset register; older parts split by tiling. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* PIPESRC encodes width-1 in [27:16] and height-1 in [11:0]. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
9343
 
3746 Serge 9344
/*
 * Read back the current hardware state of an ILK-class pipe into
 * @pipe_config.  Returns true iff the pipe is enabled; on any path the
 * power-domain reference taken at the top is dropped before returning.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	/* Bail if the pipe's power domain is off; registers would be garbage. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Decode the pipe's bits-per-component setting into total bpp. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX has a fixed pipe -> PLL mapping. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT and later select the PLL via PCH_DPLL_SEL. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9433
 
4104 Serge 9434
/*
 * Sanity-check that everything that must be off before disabling LCPLL
 * really is off.  Each I915_STATE_WARN only logs; this never aborts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* PWM2 only exists on Haswell. */
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9467
 
5060 serge 9468
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9469
{
9470
	struct drm_device *dev = dev_priv->dev;
9471
 
9472
	if (IS_HASWELL(dev))
9473
		return I915_READ(D_COMP_HSW);
9474
	else
9475
		return I915_READ(D_COMP_BDW);
9476
}
9477
 
9478
/*
 * Write the D_COMP register.  Broadwell allows a plain MMIO write; on
 * Haswell the write must go through the pcode mailbox under rps.hw_lock.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	if (!IS_HASWELL(dev_priv->dev)) {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
		return;
	}

	mutex_lock(&dev_priv->rps.hw_lock);
	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
		DRM_ERROR("Failed to write to D_COMP\n");
	mutex_unlock(&dev_priv->rps.hw_lock);
}
9493
 
4104 Serge 9494
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move the CD clock onto FCLK before killing the PLL. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the display RCOMP engine and wait for it to settle. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9545
 
9546
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already fully enabled and locked - nothing to do. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable the display RCOMP engine before waking the PLL. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move the CD clock back from FCLK onto the (now locked) LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(dev_priv->dev);
}
9597
 
5060 serge 9598
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* LP PCH: allow the deeper partition power level. */
	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}
9637
 
5060 serge 9638
/* Undo hsw_enable_pc8(): restore LCPLL, PCH refclk and clock gating. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	/* LP PCH: forbid the deeper partition power level again. */
	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
9654
 
6084 serge 9655
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
4104 Serge 9656
{
6084 serge 9657
	struct drm_device *dev = old_state->dev;
7144 serge 9658
	struct intel_atomic_state *old_intel_state =
9659
		to_intel_atomic_state(old_state);
9660
	unsigned int req_cdclk = old_intel_state->dev_cdclk;
6084 serge 9661
 
9662
	broxton_set_cdclk(dev, req_cdclk);
9663
}
9664
 
9665
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = state->dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_crtc_state *crtc_state;
	unsigned max_pixel_rate = 0, i;
	enum pipe pipe;

	/*
	 * Start from the currently committed per-pipe minimum pixel clocks,
	 * then overwrite the entries for the pipes touched by this state.
	 */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, cstate, i) {
		int pixel_rate;

		crtc_state = to_intel_crtc_state(cstate);
		if (!crtc_state->base.enable) {
			intel_state->min_pixclk[i] = 0;
			continue;
		}

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

		intel_state->min_pixclk[i] = pixel_rate;
	}

	/* The answer is the maximum over all pipes, changed or not. */
	for_each_pipe(dev_priv, pipe)
		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);

	return max_pixel_rate;
}
9702
 
9703
/*
 * Reprogram the Broadwell CD clock to @cdclk (kHz).  The sequence is:
 * notify pcode, park the CD clock on FCLK, change the LCPLL frequency
 * select, switch back to LCPLL, then tell pcode the new frequency.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val, data;
	int ret;

	/* LCPLL must be fully enabled and locked before we touch cdclk. */
	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Temporarily source the CD clock from FCLK while we retune LCPLL. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* 'data' is the frequency code later reported to pcode. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Switch the CD clock back from FCLK to LCPLL. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
9781
 
9782
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9783
{
9784
	struct drm_i915_private *dev_priv = to_i915(state->dev);
7144 serge 9785
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
6084 serge 9786
	int max_pixclk = ilk_max_pixel_rate(state);
9787
	int cdclk;
9788
 
9789
	/*
9790
	 * FIXME should also account for plane ratio
9791
	 * once 64bpp pixel formats are supported.
9792
	 */
9793
	if (max_pixclk > 540000)
9794
		cdclk = 675000;
9795
	else if (max_pixclk > 450000)
9796
		cdclk = 540000;
9797
	else if (max_pixclk > 337500)
9798
		cdclk = 450000;
9799
	else
9800
		cdclk = 337500;
9801
 
9802
	if (cdclk > dev_priv->max_cdclk_freq) {
6937 serge 9803
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
7144 serge 9804
			      cdclk, dev_priv->max_cdclk_freq);
6937 serge 9805
		return -EINVAL;
6084 serge 9806
	}
9807
 
7144 serge 9808
	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9809
	if (!intel_state->active_crtcs)
9810
		intel_state->dev_cdclk = 337500;
6084 serge 9811
 
9812
	return 0;
9813
}
9814
 
9815
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9816
{
9817
	struct drm_device *dev = old_state->dev;
7144 serge 9818
	struct intel_atomic_state *old_intel_state =
9819
		to_intel_atomic_state(old_state);
9820
	unsigned req_cdclk = old_intel_state->dev_cdclk;
6084 serge 9821
 
9822
	broadwell_set_cdclk(dev, req_cdclk);
9823
}
9824
 
9825
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9826
				      struct intel_crtc_state *crtc_state)
9827
{
7144 serge 9828
	struct intel_encoder *intel_encoder =
9829
		intel_ddi_get_crtc_new_encoder(crtc_state);
5354 serge 9830
 
7144 serge 9831
	if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9832
		if (!intel_ddi_pll_select(crtc, crtc_state))
9833
			return -EINVAL;
9834
	}
9835
 
5354 serge 9836
	crtc->lowfreq_avail = false;
9837
 
9838
	return 0;
4104 Serge 9839
}
9840
 
6084 serge 9841
/*
 * Broxton: read out which DPLL drives @port.  The port -> PLL mapping is
 * fixed on BXT, so no register read is needed.
 * NOTE(review): shared_dpll is deliberately one ID ahead of ddi_pll_sel
 * (e.g. SKL_DPLL0 pairs with DPLL_ID_SKL_DPLL1) — presumably because the
 * shared-DPLL table starts at a different base on this platform; confirm
 * against the DPLL management code before "fixing".
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	switch (port) {
	case PORT_A:
		pipe_config->ddi_pll_sel = SKL_DPLL0;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_B:
		pipe_config->ddi_pll_sel = SKL_DPLL1;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case PORT_C:
		pipe_config->ddi_pll_sel = SKL_DPLL2;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
	}
}
9862
 
5354 serge 9863
/* Skylake: read out which DPLL drives @port from DPLL_CTRL2. */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	/* Each port has a 3-bit clock-select field; bit 0 is the override. */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}
9893
 
5354 serge 9894
/*
 * Haswell/Broadwell: read out which PLL drives @port from PORT_CLK_SEL.
 * Other selector values (e.g. LCPLL, no clock) intentionally leave
 * shared_dpll untouched - those clocks are not shared DPLLs.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		pipe_config->shared_dpll = DPLL_ID_SPLL;
		break;
	}
}
9912
 
5060 serge 9913
/*
 * Read back the DDI port / DPLL state for an active HSW+ pipe into
 * @pipe_config, including the FDI configuration when the pipe is routed
 * through the PCH transcoder (DDI E).
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* The port -> PLL readout is platform specific. */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9956
 
3746 Serge 9957
/*
 * Read back the current hardware state of a HSW+ pipe into @pipe_config.
 * Power-domain references are collected in power_domain_mask as they are
 * taken, and every collected reference is dropped at 'out' - keep that
 * bookkeeping intact when touching this function.  Returns true iff the
 * pipe is enabled.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	unsigned long power_domain_mask;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT(power_domain);

	ret = false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* If the eDP transcoder feeds this pipe, use it instead. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			/* Unknown input: warn, then fall through to pipe A. */
			WARN(1, "unknown pipe linked to edp transcoder\n");
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		goto out;
	power_domain_mask |= BIT(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	/* NOTE(review): the two gen >= 9 guards below could be merged. */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_init_scalers(dev, crtc, pipe_config);
	}

	if (INTEL_INFO(dev)->gen >= 9) {
		pipe_config->scaler_state.scaler_id = -1;
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}

	/* The panel fitter lives in its own power domain. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT(power_domain);
		if (INTEL_INFO(dev)->gen >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	ret = true;

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return ret;
}
10049
 
7144 serge 10050
/*
 * Program the 845G/865G hardware cursor.  A NULL or invisible
 * @plane_state disables the cursor (cntl/size stay 0).  The cached
 * cursor_* fields avoid redundant register writes.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;
		/* 4 bytes per ARGB pixel, rounded up to a power of two. */
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
2327 Serge 10112
 
7144 serge 10113
/*
 * Program the gen4+ style hardware cursor for the crtc's pipe.  A NULL or
 * invisible @plane_state disables the cursor (cntl stays 0).  Only 64/128/
 * 256 square ARGB cursors are supported by the hardware here.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (plane_state && plane_state->visible) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (plane_state->base.crtc_w) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				MISSING_CASE(plane_state->base.crtc_w);
				return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;

		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
			cntl |= CURSOR_ROTATE_180;
	}

	/* Skip the write when the control value is unchanged. */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
2327 Serge 10159
 
3031 serge 10160
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Update cursor position and then dispatch to the platform-specific
 * cursor programming routine.  A NULL @plane_state disables the cursor.
 */
void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 base = intel_crtc->cursor_addr;
	u32 pos = 0;

	if (plane_state) {
		int x = plane_state->base.crtc_x;
		int y = plane_state->base.crtc_y;

		/* Negative coordinates are encoded as sign bit + magnitude. */
		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		pos |= x << CURSOR_X_SHIFT;

		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= y << CURSOR_Y_SHIFT;

		/* ILK+ do this automagically */
		if (HAS_GMCH_DISPLAY(dev) &&
		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
			/* Point base at the last pixel for 180° rotation. */
			base += (plane_state->base.crtc_h *
				 plane_state->base.crtc_w - 1) * 4;
		}
	}

	I915_WRITE(CURPOS(pipe), pos);

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base, plane_state);
	else
		i9xx_update_cursor(crtc, base, plane_state);
}
2327 Serge 10202
 
5354 serge 10203
/*
 * Validate a requested cursor size against the platform's hardware
 * cursor limits.  Returns true when the size is supported.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/*
		 * width | height equals the common size only when the
		 * cursor is square with one of the listed dimensions.
		 */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fall through */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}
10239
 
2330 Serge 10240
/*
 * Legacy gamma-ramp entry point: cache the high byte of each 16-bit
 * LUT component in the CRTC's software copy, then push the table to
 * the hardware via intel_crtc_load_lut().
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	/* The LUT has 256 entries; clamp the requested range. */
	int last = (start + size > 256) ? 256 : start + size;
	int i;

	for (i = start; i < last; i++) {
		/* Keep only the most significant byte of each component. */
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
2327 Serge 10254
 
2330 Serge 10255
/*
 * VESA 640x480x72Hz mode to set on the pipe.
 * Used as the fallback mode for load detection when the caller does
 * not supply one (see intel_get_load_detect_pipe()).
 */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
2327 Serge 10260
 
4560 Serge 10261
/*
 * Allocate an intel_framebuffer wrapper and initialize it from
 * @mode_cmd with @obj as backing storage.  Must be called with the
 * required locking already held (see intel_framebuffer_create()).
 * Returns the embedded drm_framebuffer or an ERR_PTR; on init failure
 * only the wrapper is freed here and the error is propagated.
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}
2327 Serge 10283
 
5060 serge 10284
/*
 * Locked wrapper around __intel_framebuffer_create(): acquires
 * dev->struct_mutex (interruptibly) for the duration of framebuffer
 * creation.  Returns the new framebuffer or an ERR_PTR.
 */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}
10300
 
2330 Serge 10301
static u32
10302
intel_framebuffer_pitch_for_width(int width, int bpp)
10303
{
10304
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10305
	return ALIGN(pitch, 64);
10306
}
2327 Serge 10307
 
2330 Serge 10308
static u32
10309
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10310
{
10311
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5060 serge 10312
	return PAGE_ALIGN(pitch * mode->vdisplay);
2330 Serge 10313
}
2327 Serge 10314
 
2330 Serge 10315
/*
 * Allocate a GEM object sized for @mode at @depth/@bpp and wrap it in
 * a framebuffer (used for load detection when fbcon can't be reused).
 * On framebuffer-creation failure the fresh GEM object is released.
 * Returns the fb or an ERR_PTR.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
2327 Serge 10341
 
2330 Serge 10342
/*
 * If fbdev emulation is compiled in and the fbcon framebuffer is large
 * enough (both pitch and total object size) to scan out @mode, grab a
 * reference on it and return it; otherwise return NULL so the caller
 * allocates a temporary framebuffer instead.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	/* A scanline of @mode must fit within the fbdev fb's pitch ... */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* ... and all of @mode's lines within the backing object. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}
2327 Serge 10374
 
6084 serge 10375
/*
 * Set up the primary plane in @state for a full-screen scanout of @fb
 * on @crtc: the plane covers the CRTC's hdisplay x vdisplay area and
 * sources from (@x, @y) in the framebuffer (16.16 fixed point src
 * coords).  A NULL @fb detaches the plane from the CRTC; a NULL @mode
 * zeroes the plane dimensions.  Returns 0 or a negative error code.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}
10409
 
3031 serge 10410
/*
 * Route @connector to a CRTC and light it up with @mode (or the default
 * VESA 640x480 load_detect_mode) so load detection can be performed.
 * The pre-existing configuration is captured into old->restore_state
 * and is re-applied later by intel_release_load_detect_pipe().
 * Modeset-lock deadlocks (-EDEADLK) are handled by backing off and
 * retrying.  Returns true when a pipe was successfully set up.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use - drop the lock again and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	/* One state to apply now, one to capture what we're replacing. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	/* The plane state now holds its own reference to fb. */
	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	drm_atomic_state_free(restore_state);
	restore_state = state = NULL;

	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}
2327 Serge 10589
 
3031 serge 10590
/*
 * Undo intel_get_load_detect_pipe(): commit the saved restore_state to
 * bring the connector/CRTC/plane back to their previous configuration.
 * The commit consumes the state on success; it is only freed manually
 * here when the commit itself fails.  A no-op if no state was saved.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
		drm_atomic_state_free(state);
	}
}
2327 Serge 10613
 
4560 Serge 10614
/*
 * Return the DPLL reference clock in kHz for the programmed
 * configuration: the VBT-provided SSC frequency when spread spectrum
 * is selected in the DPLL, otherwise a fixed per-platform value.
 */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev))
		return 120000;
	else if (!IS_GEN2(dev))
		return 96000;
	else
		return 48000;
}
10629
 
2330 Serge 10630
/* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Decodes the DPLL/FP register values saved in pipe_config->dpll_hw_state
 * into m/n/p divisors and computes pipe_config->port_clock from them.
 * On an unknown DPLL mode the function logs and returns without setting
 * port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divisor register the DPLL selected. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: i830 has no LVDS register, treat as non-LVDS. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10719
 
4560 Serge 10720
/*
 * Convert a link clock plus an M/N ratio into a dot clock.
 * Returns 0 when link_n is zero (unprogrammed M/N, avoids a division
 * by zero).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}
4104 Serge 10738
 
4560 Serge 10739
/*
 * Read back the pipe's dot clock on PCH platforms: decode the DPLL for
 * port_clock, then derive adjusted_mode.crtc_clock from the FDI link
 * frequency and the programmed FDI M/N values.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}
2327 Serge 10757
 
2330 Serge 10758
/** Returns the currently programmed mode of the given pipe. */
/*
 * Reads the pipe timing registers and reconstructs a drm_display_mode
 * from them, using a throwaway pipe_config to decode the DPLL into a
 * clock value.  Returns a freshly allocated mode, or NULL on allocation
 * failure.  NOTE(review): the caller appears responsible for freeing
 * the returned mode - confirm against callers.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	/* Registers store <value - 1>; convert back to human-readable. */
	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}
10813
 
3031 serge 10814
/*
 * Mark the GPU busy: take a runtime PM reference, refresh the GFX
 * power metrics, and raise the RPS frequency on gen6+.  A no-op while
 * already marked busy; paired with intel_mark_idle().
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}
2327 Serge 10827
 
3031 serge 10828
void intel_mark_idle(struct drm_device *dev)
10829
{
4104 Serge 10830
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 10831
 
5060 serge 10832
	if (!dev_priv->mm.busy)
3031 serge 10833
		return;
2327 Serge 10834
 
5060 serge 10835
	dev_priv->mm.busy = false;
10836
 
10837
	if (INTEL_INFO(dev)->gen >= 6)
4560 Serge 10838
		gen6_rps_idle(dev->dev_private);
5060 serge 10839
 
10840
	intel_runtime_pm_put(dev_priv);
3031 serge 10841
}
2327 Serge 10842
 
2330 Serge 10843
/*
 * CRTC destructor: detach any pending unpin work under the event lock,
 * cancel and free it outside the lock, then tear down the DRM CRTC and
 * free the intel_crtc wrapper.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Claim the work item so the irq path can no longer see it. */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
2327 Serge 10863
 
3031 serge 10864
/*
 * Deferred page-flip completion work: unpin the old framebuffer, drop
 * the flip object and request references (under struct_mutex), notify
 * frontbuffer tracking and FBC, and release the work item.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	/* A completion must correspond to a previously queued flip. */
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
2327 Serge 10889
 
3031 serge 10890
/*
 * Complete a pending page flip on @crtc if one is armed: checks the
 * work item's pending state under the event lock and, when the flip
 * has reached INTEL_FLIP_COMPLETE, finishes it via
 * page_flip_completed().
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
2327 Serge 10920
 
3031 serge 10921
void intel_finish_page_flip(struct drm_device *dev, int pipe)
10922
{
5060 serge 10923
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10924
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2327 Serge 10925
 
3031 serge 10926
	do_intel_finish_page_flip(dev, crtc);
10927
}
2327 Serge 10928
 
3031 serge 10929
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10930
{
5060 serge 10931
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10932
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
2327 Serge 10933
 
3031 serge 10934
	do_intel_finish_page_flip(dev, crtc);
10935
}
2327 Serge 10936
 
5060 serge 10937
/* Is 'a' after or equal to 'b'? */
10938
static bool g4x_flip_count_after_eq(u32 a, u32 b)
10939
{
10940
	return !((a - b) & 0x80000000);
10941
}
10942
 
10943
/*
 * Decide whether the pending page flip on @crtc has actually reached
 * the hardware: always true during/after a GPU reset and on platforms
 * without the surface-live/flip-count registers; otherwise compare
 * DSPSURFLIVE and the flip counter against the queued flip.
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* After a reset the flip is considered done so it can be cleaned up. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}
10988
 
3031 serge 10989
/*
 * Flip-pending irq handler: if the flip on @plane's CRTC has really
 * finished (see page_flip_finished()), advance the work item's pending
 * state so the completion path can run.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
2327 Serge 11010
 
6084 serge 11011
/*
 * Arm a queued flip: publish the work item as INTEL_FLIP_PENDING with
 * write barriers on both sides so the irq handler observes a fully
 * initialized work item.
 */
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}
6320 serge 11019
 
3031 serge 11020
/*
 * Queue a CS-based page flip on gen2: emit a wait for any previous
 * flip on this plane followed by an MI_DISPLAY_FLIP for the new base.
 * @dev, @obj and @flags are unused here; kept for the shared
 * queue_flip signature.  Returns 0 or the error from
 * intel_ring_begin().
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11054
 
3031 serge 11055
/*
 * Queue a CS-based page flip on gen3: like gen2 but uses the
 * MI_DISPLAY_FLIP_I915 opcode.  @dev, @obj and @flags are unused here;
 * kept for the shared queue_flip signature.  Returns 0 or the error
 * from intel_ring_begin().
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Wait for any previous flip on this plane before queueing ours. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11086
 
3031 serge 11087
/*
 * Queue a page flip on gen4 (i965-class) hardware via MI_DISPLAY_FLIP.
 * The base-address dword also carries the object's tiling mode in its
 * low bits; pitch and pipe-source size are re-emitted alongside.
 *
 * Returns 0 on success or a negative errno from intel_ring_begin().
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	/* Mask keeps only the pipe-source width/height fields. */
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11125
 
3031 serge 11126
/*
 * Queue a page flip on gen6 (SNB-class) hardware via MI_DISPLAY_FLIP.
 * Unlike gen4, the tiling mode is folded into the pitch dword here.
 *
 * Returns 0 on success or a negative errno from intel_ring_begin().
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	/* Mask keeps only the pipe-source width/height fields. */
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11161
 
3031 serge 11162
/*
 * Queue a page flip on gen7+ (IVB-class) hardware.  When submitted on the
 * render ring, an LRI/SRM pair first unmasks the flip-done event in DERRMR
 * and stashes the old value in the ring's scratch page; the whole packet
 * must stay within one cacheline per BSpec, hence the alignment below.
 *
 * Returns 0 on success, -ENODEV for an unknown plane, or a negative errno
 * from ring preparation.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	/* IVB uses per-plane selector bits in the flip command. */
	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	/* 4 dwords for the flip itself, plus 6 for the RCS DERRMR dance. */
	len = 4;
	if (ring->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit_reg(ring, DERRMR);
		/* Save the old DERRMR value into the engine's scratch page. */
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11256
 
6084 serge 11257
/*
 * Decide whether a flip should be performed via MMIO register writes
 * rather than a command-streamer MI_DISPLAY_FLIP.  Honours the
 * i915.use_mmio_flip module parameter (-1 = never, 0 = auto, >0 = always)
 * and forces MMIO when execlists are enabled or when no ring is available.
 *
 * NOTE(review): the dma-buf fence check is commented out in this port —
 * presumably because reservation objects aren't wired up here; confirm
 * before re-enabling.
 */
static bool use_mmio_flip(struct intel_engine_cs *ring,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (ring == NULL)
		return true;

	if (INTEL_INFO(ring->dev)->gen < 5)
		return false;

	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;
//	else if (obj->base.dma_buf &&
//		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
//						       false))
//		return true;
	else
		/* Auto mode: use MMIO only when a CS flip would have to
		 * synchronise with a write on a different ring. */
		return ring != i915_gem_request_get_ring(obj->last_write_req);
}
11287
 
11288
/*
 * Perform an MMIO page flip on SKL+ universal-plane hardware: reprogram
 * PLANE_CTL tiling, PLANE_STRIDE and finally PLANE_SURF for plane 0 of
 * the crtc's pipe.  PLANE_CTL/PLANE_STRIDE only latch on the PLANE_SURF
 * write, which is what makes the whole update atomic.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride, tile_height;

	/* Translate the fb modifier into the PLANE_CTL tiling field. */
	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
	} else {
		stride = fb->pitches[0] /
			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						  fb->pixel_format);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
11340
 
11341
/*
 * Perform an MMIO page flip on ILK-through-BDW hardware: update the
 * tiling bit in DSPCNTR, then write the new surface address to DSPSURF.
 * The DSPSURF write is what arms the flip; the posting read flushes it.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}
11364
 
11365
/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 */
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
{
	struct intel_crtc *crtc = mmio_flip->crtc;
	struct intel_unpin_work *work;

	/* Snapshot unpin_work under the event lock; it may have been
	 * completed/cleared behind our back by the time we run. */
	spin_lock_irq(&crtc->base.dev->event_lock);
	work = crtc->unpin_work;
	spin_unlock_irq(&crtc->base.dev->event_lock);
	if (work == NULL)
		return;

	intel_mark_page_flip_active(work);

	intel_pipe_update_start(crtc);

	/* gen9+ uses universal planes; older gens use the legacy DSP regs. */
	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
	else
		/* use_mmio_flip() retricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc);
}
11392
 
11393
/*
 * Workqueue callback for MMIO flips: wait for the last GPU write to the
 * framebuffer object to finish, then program the flip registers and free
 * the intel_mmio_flip allocated by intel_queue_mmio_flip().
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
	struct intel_mmio_flip *mmio_flip =
		container_of(work, struct intel_mmio_flip, work);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	/* Block until rendering into the new framebuffer has completed,
	 * then drop the request reference taken when the flip was queued. */
	if (mmio_flip->req) {
		WARN_ON(__i915_wait_request(mmio_flip->req,
					    mmio_flip->crtc->reset_counter,
					    false, NULL,
					    &mmio_flip->i915->rps.mmioflips));
		i915_gem_request_unreference__unlocked(mmio_flip->req);
	}

	/* For framebuffer backed by dmabuf, wait for fence */
//	if (obj->base.dma_buf)
//		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
//							    false, false,
//							    MAX_SCHEDULE_TIMEOUT) < 0);

	intel_do_mmio_flip(mmio_flip);
	kfree(mmio_flip);
}
11418
 
11419
/*
 * Allocate an intel_mmio_flip describing the pending flip and hand it to
 * the system workqueue; intel_mmio_flip_work_func() takes ownership and
 * frees it.  A reference on the object's last write request is taken so
 * the worker can wait for rendering to finish.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int intel_queue_mmio_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_i915_gem_object *obj)
{
	struct intel_mmio_flip *mmio_flip;

	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
	if (mmio_flip == NULL)
		return -ENOMEM;

	mmio_flip->i915 = to_i915(dev);
	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
	mmio_flip->crtc = to_intel_crtc(crtc);
	mmio_flip->rotation = crtc->primary->state->rotation;

	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
	schedule_work(&mmio_flip->work);

	return 0;
}
11439
 
3031 serge 11440
/*
 * Fallback queue_flip hook for platforms without CS flip support;
 * always fails with -ENODEV so callers fall back to other paths.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}
2327 Serge 11449
 
6084 serge 11450
/*
 * Heuristically decide whether a queued page flip has actually completed
 * but its flip-done interrupt was lost.  Called with dev->event_lock held
 * (callers take it).  Returns true when the flip should be treated as done.
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work = intel_crtc->unpin_work;
	u32 addr;

	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
		return true;

	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
		return false;

	if (!work->enable_stall_check)
		return false;

	/* Record the first vblank at which the flip was ready (rendering
	 * complete); before that, no stall can be declared. */
	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req, true))
			return false;

		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
	}

	/* Give the hardware three vblanks' grace before suspecting a stall. */
	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_INFO(dev)->gen >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}
11491
 
11492
/*
 * Called from the vblank interrupt path for @pipe: kick any page flip
 * whose completion interrupt appears to have been missed, and boost RPS
 * for flips that have been waiting more than one vblank on rendering.
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	/* Plain spin_lock: we are already in interrupt context. */
	spin_lock(&dev->event_lock);
	work = intel_crtc->unpin_work;
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
		work = NULL;
	}
	if (work != NULL &&
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}
6320 serge 11517
 
3031 serge 11518
/*
 * Implementation of the drm_crtc .page_flip hook: validate the new
 * framebuffer, set up an intel_unpin_work, pin the new fb, then queue the
 * flip either via MMIO (worker) or via a CS MI_DISPLAY_FLIP on a ring.
 * On -EIO (wedged GPU) falls back to a full atomic commit so the fb is
 * still displayed.  Returns 0 or a negative errno; error paths unwind
 * via the goto-cleanup chain at the bottom.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	bool mmio_flip;
	struct drm_i915_gem_request *request = NULL;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe.  In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->unpin_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (__intel_pageflip_stall_check(dev, crtc)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irq(&dev->event_lock);

	/* NOTE(review): upstream throttles here by flushing the workqueue
	 * when two unpin works are outstanding; disabled in this port. */
//   if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
//       flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);
	intel_fbc_pre_update(intel_crtc);

	work->pending_flip_obj = obj;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	/* Pick the engine to submit the CS flip on, per-platform. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = i915_gem_request_get_ring(obj->last_write_req);
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	mmio_flip = use_mmio_flip(ring, obj);

	/* When using CS flips, we want to emit semaphores between rings.
	 * However, when using mmio flips we will create a task to do the
	 * synchronisation, so all we want here is to pin the framebuffer
	 * into the display plane and skip any waits.
	 */
	if (!mmio_flip) {
		ret = i915_gem_object_sync(obj, ring, &request);
		if (ret)
			goto cleanup_pending;
	}

	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
					 crtc->primary->state);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
						  obj, 0);
	work->gtt_offset += intel_crtc->dspaddr_offset;

	if (mmio_flip) {
		ret = intel_queue_mmio_flip(dev, crtc, obj);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);
	} else {
		if (!request) {
			request = i915_gem_request_alloc(ring, NULL);
			if (IS_ERR(request)) {
				ret = PTR_ERR(request);
				goto cleanup_unpin;
			}
		}

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req, request);
	}

	if (request)
		i915_add_request_no_flush(request);

	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
	work->enable_stall_check = true;

	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_prepare(dev,
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
	if (!IS_ERR_OR_NULL(request))
		i915_gem_request_cancel(request);
	atomic_dec(&intel_crtc->unpin_work_count);
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	drm_gem_object_unreference_unlocked(&obj->base);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	/* GPU wedged: display the new fb via a full atomic commit instead. */
	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		if (ret)
			drm_atomic_state_free(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_send_vblank_event(dev, pipe, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}
11755
 
11756
 
11757
/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @plane: drm plane
 * @state: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct intel_plane_state *new = to_intel_plane_state(state);
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);

	/* Update watermarks on tiling or size changes. */
	if (new->visible != cur->visible)
		return true;

	/* No fb on either side means no size/tiling comparison to make. */
	if (!cur->base.fb || !new->base.fb)
		return false;

	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
	    cur->base.rotation != new->base.rotation ||
	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
		return true;

	return false;
}
11790
 
6937 serge 11791
static bool needs_scaling(struct intel_plane_state *state)
11792
{
11793
	int src_w = drm_rect_width(&state->src) >> 16;
11794
	int src_h = drm_rect_height(&state->src) >> 16;
11795
	int dst_w = drm_rect_width(&state->dst);
11796
	int dst_h = drm_rect_height(&state->dst);
11797
 
11798
	return (src_w != dst_w || src_h != dst_h);
11799
}
11800
 
6084 serge 11801
/*
 * Compute what a plane state change implies for the crtc: watermark
 * recalculation, cxsr disabling, FBC updates and frontbuffer bits are
 * recorded in @crtc_state / the crtc's atomic scratch state.  Also runs
 * the gen9+ plane scaler check.  Returns 0 or a negative errno.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	int idx = intel_crtc->base.base.id, ret;
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;

	/* gen9+ non-cursor planes must pass the scaler setup check. */
	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 */
	if (!is_crtc_enabled)
		to_intel_plane_state(plane_state)->visible = visible = false;

	/* Invisible before and after: nothing to do. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
			 plane->base.id, fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(plane, plane_state)) {
		/* FIXME bollocks */
		pipe_config->update_wm_pre = true;
		pipe_config->update_wm_post = true;
	}

	/* Track which frontbuffer bits this crtc touches for flushing. */
	if (visible || was_visible)
		intel_crtc->atomic.fb_bits |=
			to_intel_plane(plane)->frontbuffer_bit;

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		intel_crtc->atomic.post_enable_primary = turn_on;
		intel_crtc->atomic.update_fbc = true;

		break;
	case DRM_PLANE_TYPE_CURSOR:
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		/*
		 * WaCxSRDisabledForSpriteScaling:ivb
		 *
		 * cstate->update_wm was already set above, so this flag will
		 * take effect when we commit and program watermarks.
		 */
		if (IS_IVYBRIDGE(dev) &&
		    needs_scaling(to_intel_plane_state(plane_state)) &&
		    !needs_scaling(old_plane_state))
			pipe_config->disable_lp_wm = true;

		break;
	}
	return 0;
}
11903
 
6084 serge 11904
static bool encoders_cloneable(const struct intel_encoder *a,
11905
			       const struct intel_encoder *b)
3031 serge 11906
{
6084 serge 11907
	/* masks could be asymmetric, so check both ways */
11908
	return a == b || (a->cloneable & (1 << b->type) &&
11909
			  b->cloneable & (1 << a->type));
11910
}
11911
 
11912
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11913
					 struct intel_crtc *crtc,
11914
					 struct intel_encoder *encoder)
11915
{
11916
	struct intel_encoder *source_encoder;
11917
	struct drm_connector *connector;
11918
	struct drm_connector_state *connector_state;
11919
	int i;
11920
 
11921
	for_each_connector_in_state(state, connector, connector_state, i) {
11922
		if (connector_state->crtc != &crtc->base)
11923
			continue;
11924
 
11925
		source_encoder =
11926
			to_intel_encoder(connector_state->best_encoder);
11927
		if (!encoders_cloneable(encoder, source_encoder))
11928
			return false;
11929
	}
11930
 
11931
	return true;
11932
}
11933
 
11934
static bool check_encoder_cloning(struct drm_atomic_state *state,
11935
				  struct intel_crtc *crtc)
11936
{
3031 serge 11937
	struct intel_encoder *encoder;
6084 serge 11938
	struct drm_connector *connector;
11939
	struct drm_connector_state *connector_state;
11940
	int i;
3031 serge 11941
 
6084 serge 11942
	for_each_connector_in_state(state, connector, connector_state, i) {
11943
		if (connector_state->crtc != &crtc->base)
11944
			continue;
11945
 
11946
		encoder = to_intel_encoder(connector_state->best_encoder);
11947
		if (!check_single_encoder_cloning(state, crtc, encoder))
11948
			return false;
3031 serge 11949
	}
11950
 
6084 serge 11951
	return true;
11952
}
11953
 
11954
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11955
				   struct drm_crtc_state *crtc_state)
11956
{
11957
	struct drm_device *dev = crtc->dev;
11958
	struct drm_i915_private *dev_priv = dev->dev_private;
11959
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11960
	struct intel_crtc_state *pipe_config =
11961
		to_intel_crtc_state(crtc_state);
11962
	struct drm_atomic_state *state = crtc_state->state;
11963
	int ret;
11964
	bool mode_changed = needs_modeset(crtc_state);
11965
 
11966
	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
11967
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11968
		return -EINVAL;
3031 serge 11969
	}
5060 serge 11970
 
6084 serge 11971
	if (mode_changed && !crtc_state->active)
6937 serge 11972
		pipe_config->update_wm_post = true;
6084 serge 11973
 
11974
	if (mode_changed && crtc_state->enable &&
11975
	    dev_priv->display.crtc_compute_clock &&
11976
	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
11977
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11978
							   pipe_config);
11979
		if (ret)
11980
			return ret;
5060 serge 11981
	}
6084 serge 11982
 
11983
	ret = 0;
6937 serge 11984
	if (dev_priv->display.compute_pipe_wm) {
11985
		ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
11986
		if (ret)
11987
			return ret;
11988
	}
11989
 
6084 serge 11990
	if (INTEL_INFO(dev)->gen >= 9) {
11991
		if (mode_changed)
11992
			ret = skl_update_scaler_crtc(pipe_config);
11993
 
11994
		if (!ret)
11995
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
11996
							 pipe_config);
11997
	}
11998
 
11999
	return ret;
3031 serge 12000
}
12001
 
6084 serge 12002
/* CRTC helper vtable: legacy base/LUT hooks plus the atomic commit hooks. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
12009
 
12010
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12011
{
12012
	struct intel_connector *connector;
12013
 
12014
	for_each_intel_connector(dev, connector) {
12015
		if (connector->base.encoder) {
12016
			connector->base.state->best_encoder =
12017
				connector->base.encoder;
12018
			connector->base.state->crtc =
12019
				connector->base.encoder->crtc;
12020
		} else {
12021
			connector->base.state->best_encoder = NULL;
12022
			connector->base.state->crtc = NULL;
12023
		}
12024
	}
12025
}
12026
 
4104 Serge 12027
static void
5060 serge 12028
connected_sink_compute_bpp(struct intel_connector *connector,
6084 serge 12029
			   struct intel_crtc_state *pipe_config)
4104 Serge 12030
{
12031
	int bpp = pipe_config->pipe_bpp;
12032
 
12033
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
12034
		connector->base.base.id,
5060 serge 12035
		connector->base.name);
4104 Serge 12036
 
12037
	/* Don't use an invalid EDID bpc value */
12038
	if (connector->base.display_info.bpc &&
12039
	    connector->base.display_info.bpc * 3 < bpp) {
12040
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12041
			      bpp, connector->base.display_info.bpc*3);
12042
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12043
	}
12044
 
6937 serge 12045
	/* Clamp bpp to default limit on screens without EDID 1.4 */
12046
	if (connector->base.display_info.bpc == 0) {
12047
		int type = connector->base.connector_type;
12048
		int clamp_bpp = 24;
12049
 
12050
		/* Fall back to 18 bpp when DP sink capability is unknown. */
12051
		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12052
		    type == DRM_MODE_CONNECTOR_eDP)
12053
			clamp_bpp = 18;
12054
 
12055
		if (bpp > clamp_bpp) {
12056
			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12057
				      bpp, clamp_bpp);
12058
			pipe_config->pipe_bpp = clamp_bpp;
12059
		}
4104 Serge 12060
	}
12061
}
12062
 
3746 Serge 12063
static int
4104 Serge 12064
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
6084 serge 12065
			  struct intel_crtc_state *pipe_config)
3746 Serge 12066
{
4104 Serge 12067
	struct drm_device *dev = crtc->base.dev;
6084 serge 12068
	struct drm_atomic_state *state;
12069
	struct drm_connector *connector;
12070
	struct drm_connector_state *connector_state;
12071
	int bpp, i;
3746 Serge 12072
 
6937 serge 12073
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
6084 serge 12074
		bpp = 10*3;
12075
	else if (INTEL_INFO(dev)->gen >= 5)
12076
		bpp = 12*3;
12077
	else
3746 Serge 12078
		bpp = 8*3;
12079
 
6084 serge 12080
 
3746 Serge 12081
	pipe_config->pipe_bpp = bpp;
12082
 
6084 serge 12083
	state = pipe_config->base.state;
12084
 
3746 Serge 12085
	/* Clamp display bpp to EDID value */
6084 serge 12086
	for_each_connector_in_state(state, connector, connector_state, i) {
12087
		if (connector_state->crtc != &crtc->base)
3746 Serge 12088
			continue;
12089
 
6084 serge 12090
		connected_sink_compute_bpp(to_intel_connector(connector),
12091
					   pipe_config);
3746 Serge 12092
	}
12093
 
12094
	return bpp;
12095
}
12096
 
4560 Serge 12097
/* Log the hardware (crtc_*) timing fields of @mode at KMS debug level. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
			"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
12107
 
4104 Serge 12108
static void intel_dump_pipe_config(struct intel_crtc *crtc,
6084 serge 12109
				   struct intel_crtc_state *pipe_config,
4104 Serge 12110
				   const char *context)
12111
{
6084 serge 12112
	struct drm_device *dev = crtc->base.dev;
12113
	struct drm_plane *plane;
12114
	struct intel_plane *intel_plane;
12115
	struct intel_plane_state *state;
12116
	struct drm_framebuffer *fb;
4104 Serge 12117
 
6084 serge 12118
	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
12119
		      context, pipe_config, pipe_name(crtc->pipe));
12120
 
4104 Serge 12121
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
12122
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12123
		      pipe_config->pipe_bpp, pipe_config->dither);
12124
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12125
		      pipe_config->has_pch_encoder,
12126
		      pipe_config->fdi_lanes,
12127
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12128
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12129
		      pipe_config->fdi_m_n.tu);
6084 serge 12130
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
4560 Serge 12131
		      pipe_config->has_dp_encoder,
6084 serge 12132
		      pipe_config->lane_count,
4560 Serge 12133
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12134
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12135
		      pipe_config->dp_m_n.tu);
5354 serge 12136
 
6084 serge 12137
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
5354 serge 12138
		      pipe_config->has_dp_encoder,
6084 serge 12139
		      pipe_config->lane_count,
5354 serge 12140
		      pipe_config->dp_m2_n2.gmch_m,
12141
		      pipe_config->dp_m2_n2.gmch_n,
12142
		      pipe_config->dp_m2_n2.link_m,
12143
		      pipe_config->dp_m2_n2.link_n,
12144
		      pipe_config->dp_m2_n2.tu);
12145
 
12146
	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12147
		      pipe_config->has_audio,
12148
		      pipe_config->has_infoframe);
12149
 
4104 Serge 12150
	DRM_DEBUG_KMS("requested mode:\n");
6084 serge 12151
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
4104 Serge 12152
	DRM_DEBUG_KMS("adjusted mode:\n");
6084 serge 12153
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12154
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
4560 Serge 12155
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12156
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12157
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
6084 serge 12158
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12159
		      crtc->num_scalers,
12160
		      pipe_config->scaler_state.scaler_users,
12161
		      pipe_config->scaler_state.scaler_id);
4104 Serge 12162
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12163
		      pipe_config->gmch_pfit.control,
12164
		      pipe_config->gmch_pfit.pgm_ratios,
12165
		      pipe_config->gmch_pfit.lvds_border_bits);
12166
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12167
		      pipe_config->pch_pfit.pos,
12168
		      pipe_config->pch_pfit.size,
12169
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12170
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
4560 Serge 12171
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
4104 Serge 12172
 
6084 serge 12173
	if (IS_BROXTON(dev)) {
12174
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
12175
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12176
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12177
			      pipe_config->ddi_pll_sel,
12178
			      pipe_config->dpll_hw_state.ebb0,
12179
			      pipe_config->dpll_hw_state.ebb4,
12180
			      pipe_config->dpll_hw_state.pll0,
12181
			      pipe_config->dpll_hw_state.pll1,
12182
			      pipe_config->dpll_hw_state.pll2,
12183
			      pipe_config->dpll_hw_state.pll3,
12184
			      pipe_config->dpll_hw_state.pll6,
12185
			      pipe_config->dpll_hw_state.pll8,
12186
			      pipe_config->dpll_hw_state.pll9,
12187
			      pipe_config->dpll_hw_state.pll10,
12188
			      pipe_config->dpll_hw_state.pcsdw12);
6937 serge 12189
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
6084 serge 12190
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12191
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12192
			      pipe_config->ddi_pll_sel,
12193
			      pipe_config->dpll_hw_state.ctrl1,
12194
			      pipe_config->dpll_hw_state.cfgcr1,
12195
			      pipe_config->dpll_hw_state.cfgcr2);
12196
	} else if (HAS_DDI(dev)) {
12197
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12198
			      pipe_config->ddi_pll_sel,
12199
			      pipe_config->dpll_hw_state.wrpll,
12200
			      pipe_config->dpll_hw_state.spll);
12201
	} else {
12202
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12203
			      "fp0: 0x%x, fp1: 0x%x\n",
12204
			      pipe_config->dpll_hw_state.dpll,
12205
			      pipe_config->dpll_hw_state.dpll_md,
12206
			      pipe_config->dpll_hw_state.fp0,
12207
			      pipe_config->dpll_hw_state.fp1);
12208
	}
5060 serge 12209
 
6084 serge 12210
	DRM_DEBUG_KMS("planes on this crtc\n");
12211
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12212
		intel_plane = to_intel_plane(plane);
12213
		if (intel_plane->pipe != crtc->pipe)
5060 serge 12214
			continue;
12215
 
6084 serge 12216
		state = to_intel_plane_state(plane->state);
12217
		fb = state->base.fb;
12218
		if (!fb) {
12219
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
12220
				"disabled, scaler_id = %d\n",
12221
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12222
				plane->base.id, intel_plane->pipe,
12223
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12224
				drm_plane_index(plane), state->scaler_id);
4104 Serge 12225
			continue;
6084 serge 12226
		}
4104 Serge 12227
 
6084 serge 12228
		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
12229
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12230
			plane->base.id, intel_plane->pipe,
12231
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
12232
			drm_plane_index(plane));
12233
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
12234
			fb->base.id, fb->width, fb->height, fb->pixel_format);
12235
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
12236
			state->scaler_id,
12237
			state->src.x1 >> 16, state->src.y1 >> 16,
12238
			drm_rect_width(&state->src) >> 16,
12239
			drm_rect_height(&state->src) >> 16,
12240
			state->dst.x1, state->dst.y1,
12241
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
4104 Serge 12242
	}
12243
}
12244
 
6084 serge 12245
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
5354 serge 12246
{
6084 serge 12247
	struct drm_device *dev = state->dev;
12248
	struct drm_connector *connector;
5354 serge 12249
	unsigned int used_ports = 0;
12250
 
12251
	/*
12252
	 * Walk the connector list instead of the encoder
12253
	 * list to detect the problem on ddi platforms
12254
	 * where there's just one encoder per digital port.
12255
	 */
6084 serge 12256
	drm_for_each_connector(connector, dev) {
12257
		struct drm_connector_state *connector_state;
12258
		struct intel_encoder *encoder;
5354 serge 12259
 
6084 serge 12260
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
12261
		if (!connector_state)
12262
			connector_state = connector->state;
12263
 
12264
		if (!connector_state->best_encoder)
5354 serge 12265
			continue;
12266
 
6084 serge 12267
		encoder = to_intel_encoder(connector_state->best_encoder);
5354 serge 12268
 
6084 serge 12269
		WARN_ON(!connector_state->crtc);
12270
 
5354 serge 12271
		switch (encoder->type) {
12272
			unsigned int port_mask;
12273
		case INTEL_OUTPUT_UNKNOWN:
12274
			if (WARN_ON(!HAS_DDI(dev)))
12275
				break;
12276
		case INTEL_OUTPUT_DISPLAYPORT:
12277
		case INTEL_OUTPUT_HDMI:
12278
		case INTEL_OUTPUT_EDP:
12279
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12280
 
12281
			/* the same port mustn't appear more than once */
12282
			if (used_ports & port_mask)
12283
				return false;
12284
 
12285
			used_ports |= port_mask;
12286
		default:
12287
			break;
12288
		}
12289
	}
12290
 
12291
	return true;
12292
}
12293
 
6084 serge 12294
static void
12295
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12296
{
12297
	struct drm_crtc_state tmp_state;
12298
	struct intel_crtc_scaler_state scaler_state;
12299
	struct intel_dpll_hw_state dpll_hw_state;
12300
	enum intel_dpll_id shared_dpll;
12301
	uint32_t ddi_pll_sel;
12302
	bool force_thru;
12303
 
12304
	/* FIXME: before the switch to atomic started, a new pipe_config was
12305
	 * kzalloc'd. Code that depends on any field being zero should be
12306
	 * fixed, so that the crtc_state can be safely duplicated. For now,
12307
	 * only fields that are know to not cause problems are preserved. */
12308
 
12309
	tmp_state = crtc_state->base;
12310
	scaler_state = crtc_state->scaler_state;
12311
	shared_dpll = crtc_state->shared_dpll;
12312
	dpll_hw_state = crtc_state->dpll_hw_state;
12313
	ddi_pll_sel = crtc_state->ddi_pll_sel;
12314
	force_thru = crtc_state->pch_pfit.force_thru;
12315
 
12316
	memset(crtc_state, 0, sizeof *crtc_state);
12317
 
12318
	crtc_state->base = tmp_state;
12319
	crtc_state->scaler_state = scaler_state;
12320
	crtc_state->shared_dpll = shared_dpll;
12321
	crtc_state->dpll_hw_state = dpll_hw_state;
12322
	crtc_state->ddi_pll_sel = ddi_pll_sel;
12323
	crtc_state->pch_pfit.force_thru = force_thru;
12324
}
12325
 
12326
static int
3746 Serge 12327
intel_modeset_pipe_config(struct drm_crtc *crtc,
6084 serge 12328
			  struct intel_crtc_state *pipe_config)
3031 serge 12329
{
6084 serge 12330
	struct drm_atomic_state *state = pipe_config->base.state;
3031 serge 12331
	struct intel_encoder *encoder;
6084 serge 12332
	struct drm_connector *connector;
12333
	struct drm_connector_state *connector_state;
12334
	int base_bpp, ret = -EINVAL;
12335
	int i;
4104 Serge 12336
	bool retry = true;
3031 serge 12337
 
6084 serge 12338
	clear_intel_crtc_state(pipe_config);
4104 Serge 12339
 
12340
	pipe_config->cpu_transcoder =
12341
		(enum transcoder) to_intel_crtc(crtc)->pipe;
3746 Serge 12342
 
4104 Serge 12343
	/*
12344
	 * Sanitize sync polarity flags based on requested ones. If neither
12345
	 * positive or negative polarity is requested, treat this as meaning
12346
	 * negative polarity.
12347
	 */
6084 serge 12348
	if (!(pipe_config->base.adjusted_mode.flags &
4104 Serge 12349
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
6084 serge 12350
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
4104 Serge 12351
 
6084 serge 12352
	if (!(pipe_config->base.adjusted_mode.flags &
4104 Serge 12353
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
6084 serge 12354
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
4104 Serge 12355
 
6084 serge 12356
	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12357
					     pipe_config);
12358
	if (base_bpp < 0)
3746 Serge 12359
		goto fail;
12360
 
4560 Serge 12361
	/*
12362
	 * Determine the real pipe dimensions. Note that stereo modes can
12363
	 * increase the actual pipe size due to the frame doubling and
12364
	 * insertion of additional space for blanks between the frame. This
12365
	 * is stored in the crtc timings. We use the requested mode to do this
12366
	 * computation to clearly distinguish it from the adjusted mode, which
12367
	 * can be changed by the connectors in the below retry loop.
12368
	 */
6084 serge 12369
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
12370
			       &pipe_config->pipe_src_w,
12371
			       &pipe_config->pipe_src_h);
4560 Serge 12372
 
4104 Serge 12373
encoder_retry:
12374
	/* Ensure the port clock defaults are reset when retrying. */
12375
	pipe_config->port_clock = 0;
12376
	pipe_config->pixel_multiplier = 1;
12377
 
12378
	/* Fill in default crtc timings, allow encoders to overwrite them. */
6084 serge 12379
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12380
			      CRTC_STEREO_DOUBLE);
4104 Serge 12381
 
3031 serge 12382
	/* Pass our mode to the connectors and the CRTC to give them a chance to
12383
	 * adjust it according to limitations or connector properties, and also
12384
	 * a chance to reject the mode entirely.
2330 Serge 12385
	 */
6084 serge 12386
	for_each_connector_in_state(state, connector, connector_state, i) {
12387
		if (connector_state->crtc != crtc)
3031 serge 12388
			continue;
3746 Serge 12389
 
6084 serge 12390
		encoder = to_intel_encoder(connector_state->best_encoder);
12391
 
12392
		if (!(encoder->compute_config(encoder, pipe_config))) {
12393
			DRM_DEBUG_KMS("Encoder config failure\n");
12394
			goto fail;
3746 Serge 12395
		}
6084 serge 12396
	}
3746 Serge 12397
 
4104 Serge 12398
	/* Set default port clock if not overwritten by the encoder. Needs to be
12399
	 * done afterwards in case the encoder adjusts the mode. */
12400
	if (!pipe_config->port_clock)
6084 serge 12401
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
4560 Serge 12402
			* pipe_config->pixel_multiplier;
2327 Serge 12403
 
4104 Serge 12404
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12405
	if (ret < 0) {
3031 serge 12406
		DRM_DEBUG_KMS("CRTC fixup failed\n");
12407
		goto fail;
12408
	}
2327 Serge 12409
 
4104 Serge 12410
	if (ret == RETRY) {
12411
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
12412
			ret = -EINVAL;
12413
			goto fail;
12414
		}
12415
 
12416
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12417
		retry = false;
12418
		goto encoder_retry;
12419
	}
12420
 
6084 serge 12421
	/* Dithering seems to not pass-through bits correctly when it should, so
12422
	 * only enable it on 6bpc panels. */
12423
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12424
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12425
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
3746 Serge 12426
 
3031 serge 12427
fail:
6084 serge 12428
	return ret;
3031 serge 12429
}
2327 Serge 12430
 
3031 serge 12431
static void
6084 serge 12432
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
3031 serge 12433
{
6084 serge 12434
	struct drm_crtc *crtc;
12435
	struct drm_crtc_state *crtc_state;
12436
	int i;
3031 serge 12437
 
6084 serge 12438
	/* Double check state. */
12439
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12440
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
3031 serge 12441
 
6084 serge 12442
		/* Update hwmode for vblank functions */
12443
		if (crtc->state->active)
12444
			crtc->hwmode = crtc->state->adjusted_mode;
5060 serge 12445
		else
6084 serge 12446
			crtc->hwmode.crtc_clock = 0;
6937 serge 12447
 
12448
		/*
12449
		 * Update legacy state to satisfy fbc code. This can
12450
		 * be removed when fbc uses the atomic state.
12451
		 */
12452
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12453
			struct drm_plane_state *plane_state = crtc->primary->state;
12454
 
12455
			crtc->primary->fb = plane_state->fb;
12456
			crtc->x = plane_state->src_x >> 16;
12457
			crtc->y = plane_state->src_y >> 16;
12458
		}
3031 serge 12459
	}
2330 Serge 12460
}
2327 Serge 12461
 
4560 Serge 12462
/*
 * Compare two clocks with a tolerance: equal, or within ~5% of their
 * mean (i.e. roughly 10% of either clock).  A zero clock only matches
 * another zero clock.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	delta = abs(clock1 - clock2);

	/* Accept when (delta + sum) * 100 / sum < 105, i.e. delta/sum < 5%. */
	return ((delta + sum) * 100) / sum < 105;
}
12479
 
3031 serge 12480
/*
 * Iterate over every intel_crtc on @dev whose pipe bit is set in @mask
 * (bit N selects pipe N).  @intel_crtc is the cursor variable.
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if (mask & (1 <<(intel_crtc)->pipe))
3031 serge 12485
 
3746 Serge 12486
static bool
6084 serge 12487
intel_compare_m_n(unsigned int m, unsigned int n,
12488
		  unsigned int m2, unsigned int n2,
12489
		  bool exact)
12490
{
12491
	if (m == m2 && n == n2)
12492
		return true;
12493
 
12494
	if (exact || !m || !n || !m2 || !n2)
12495
		return false;
12496
 
12497
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12498
 
7144 serge 12499
	if (n > n2) {
12500
		while (n > n2) {
6084 serge 12501
			m2 <<= 1;
12502
			n2 <<= 1;
12503
		}
7144 serge 12504
	} else if (n < n2) {
12505
		while (n < n2) {
6084 serge 12506
			m <<= 1;
12507
			n <<= 1;
12508
		}
12509
	}
12510
 
7144 serge 12511
	if (n != n2)
12512
		return false;
12513
 
12514
	return intel_fuzzy_clock_check(m, m2);
6084 serge 12515
}
12516
 
12517
static bool
12518
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12519
		       struct intel_link_m_n *m2_n2,
12520
		       bool adjust)
12521
{
12522
	if (m_n->tu == m2_n2->tu &&
12523
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12524
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12525
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
12526
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
12527
		if (adjust)
12528
			*m2_n2 = *m_n;
12529
 
12530
		return true;
12531
	}
12532
 
12533
	return false;
12534
}
12535
 
12536
static bool
4104 Serge 12537
intel_pipe_config_compare(struct drm_device *dev,
6084 serge 12538
			  struct intel_crtc_state *current_config,
12539
			  struct intel_crtc_state *pipe_config,
12540
			  bool adjust)
3746 Serge 12541
{
6084 serge 12542
	bool ret = true;
12543
 
12544
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12545
	do { \
12546
		if (!adjust) \
12547
			DRM_ERROR(fmt, ##__VA_ARGS__); \
12548
		else \
12549
			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12550
	} while (0)
12551
 
4104 Serge 12552
#define PIPE_CONF_CHECK_X(name)	\
12553
	if (current_config->name != pipe_config->name) { \
6084 serge 12554
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
4104 Serge 12555
			  "(expected 0x%08x, found 0x%08x)\n", \
12556
			  current_config->name, \
12557
			  pipe_config->name); \
6084 serge 12558
		ret = false; \
3746 Serge 12559
	}
12560
 
4104 Serge 12561
#define PIPE_CONF_CHECK_I(name)	\
12562
	if (current_config->name != pipe_config->name) { \
6084 serge 12563
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
4104 Serge 12564
			  "(expected %i, found %i)\n", \
12565
			  current_config->name, \
12566
			  pipe_config->name); \
6084 serge 12567
		ret = false; \
4104 Serge 12568
	}
12569
 
6084 serge 12570
#define PIPE_CONF_CHECK_M_N(name) \
12571
	if (!intel_compare_link_m_n(¤t_config->name, \
12572
				    &pipe_config->name,\
12573
				    adjust)) { \
12574
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12575
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12576
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12577
			  current_config->name.tu, \
12578
			  current_config->name.gmch_m, \
12579
			  current_config->name.gmch_n, \
12580
			  current_config->name.link_m, \
12581
			  current_config->name.link_n, \
12582
			  pipe_config->name.tu, \
12583
			  pipe_config->name.gmch_m, \
12584
			  pipe_config->name.gmch_n, \
12585
			  pipe_config->name.link_m, \
12586
			  pipe_config->name.link_n); \
12587
		ret = false; \
12588
	}
12589
 
12590
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12591
	if (!intel_compare_link_m_n(¤t_config->name, \
12592
				    &pipe_config->name, adjust) && \
12593
	    !intel_compare_link_m_n(¤t_config->alt_name, \
12594
				    &pipe_config->name, adjust)) { \
12595
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12596
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12597
			  "or tu %i gmch %i/%i link %i/%i, " \
12598
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12599
			  current_config->name.tu, \
12600
			  current_config->name.gmch_m, \
12601
			  current_config->name.gmch_n, \
12602
			  current_config->name.link_m, \
12603
			  current_config->name.link_n, \
12604
			  current_config->alt_name.tu, \
12605
			  current_config->alt_name.gmch_m, \
12606
			  current_config->alt_name.gmch_n, \
12607
			  current_config->alt_name.link_m, \
12608
			  current_config->alt_name.link_n, \
12609
			  pipe_config->name.tu, \
12610
			  pipe_config->name.gmch_m, \
12611
			  pipe_config->name.gmch_n, \
12612
			  pipe_config->name.link_m, \
12613
			  pipe_config->name.link_n); \
12614
		ret = false; \
12615
	}
12616
 
5354 serge 12617
/* This is required for BDW+ where there is only one set of registers for
12618
 * switching between high and low RR.
12619
 * This macro can be used whenever a comparison has to be made between one
12620
 * hw state and multiple sw state variables.
12621
 */
12622
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12623
	if ((current_config->name != pipe_config->name) && \
12624
		(current_config->alt_name != pipe_config->name)) { \
6084 serge 12625
			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
5354 serge 12626
				  "(expected %i or %i, found %i)\n", \
12627
				  current_config->name, \
12628
				  current_config->alt_name, \
12629
				  pipe_config->name); \
6084 serge 12630
			ret = false; \
5354 serge 12631
	}
12632
 
4104 Serge 12633
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12634
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
6084 serge 12635
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
4104 Serge 12636
			  "(expected %i, found %i)\n", \
12637
			  current_config->name & (mask), \
12638
			  pipe_config->name & (mask)); \
6084 serge 12639
		ret = false; \
4104 Serge 12640
	}
12641
 
4560 Serge 12642
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12643
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6084 serge 12644
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
4560 Serge 12645
			  "(expected %i, found %i)\n", \
12646
			  current_config->name, \
12647
			  pipe_config->name); \
6084 serge 12648
		ret = false; \
4560 Serge 12649
	}
12650
 
4104 Serge 12651
#define PIPE_CONF_QUIRK(quirk)	\
12652
	((current_config->quirks | pipe_config->quirks) & (quirk))
12653
 
12654
	PIPE_CONF_CHECK_I(cpu_transcoder);
12655
 
12656
	PIPE_CONF_CHECK_I(has_pch_encoder);
12657
	PIPE_CONF_CHECK_I(fdi_lanes);
6084 serge 12658
	PIPE_CONF_CHECK_M_N(fdi_m_n);
4104 Serge 12659
 
4560 Serge 12660
	PIPE_CONF_CHECK_I(has_dp_encoder);
6084 serge 12661
	PIPE_CONF_CHECK_I(lane_count);
5354 serge 12662
 
12663
	if (INTEL_INFO(dev)->gen < 8) {
6084 serge 12664
		PIPE_CONF_CHECK_M_N(dp_m_n);
4560 Serge 12665
 
6084 serge 12666
		if (current_config->has_drrs)
12667
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12668
	} else
12669
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
5354 serge 12670
 
6937 serge 12671
	PIPE_CONF_CHECK_I(has_dsi_encoder);
12672
 
6084 serge 12673
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12674
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12675
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12676
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12677
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12678
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
4104 Serge 12679
 
6084 serge 12680
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12681
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12682
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12683
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12684
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12685
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
4104 Serge 12686
 
6084 serge 12687
	PIPE_CONF_CHECK_I(pixel_multiplier);
5060 serge 12688
	PIPE_CONF_CHECK_I(has_hdmi_sink);
12689
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
6937 serge 12690
	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5060 serge 12691
		PIPE_CONF_CHECK_I(limited_color_range);
5354 serge 12692
	PIPE_CONF_CHECK_I(has_infoframe);
4104 Serge 12693
 
5060 serge 12694
	PIPE_CONF_CHECK_I(has_audio);
12695
 
6084 serge 12696
	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12697
			      DRM_MODE_FLAG_INTERLACE);
12698
 
12699
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6084 serge 12700
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12701
				      DRM_MODE_FLAG_PHSYNC);
6084 serge 12702
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12703
				      DRM_MODE_FLAG_NHSYNC);
6084 serge 12704
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12705
				      DRM_MODE_FLAG_PVSYNC);
6084 serge 12706
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12707
				      DRM_MODE_FLAG_NVSYNC);
12708
	}
12709
 
6084 serge 12710
	PIPE_CONF_CHECK_X(gmch_pfit.control);
4104 Serge 12711
	/* pfit ratios are autocomputed by the hw on gen4+ */
12712
	if (INTEL_INFO(dev)->gen < 4)
12713
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
6084 serge 12714
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
5060 serge 12715
 
6084 serge 12716
	if (!adjust) {
12717
		PIPE_CONF_CHECK_I(pipe_src_w);
12718
		PIPE_CONF_CHECK_I(pipe_src_h);
12719
 
12720
		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12721
		if (current_config->pch_pfit.enabled) {
12722
			PIPE_CONF_CHECK_X(pch_pfit.pos);
12723
			PIPE_CONF_CHECK_X(pch_pfit.size);
12724
		}
12725
 
12726
		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
4104 Serge 12727
	}
12728
 
4560 Serge 12729
	/* BDW+ don't expose a synchronous way to read the state */
12730
	if (IS_HASWELL(dev))
6084 serge 12731
		PIPE_CONF_CHECK_I(ips_enabled);
4104 Serge 12732
 
4560 Serge 12733
	PIPE_CONF_CHECK_I(double_wide);
12734
 
5060 serge 12735
	PIPE_CONF_CHECK_X(ddi_pll_sel);
12736
 
4104 Serge 12737
	PIPE_CONF_CHECK_I(shared_dpll);
12738
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12739
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12740
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12741
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5060 serge 12742
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6084 serge 12743
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
5354 serge 12744
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12745
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12746
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
4104 Serge 12747
 
4280 Serge 12748
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12749
		PIPE_CONF_CHECK_I(pipe_bpp);
12750
 
6084 serge 12751
	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12752
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
4560 Serge 12753
 
4104 Serge 12754
#undef PIPE_CONF_CHECK_X
12755
#undef PIPE_CONF_CHECK_I
5354 serge 12756
#undef PIPE_CONF_CHECK_I_ALT
4104 Serge 12757
#undef PIPE_CONF_CHECK_FLAGS
4560 Serge 12758
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
4104 Serge 12759
#undef PIPE_CONF_QUIRK
6084 serge 12760
#undef INTEL_ERR_OR_DBG_KMS
4104 Serge 12761
 
6084 serge 12762
	return ret;
3746 Serge 12763
}
12764
 
5354 serge 12765
/*
 * Cross-check the software DDB (display data buffer) allocation against what
 * the hardware actually has programmed.  Gen9+ (SKL) only; on mismatch a
 * DRM_ERROR is logged per plane/cursor, no corrective action is taken.
 */
static void check_wm_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct intel_crtc *intel_crtc;
	int plane;

	/* DDB allocations only exist on gen9+ */
	if (INTEL_INFO(dev)->gen < 9)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_ddb_entry *hw_entry, *sw_entry;
		const enum pipe pipe = intel_crtc->pipe;

		if (!intel_crtc->active)
			continue;

		/* planes */
		for_each_plane(dev_priv, pipe, plane) {
			hw_entry = &hw_ddb.plane[pipe][plane];
			sw_entry = &sw_ddb->plane[pipe][plane];

			if (skl_ddb_entry_equal(hw_entry, sw_entry))
				continue;

			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
				  "(expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_entry->start, sw_entry->end,
				  hw_entry->start, hw_entry->end);
		}

		/* cursor */
		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}
12814
 
4104 Serge 12815
/*
 * Verify that each connector touched by the committed atomic state is
 * internally consistent: its hw state matches sw tracking (via
 * intel_connector_check_state()) and its legacy ->encoder pointer agrees
 * with the atomic best_encoder.
 */
static void
check_connector_state(struct drm_device *dev,
		      struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_connector_state *state = connector->state;

		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(to_intel_connector(connector));

		I915_STATE_WARN(state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
3031 serge 12835
 
4104 Serge 12836
/*
 * For every encoder, verify that the set of connectors pointing at it agrees
 * with the encoder's crtc assignment, and that an encoder with no crtc is
 * really disabled in hardware (queried via ->get_hw_state).
 */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* encoder is enabled iff at least one connector uses it */
		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
3031 serge 12875
 
4104 Serge 12876
/*
 * Read back the full pipe configuration from hardware for every crtc that
 * went through a modeset (or fastset) and compare it with the committed
 * software state via intel_pipe_config_compare().
 *
 * NOTE: the old_crtc_state entries in @old_state are destroyed and their
 * storage reused as scratch space for the hw readout, so @old_state must not
 * be used for anything else afterwards.
 */
static void
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_crtc_state *pipe_config, *sw_config;
		bool active;

		/* only crtcs that changed this commit are worth checking */
		if (!needs_modeset(crtc->state) &&
		    !to_intel_crtc_state(crtc->state)->update_pipe)
			continue;

		/* recycle the old state's storage as the hw readout buffer */
		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
		pipe_config = to_intel_crtc_state(old_crtc_state);
		memset(pipe_config, 0, sizeof(*pipe_config));
		pipe_config->base.crtc = crtc;
		pipe_config->base.state = old_state;

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.id);

		active = dev_priv->display.get_pipe_config(intel_crtc,
							   pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->state->active;

		I915_STATE_WARN(crtc->state->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->state->active, active);

		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
		     "transitional active state does not match atomic hw state "
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);

		for_each_encoder_on_crtc(dev, crtc, encoder) {
			enum pipe pipe;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active != crtc->state->active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active, crtc->state->active);

			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
					"Encoder connected to wrong pipe %c\n",
					pipe_name(pipe));

			/* fold encoder-level config into the hw readout */
			if (active)
				encoder->get_config(encoder, pipe_config);
		}

		if (!crtc->state->active)
			continue;

		sw_config = to_intel_crtc_state(crtc->state);
		if (!intel_pipe_config_compare(dev, sw_config,
					       pipe_config, false)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(intel_crtc, pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(intel_crtc, sw_config,
					       "[sw state]");
		}
	}
}
12949
 
4104 Serge 12950
/*
 * Sanity-check the shared DPLL bookkeeping: per-pll active/on flags, the
 * crtc_mask reference counts, and the cached hw state must all agree with
 * both the crtcs' sw state and the actual hardware (via ->get_hw_state).
 */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
		     "more active pll users than references: %i vs %i\n",
		     pll->active, hweight32(pll->config.crtc_mask));
		I915_STATE_WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* count crtcs that reference this pll, enabled and active */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		I915_STATE_WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     hweight32(pll->config.crtc_mask), enabled_crtcs);

		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}
12998
 
6084 serge 12999
/*
 * Run all post-commit consistency checks (watermarks, connectors, encoders,
 * crtcs, shared DPLLs).  Called at the end of intel_atomic_commit() when a
 * hw-affecting change was made; all checks only warn, never repair.
 */
static void
intel_modeset_check_state(struct drm_device *dev,
			  struct drm_atomic_state *old_state)
{
	check_wm_state(dev);
	check_connector_state(dev, old_state);
	check_encoder_state(dev);
	check_crtc_state(dev, old_state);
	check_shared_dpll_state(dev);
}
13009
 
6084 serge 13010
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
4560 Serge 13011
				     int dotclock)
13012
{
13013
	/*
13014
	 * FDI already provided one idea for the dotclock.
13015
	 * Yell if the encoder disagrees.
13016
	 */
6084 serge 13017
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
4560 Serge 13018
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
6084 serge 13019
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
4560 Serge 13020
}
13021
 
5060 serge 13022
/*
 * Recompute crtc->scanline_offset, the platform-dependent correction applied
 * when converting the raw hw scanline counter into a logical scanline.
 */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		/* interlaced modes count fields, so halve vtotal */
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}
13059
 
6084 serge 13060
/*
 * For every crtc undergoing a modeset, drop its shared-DPLL assignment in the
 * new state (reset to DPLL_ID_PRIVATE) and clear the crtc's bit from the old
 * pll's crtc_mask so the pll can be reassigned during compute.  No-op on
 * platforms without ->crtc_compute_clock.
 */
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		/* note: old_dpll comes from the *current* state, not crtc_state */
		int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;

		if (!needs_modeset(crtc_state))
			continue;

		to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;

		if (old_dpll == DPLL_ID_PRIVATE)
			continue;

		/* lazily fetch the dpll state copy the first time it's needed */
		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
	}
}
13090
 
6084 serge 13091
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Returns 0 on success or a negative error code from
 * intel_atomic_get_crtc_state().
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		/* remember the first two crtcs being enabled */
		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/* single already-enabled pipe: the new pipe must wait on it;
	 * otherwise the second newly-enabled pipe waits on the first one */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
2327 Serge 13155
 
6084 serge 13156
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13157
{
13158
	struct drm_crtc *crtc;
13159
	struct drm_crtc_state *crtc_state;
13160
	int ret = 0;
3031 serge 13161
 
6084 serge 13162
	/* add all active pipes to the state */
13163
	for_each_crtc(state->dev, crtc) {
13164
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13165
		if (IS_ERR(crtc_state))
13166
			return PTR_ERR(crtc_state);
3243 Serge 13167
 
6084 serge 13168
		if (!crtc_state->active || needs_modeset(crtc_state))
13169
			continue;
5060 serge 13170
 
6084 serge 13171
		crtc_state->mode_changed = true;
5060 serge 13172
 
6084 serge 13173
		ret = drm_atomic_add_affected_connectors(state, crtc);
13174
		if (ret)
13175
			break;
3031 serge 13176
 
6084 serge 13177
		ret = drm_atomic_add_affected_planes(state, crtc);
13178
		if (ret)
13179
			break;
5060 serge 13180
	}
3031 serge 13181
 
13182
	return ret;
2330 Serge 13183
}
2327 Serge 13184
 
6084 serge 13185
/*
 * Extra validation run only when at least one crtc needs a full modeset:
 * digital port conflict check, active_crtcs bookkeeping, cdclk recompute
 * (possibly forcing a modeset on all pipes), shared-DPLL release, and the
 * Haswell two-pipe plane workaround.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = state->dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;

	/* update the active_crtcs bitmask from the new per-crtc states */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);

		/* a cdclk change forces a modeset on every active pipe */
		if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
			ret = intel_modeset_all_pipes(state);

		if (ret < 0)
			return ret;

		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
			      intel_state->cdclk, intel_state->dev_cdclk);
	} else
		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13236
 
6937 serge 13237
/*
13238
 * Handle calculation of various watermark data at the end of the atomic check
13239
 * phase.  The code here should be run after the per-crtc and per-plane 'check'
13240
 * handlers to ensure that all derived state has been updated.
13241
 */
13242
static void calc_watermark_data(struct drm_atomic_state *state)
13243
{
13244
	struct drm_device *dev = state->dev;
13245
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13246
	struct drm_crtc *crtc;
13247
	struct drm_crtc_state *cstate;
13248
	struct drm_plane *plane;
13249
	struct drm_plane_state *pstate;
13250
 
13251
	/*
13252
	 * Calculate watermark configuration details now that derived
13253
	 * plane/crtc state is all properly updated.
13254
	 */
13255
	drm_for_each_crtc(crtc, dev) {
13256
		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13257
			crtc->state;
13258
 
13259
		if (cstate->active)
13260
			intel_state->wm_config.num_pipes_active++;
13261
	}
13262
	drm_for_each_legacy_plane(plane, dev) {
13263
		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13264
			plane->state;
13265
 
13266
		if (!to_intel_plane_state(pstate)->visible)
13267
			continue;
13268
 
13269
		intel_state->wm_config.sprites_enabled = true;
13270
		if (pstate->crtc_w != pstate->src_w >> 16 ||
13271
		    pstate->crtc_h != pstate->src_h >> 16)
13272
			intel_state->wm_config.sprites_scaled = true;
13273
	}
13274
}
13275
 
6084 serge 13276
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * i915's drm_mode_config_funcs.atomic_check implementation: runs the core
 * modeset checks, computes the full pipe config for each crtc needing a
 * modeset (with an optional fastboot/fastset downgrade when the computed
 * config matches the current one), then runs global modeset checks, plane
 * checks, FBC crtc selection and watermark precomputation.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* reset the transitional commit tracking for this crtc */
		memset(&to_intel_crtc(crtc)->atomic, 0,
		       sizeof(struct intel_crtc_atomic_commit));

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
			crtc_state->mode_changed = true;

		if (!crtc_state->enable) {
			if (needs_modeset(crtc_state))
				any_ms = true;
			continue;
		}

		if (!needs_modeset(crtc_state))
			continue;

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret)
			return ret;

		/* fastboot: if the computed config matches the live one,
		 * downgrade the full modeset to a pipe update (fastset) */
		if (i915.fastboot &&
		    intel_pipe_config_compare(dev,
					to_intel_crtc_state(crtc->state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			to_intel_crtc_state(crtc_state)->update_pipe = true;
		}

		if (needs_modeset(crtc_state)) {
			any_ms = true;

			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else
		intel_state->cdclk = dev_priv->cdclk_freq;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, state);
	calc_watermark_data(state);

	return 0;
}
13364
 
6937 serge 13365
/*
 * Prepare an atomic commit: reject async commits, wait for pending page
 * flips, pin/prepare the planes' framebuffers, and (outside struct_mutex)
 * wait for any outstanding GPU requests the new plane state depends on.
 *
 * On request-wait failure the prepared planes are cleaned up again.
 * Returns 0 on success or a negative error code; -EIO from the waits is
 * swallowed so display updates still work during a GPU hang.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state,
				       bool async)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;

	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		/* workqueue flush disabled in this (KolibriOS) port */
//		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
//			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
		u32 reset_counter;

		/* sample the reset counter, then wait without struct_mutex */
		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
		mutex_unlock(&dev->struct_mutex);

		for_each_plane_in_state(state, plane, plane_state, i) {
			struct intel_plane_state *intel_plane_state =
				to_intel_plane_state(plane_state);

			if (!intel_plane_state->wait_req)
				continue;

			ret = __i915_wait_request(intel_plane_state->wait_req,
						  reset_counter, true,
						  NULL, NULL);

			/* Swallow -EIO errors to allow updates during hw lockup. */
			if (ret == -EIO)
				ret = 0;

			if (ret)
				break;
		}

		if (!ret)
			return 0;

		/* a wait failed: undo the plane preparation */
		mutex_lock(&dev->struct_mutex);
		drm_atomic_helper_cleanup_planes(dev, state);
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}
13433
 
7144 serge 13434
/*
 * Wait (up to 50ms per pipe) for one vblank on every pipe in @crtc_mask.
 * First pass snapshots each pipe's vblank count while holding a vblank
 * reference; second pass waits for the count to advance, then drops the
 * reference.  Pipes whose vblank reference cannot be taken are skipped.
 */
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (WARN_ON(ret != 0)) {
			/* can't get a vblank ref: drop the pipe from the mask */
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
	}

	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		lret = wait_event_timeout(dev->vblank[pipe].queue,
				last_vblank_count[pipe] !=
					drm_crtc_vblank_count(crtc),
				msecs_to_jiffies(50));

		/* timeout means the vblank never arrived; warn but continue */
		WARN_ON(!lret);

		drm_crtc_vblank_put(crtc);
	}
}
13477
 
13478
static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13479
{
13480
	/* fb updated, need to unpin old fb */
13481
	if (crtc_state->fb_changed)
13482
		return true;
13483
 
13484
	/* wm changes, need vblank before final wm's */
13485
	if (crtc_state->update_wm_post)
13486
		return true;
13487
 
13488
	/*
13489
	 * cxsr is re-enabled after vblank.
13490
	 * This is already handled by crtc_state->update_wm_post,
13491
	 * but added for clarity.
13492
	 */
13493
	if (crtc_state->disable_cxsr)
13494
		return true;
13495
 
13496
	return false;
13497
}
13498
 
6084 serge 13499
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * The commit sequence is order-critical: prepare/swap state, disable the
 * pipes that are being modeset, update global state (DPLLs, cdclk), then
 * re-enable pipes and commit planes, wait for vblanks, and finally run the
 * post-plane updates and release power domains.
 *
 * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool async)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret = 0, i;
	bool hw_check = intel_state->modeset;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	unsigned crtc_vblank_mask = 0;

	ret = intel_atomic_prepare_commit(dev, state, async);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		return ret;
	}

	drm_atomic_helper_swap_state(dev, state);
	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;

	if (intel_state->modeset) {
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
		       sizeof(intel_state->min_pixclk));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->atomic_cdclk_freq = intel_state->cdclk;

		/* held across the whole modeset; released near the end */
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
	}

	/* disable pass: shut down every pipe that needs a full modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(crtc->state) ||
		    to_intel_crtc_state(crtc->state)->update_pipe) {
			hw_check = true;

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(crtc->state));
		}

		if (!needs_modeset(crtc->state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(crtc_state));

		if (crtc_state->active) {
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
			dev_priv->display.crtc_disable(crtc);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active)
				intel_update_watermarks(crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		intel_shared_dpll_commit(state);

		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		if (dev_priv->display.modeset_commit_cdclk &&
		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
			dev_priv->display.modeset_commit_cdclk(state);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		bool modeset = needs_modeset(crtc->state);
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc->state);
		bool update_pipe = !modeset && pipe_config->update_pipe;

		if (modeset && crtc->state->active) {
			update_scanline_offset(to_intel_crtc(crtc));
			dev_priv->display.crtc_enable(crtc);
		}

		/* fastsets didn't run pre_plane_update in the disable pass */
		if (!modeset)
			intel_pre_plane_update(to_intel_crtc_state(crtc_state));

		if (crtc->state->active && intel_crtc->atomic.update_fbc)
			intel_fbc_enable(intel_crtc);

		if (crtc->state->active &&
		    (crtc->state->planes_changed || update_pipe))
			drm_atomic_helper_commit_planes_on_crtc(crtc_state);

		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
			crtc_vblank_mask |= 1 << i;
	}

	/* FIXME: add subpixel order */

	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_post_plane_update(to_intel_crtc(crtc));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);
	}

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	/* note: consumes the old crtc states stored in @state */
	if (hw_check)
		intel_modeset_check_state(dev, state);

	drm_atomic_state_free(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	return 0;
}
13662
 
6084 serge 13663
/*
 * intel_crtc_restore_mode - force a full modeset on @crtc via the atomic API.
 *
 * Builds a one-off atomic state for @crtc, marks it mode_changed and commits
 * it, retrying on -EDEADLK (modeset lock contention) after backing off.
 * Used by legacy paths that need the current mode reprogrammed from scratch.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
			      crtc->base.id);
		return;
	}

	/* Reuse the acquire context of the legacy modeset locking path. */
	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		/* Nothing to restore on an inactive crtc. */
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		/* Lock contention: drop state and locks, then retry. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/*
	 * On successful commit the state is consumed by the atomic core, so
	 * it must only be freed on failure — or when we bail out early via
	 * the (admittedly unusual) goto into the if body above.
	 */
	if (ret)
out:
		drm_atomic_state_free(state);
}
3031 serge 13700
 
6084 serge 13701
#undef for_each_intel_crtc_masked
5060 serge 13702
 
2330 Serge 13703
/*
 * CRTC vfuncs: legacy entry points (set_config, page_flip) are routed
 * through the atomic helpers / i915 atomic implementations.
 */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};
2327 Serge 13711
 
4104 Serge 13712
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13713
				      struct intel_shared_dpll *pll,
13714
				      struct intel_dpll_hw_state *hw_state)
3031 serge 13715
{
4104 Serge 13716
	uint32_t val;
3031 serge 13717
 
6937 serge 13718
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
5060 serge 13719
		return false;
13720
 
4104 Serge 13721
	val = I915_READ(PCH_DPLL(pll->id));
13722
	hw_state->dpll = val;
13723
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13724
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13725
 
6937 serge 13726
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
13727
 
4104 Serge 13728
	return val & DPLL_VCO_ENABLE;
13729
}
13730
 
13731
/*
 * Program the FP0/FP1 divisor registers for a PCH DPLL from the cached
 * hardware state. Called before the PLL is enabled.
 */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}
13737
 
13738
/*
 * Enable a PCH DPLL. The register write sequence and delays are
 * hardware-mandated: refclock first, then DPLL enable, wait for lock,
 * then rewrite to latch the pixel multiplier.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
13759
 
13760
/*
 * Disable a PCH DPLL after verifying that no PCH transcoder still
 * depends on it.
 */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
13776
 
13777
/*
 * Human-readable names for the two IBX/CPT shared DPLLs, indexed by pll->id.
 * Const-correct: the strings and the pointer table are both read-only.
 */
static const char * const ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
13781
 
13782
static void ibx_pch_dpll_init(struct drm_device *dev)
13783
{
13784
	struct drm_i915_private *dev_priv = dev->dev_private;
13785
	int i;
13786
 
13787
	dev_priv->num_shared_dpll = 2;
13788
 
13789
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13790
		dev_priv->shared_dplls[i].id = i;
13791
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
13792
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
13793
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13794
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
13795
		dev_priv->shared_dplls[i].get_hw_state =
13796
			ibx_pch_dpll_get_hw_state;
3031 serge 13797
	}
13798
}
13799
 
4104 Serge 13800
/*
 * Platform dispatch for shared-DPLL setup: DDI platforms (HSW+) use the
 * DDI PLL code, IBX/CPT PCH platforms use the PCH DPLLs, everything else
 * has no shared DPLLs.
 */
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	/* shared_dplls[] is a fixed-size array; catch init code overrunning it. */
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
13813
 
6084 serge 13814
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       const struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = new_state->fb;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret = 0;

	/* Neither an old nor a new fb: nothing to pin or track. */
	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state))
			ret = i915_gem_object_wait_rendering(old_obj, true);

		/* Swallow -EIO errors to allow updates during hw lockup. */
		if (ret && ret != -EIO)
			return ret;
	}

	/* For framebuffer backed by dmabuf, wait for fence */

	if (!obj) {
		/* Plane being disabled: nothing to pin. */
		ret = 0;
	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		/* Old platforms scan the cursor out of physical memory. */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
	}

	if (ret == 0) {
		if (obj) {
			struct intel_plane_state *plane_state =
				to_intel_plane_state(new_state);

			/* Remember the request to wait on before flipping. */
			i915_gem_request_assign(&plane_state->wait_req,
						obj->last_write_req);
		}

		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
	}

	return ret;
}
13893
 
6084 serge 13894
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane:
 * unpins the old object, undoes frontbuffer tracking if prepare_fb
 * was aborted, and drops the pending wait request.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       const struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_plane_state *old_intel_state;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	/* Physical cursors were attach_phys'd, not pinned — skip unpin. */
	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state);

	/* prepare_fb aborted? */
	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);

	/* Drop the reference taken on obj->last_write_req in prepare_fb. */
	i915_gem_request_assign(&old_intel_state->wait_req, NULL);

}
13930
 
6084 serge 13931
int
13932
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
5354 serge 13933
{
6084 serge 13934
	int max_scale;
13935
	struct drm_device *dev;
13936
	struct drm_i915_private *dev_priv;
13937
	int crtc_clock, cdclk;
5060 serge 13938
 
7144 serge 13939
	if (!intel_crtc || !crtc_state->base.enable)
6084 serge 13940
		return DRM_PLANE_HELPER_NO_SCALING;
5060 serge 13941
 
6084 serge 13942
	dev = intel_crtc->base.dev;
13943
	dev_priv = dev->dev_private;
13944
	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13945
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
5060 serge 13946
 
6937 serge 13947
	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
6084 serge 13948
		return DRM_PLANE_HELPER_NO_SCALING;
13949
 
13950
	/*
13951
	 * skl max scale is lower of:
13952
	 *    close to 3 but not 3, -1 is for that purpose
13953
	 *            or
13954
	 *    cdclk/crtc_clock
13955
	 */
13956
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13957
 
13958
	return max_scale;
13959
}
13960
 
13961
/*
 * Atomic check for the primary plane: computes the allowed scaling range
 * (gen9+ can use a pipe scaler when no colorkey is in use) and delegates
 * clipping/position validation to the DRM plane helper.
 */
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;

	if (INTEL_INFO(plane->dev)->gen >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		/* gen9+ primary planes may be positioned freely on the pipe */
		can_position = true;
	}

	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					     &state->dst, &state->clip,
					     min_scale, max_scale,
					     can_position, true,
					     &state->visible);
}
13987
 
6084 serge 13988
/*
 * Called just before plane registers are written for a crtc: starts the
 * vblank evasion critical section, and for fastsets (no full modeset)
 * applies the pipe config update / detaches unused gen9 scalers.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	/* Full modesets reprogram the pipe anyway; nothing more to do here. */
	if (modeset)
		return;

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_INFO(dev)->gen >= 9)
		skl_detach_scalers(intel_crtc);
}
5060 serge 14008
 
6084 serge 14009
/*
 * Counterpart of intel_begin_crtc_commit(): closes the vblank evasion
 * critical section once the plane registers have been written.
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	intel_pipe_update_end(to_intel_crtc(crtc));
}
14016
 
6084 serge 14017
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite). Tears down the DRM core state first, then frees the
 * containing intel_plane allocation.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
14030
 
6084 serge 14031
/*
 * Plane vfuncs shared by primary and cursor planes; legacy entry points
 * and properties are routed through the atomic helpers.
 */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};
14042
 
14043
/*
 * Allocate and register the primary plane for @pipe, selecting the
 * per-generation format list and update/disable hooks.
 * Returns the new drm_plane, or NULL on allocation failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	struct intel_plane_state *state;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		kfree(primary);
		return NULL;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9+ primary planes can use the shared pipe scalers */
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	/* gen2/3 FBC: plane A feeds pipe B (see intel_crtc_init) */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	/* Pick format table and plane programming hooks per generation. */
	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	/* NOTE(review): return value ignored here — failure would leak; confirm upstream behavior */
	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY, NULL);

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;
}
14113
 
6084 serge 14114
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
5060 serge 14115
{
6084 serge 14116
	if (!dev->mode_config.rotation_property) {
14117
		unsigned long flags = BIT(DRM_ROTATE_0) |
14118
			BIT(DRM_ROTATE_180);
5060 serge 14119
 
6084 serge 14120
		if (INTEL_INFO(dev)->gen >= 9)
14121
			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
5060 serge 14122
 
6084 serge 14123
		dev->mode_config.rotation_property =
14124
			drm_mode_create_rotation_property(dev, flags);
14125
	}
14126
	if (dev->mode_config.rotation_property)
14127
		drm_object_attach_property(&plane->base.base,
14128
				dev->mode_config.rotation_property,
14129
				plane->base.state->rotation);
5060 serge 14130
}
14131
 
14132
/*
 * Atomic check for the cursor plane: validates position via the DRM
 * helper (no scaling allowed), then checks cursor dimensions, backing
 * object size, tiling, and the CHV pipe C straddle restriction.
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					    &state->dst, &state->clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* 4 bytes/pixel, width rounded up to a power of two. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
	    state->visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}
14192
 
6084 serge 14193
static void
14194
intel_disable_cursor_plane(struct drm_plane *plane,
14195
			   struct drm_crtc *crtc)
5354 serge 14196
{
7144 serge 14197
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14198
 
14199
	intel_crtc->cursor_addr = 0;
14200
	intel_crtc_update_cursor(crtc, NULL);
5060 serge 14201
}
5354 serge 14202
 
6084 serge 14203
static void
7144 serge 14204
intel_update_cursor_plane(struct drm_plane *plane,
14205
			  const struct intel_crtc_state *crtc_state,
14206
			  const struct intel_plane_state *state)
5354 serge 14207
{
7144 serge 14208
	struct drm_crtc *crtc = crtc_state->base.crtc;
14209
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6084 serge 14210
	struct drm_device *dev = plane->dev;
14211
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14212
	uint32_t addr;
5354 serge 14213
 
6084 serge 14214
	if (!obj)
14215
		addr = 0;
14216
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
14217
		addr = i915_gem_obj_ggtt_offset(obj);
14218
	else
14219
		addr = obj->phys_handle->busaddr;
5354 serge 14220
 
6084 serge 14221
	intel_crtc->cursor_addr = addr;
7144 serge 14222
	intel_crtc_update_cursor(crtc, state);
5354 serge 14223
}
14224
 
5060 serge 14225
/*
 * Allocate and register the cursor plane for @pipe.
 * Returns the new drm_plane, or NULL on allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;
	struct intel_plane_state *state;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		kfree(cursor);
		return NULL;
	}
	cursor->base.state = &state->base;

	/* Cursors are never scaled. */
	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR, NULL);

	/* gen4+: expose 0/180 degree rotation on the cursor. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				state->base.rotation);
	}

	/* gen9+ plane states carry a scaler id; -1 means none assigned. */
	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;
}
14276
 
6084 serge 14277
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14278
	struct intel_crtc_state *crtc_state)
14279
{
14280
	int i;
14281
	struct intel_scaler *intel_scaler;
14282
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14283
 
14284
	for (i = 0; i < intel_crtc->num_scalers; i++) {
14285
		intel_scaler = &scaler_state->scalers[i];
14286
		intel_scaler->in_use = 0;
14287
		intel_scaler->mode = PS_SCALER_MODE_DYN;
14288
	}
14289
 
14290
	scaler_state->scaler_id = -1;
14291
}
14292
 
2330 Serge 14293
/*
 * Allocate and register the crtc for @pipe, together with its state,
 * scalers (gen9+), primary and cursor planes, and gamma LUT. On any
 * failure everything allocated so far is torn down and the function
 * returns silently.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* pipe C has a single scaler; A and B get the full set */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs, NULL);
	if (ret)
		goto fail;

	/* Identity gamma ramp as the default. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 == "unknown": force full reprogramming on first cursor update. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	/* Code elsewhere relies on drm crtc index == pipe. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
2327 Serge 14378
 
4560 Serge 14379
/*
 * Return the pipe the connector's encoder is currently driving, or
 * INVALID_PIPE when the connector has no encoder/crtc. Caller must hold
 * the connection_mutex (asserted below).
 */
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}
14391
 
3031 serge 14392
/*
 * DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID handler: translate a userspace
 * crtc id into the hardware pipe it is mapped to.
 * Returns 0 on success, -ENOENT when the crtc id does not exist.
 */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);

	if (!drmmode_crtc) {
		DRM_ERROR("no such CRTC id\n");
		return -ENOENT;
	}

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}
2327 Serge 14411
 
3031 serge 14412
static int intel_encoder_clones(struct intel_encoder *encoder)
2330 Serge 14413
{
3031 serge 14414
	struct drm_device *dev = encoder->base.dev;
14415
	struct intel_encoder *source_encoder;
2330 Serge 14416
	int index_mask = 0;
14417
	int entry = 0;
2327 Serge 14418
 
5354 serge 14419
	for_each_intel_encoder(dev, source_encoder) {
5060 serge 14420
		if (encoders_cloneable(encoder, source_encoder))
2330 Serge 14421
			index_mask |= (1 << entry);
3031 serge 14422
 
2330 Serge 14423
		entry++;
14424
	}
2327 Serge 14425
 
2330 Serge 14426
	return index_mask;
14427
}
2327 Serge 14428
 
2330 Serge 14429
static bool has_edp_a(struct drm_device *dev)
14430
{
14431
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 14432
 
2330 Serge 14433
	if (!IS_MOBILE(dev))
14434
		return false;
2327 Serge 14435
 
2330 Serge 14436
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14437
		return false;
2327 Serge 14438
 
5060 serge 14439
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
2330 Serge 14440
		return false;
2327 Serge 14441
 
2330 Serge 14442
	return true;
14443
}
2327 Serge 14444
 
5060 serge 14445
/*
 * Whether a CRT (VGA) connector may be present on this platform: rules
 * out platforms without CRT support, then checks the LPT-H fuse strap,
 * the DDI A lane configuration and the VBT before assuming presence.
 */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* gen9+ dropped the dedicated CRT DAC */
	if (INTEL_INFO(dev)->gen >= 9)
		return false;

	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* Finally, defer to the VBT's integrated-CRT flag. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
14470
 
2330 Serge 14471
/*
 * Probe and register every display output (encoder/connector) for this
 * device.  The probe strategy is chosen per platform family: Broxton
 * (fixed DDI set), DDI-based (strap registers), PCH-split (PCH detect
 * bits), VLV/CHV (detect bits cross-checked against the VBT for eDP),
 * legacy gen3/4 (SDVO/HDMI/DP probing) and gen2 (DVO only).  Probe
 * order within each branch is significant and must be preserved.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D is eDP or HDMI, never both. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_B))
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DP_B, PORT_B);

		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_C))
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	/* TV output is not supported in this KolibriOS port. */
//   if (SUPPORTS_TV(dev))
//       intel_tv_init(dev);

	intel_psr_init(dev);

	/* Fill in possible_crtcs/possible_clones for every registered encoder. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14636
 
6084 serge 14637
/*
 * drm_framebuffer_funcs.destroy: tear down an intel framebuffer and
 * drop its reference on the backing GEM object.  Must take
 * struct_mutex around the refcount bookkeeping and unreference.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	/* Underflow here would mean an unbalanced init/destroy pair. */
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}
2330 Serge 14649
 
6084 serge 14650
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14651
						struct drm_file *file,
14652
						unsigned int *handle)
14653
{
14654
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14655
	struct drm_i915_gem_object *obj = intel_fb->obj;
14656
 
14657
	if (obj->userptr.mm) {
14658
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14659
		return -EINVAL;
14660
	}
14661
 
14662
	return drm_gem_handle_create(file, &obj->base, handle);
14663
}
14664
 
14665
/*
 * drm_framebuffer_funcs.dirty: userspace notification that the fb
 * contents changed.  The clip rectangles are ignored; the whole
 * object is flushed so features like FBC/PSR pick up the update.
 * Always returns 0.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
14681
 
2335 Serge 14682
/* Framebuffer vtable shared by all intel framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
2327 Serge 14687
 
6084 serge 14688
static
14689
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14690
			 uint32_t pixel_format)
14691
{
14692
	u32 gen = INTEL_INFO(dev)->gen;
14693
 
14694
	if (gen >= 9) {
7144 serge 14695
		int cpp = drm_format_plane_cpp(pixel_format, 0);
14696
 
6084 serge 14697
		/* "The stride in bytes must not exceed the of the size of 8K
14698
		 *  pixels and 32K bytes."
14699
		 */
7144 serge 14700
		return min(8192 * cpp, 32768);
6937 serge 14701
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
6084 serge 14702
		return 32*1024;
14703
	} else if (gen >= 4) {
14704
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14705
			return 16*1024;
14706
		else
14707
			return 32*1024;
14708
	} else if (gen >= 3) {
14709
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14710
			return 8*1024;
14711
		else
14712
			return 16*1024;
14713
	} else {
14714
		/* XXX DSPC is limited to 4k tiled */
14715
		return 8*1024;
14716
	}
14717
}
14718
 
5060 serge 14719
/*
 * Validate a user-supplied framebuffer description against hardware
 * limits (tiling modifier, stride alignment, pitch limit, pixel
 * format, size) and, on success, initialize @intel_fb around @obj.
 * Returns 0 or a negative errno.  Caller must hold struct_mutex.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's tiling. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fallthrough - Y/Yf tiling is accepted on gen9+ */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev_priv,
						     mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* For X-tiled objects the pitch must match the fence stride. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
		    INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	/* Balanced by the decrement in intel_user_framebuffer_destroy(). */
	intel_fb->obj->framebuffer_references++;

	/* KolibriOS-specific framebuffer bookkeeping. */
	kolibri_framebuffer_init(intel_fb);

	return 0;
}
2327 Serge 14872
 
6084 serge 14873
/*
 * drm_mode_config_funcs.fb_create: build a framebuffer from a
 * userspace ADDFB2 request.  Looks up the GEM object by handle and
 * wraps it; on failure the lookup reference is dropped again.
 * Returns the framebuffer or an ERR_PTR.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	/* Local copy: intel_framebuffer_init() may rewrite modifier[0]. */
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd.handles[0]));
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
14893
 
14894
#ifndef CONFIG_DRM_FBDEV_EMULATION
/* No-op stub when fbdev emulation is compiled out. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
2327 Serge 14899
 
2360 Serge 14900
/* Mode-config vtable: fb creation, hotplug notification, atomic hooks. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
2327 Serge 14908
 
3031 serge 14909
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* DPLL calculation. */
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_CHERRYVIEW(dev))
		dev_priv->display.find_dpll = chv_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/* Pipe config readout, plane takeover and CRTC enable/disable. */
	if (INTEL_INFO(dev)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev) || IS_G4X(dev))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	/* FDI link training and CDCLK reprogramming hooks. */
	if (IS_GEN5(dev)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
		if (IS_BROADWELL(dev)) {
			dev_priv->display.modeset_commit_cdclk =
				broadwell_modeset_commit_cdclk;
			dev_priv->display.modeset_calc_cdclk =
				broadwell_modeset_calc_cdclk;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			broxton_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broxton_modeset_calc_cdclk;
	}

	/* Page-flip implementation per generation. */
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}

	mutex_init(&dev_priv->pps_mutex);
}
15078
 
15079
/*
15080
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15081
 * resume, or other times.  This quirk makes sure that's the case for
15082
 * affected systems.
15083
 */
15084
static void quirk_pipea_force(struct drm_device *dev)
2330 Serge 15085
{
15086
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 15087
 
3031 serge 15088
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15089
	DRM_INFO("applying pipe a force quirk\n");
15090
}
2327 Serge 15091
 
5354 serge 15092
static void quirk_pipeb_force(struct drm_device *dev)
15093
{
15094
	struct drm_i915_private *dev_priv = dev->dev_private;
15095
 
15096
	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15097
	DRM_INFO("applying pipe b force quirk\n");
15098
}
15099
 
3031 serge 15100
/*
15101
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15102
 */
15103
static void quirk_ssc_force_disable(struct drm_device *dev)
15104
{
15105
	struct drm_i915_private *dev_priv = dev->dev_private;
15106
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15107
	DRM_INFO("applying lvds SSC disable quirk\n");
2330 Serge 15108
}
2327 Serge 15109
 
3031 serge 15110
/*
15111
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15112
 * brightness value
15113
 */
15114
static void quirk_invert_brightness(struct drm_device *dev)
2330 Serge 15115
{
15116
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 15117
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15118
	DRM_INFO("applying inverted panel brightness quirk\n");
15119
}
2327 Serge 15120
 
5060 serge 15121
/* Some VBT's incorrectly indicate no backlight is present */
15122
static void quirk_backlight_present(struct drm_device *dev)
15123
{
15124
	struct drm_i915_private *dev_priv = dev->dev_private;
15125
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15126
	DRM_INFO("applying backlight present quirk\n");
15127
}
15128
 
3031 serge 15129
/* One PCI-ID-matched quirk entry; PCI_ANY_ID wildcards the subsystem IDs. */
struct intel_quirk {
	int device;			/* PCI device ID */
	int subsystem_vendor;		/* PCI subsystem vendor, or PCI_ANY_ID */
	int subsystem_device;		/* PCI subsystem device, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied when all IDs match */
};
2327 Serge 15135
 
3031 serge 15136
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* applied on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated match list */
};
2327 Serge 15141
 
3031 serge 15142
/* DMI callback: log the match; returning 1 stops further DMI scanning. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
2327 Serge 15147
 
3031 serge 15148
/* Quirks keyed on DMI strings rather than PCI IDs. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
2327 Serge 15163
 
3031 serge 15164
/* PCI-ID keyed quirk table, scanned by intel_init_quirks(). */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
2327 Serge 15225
 
3031 serge 15226
static void intel_init_quirks(struct drm_device *dev)
2330 Serge 15227
{
3031 serge 15228
	struct pci_dev *d = dev->pdev;
15229
	int i;
2327 Serge 15230
 
3031 serge 15231
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15232
		struct intel_quirk *q = &intel_quirks[i];
2327 Serge 15233
 
3031 serge 15234
		if (d->device == q->device &&
15235
		    (d->subsystem_vendor == q->subsystem_vendor ||
15236
		     q->subsystem_vendor == PCI_ANY_ID) &&
15237
		    (d->subsystem_device == q->subsystem_device ||
15238
		     q->subsystem_device == PCI_ANY_ID))
15239
			q->hook(dev);
15240
	}
5097 serge 15241
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15242
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15243
			intel_dmi_quirks[i].hook(dev);
15244
	}
2330 Serge 15245
}
2327 Serge 15246
 
3031 serge 15247
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	/* VGA arbiter calls are stubbed out in this KolibriOS port. */
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Set SR01 bit 5 (screen off) through the legacy VGA I/O ports. */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15265
 
3031 serge 15266
/*
 * (Re)initialize display hardware state that must be programmed on every
 * init/resume: read back the current cdclk, seed the atomic cdclk tracking
 * from it, then program clock gating and GT power saving.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_update_cdclk(dev);

	/* Seed the atomic state's idea of cdclk from the freshly read value. */
	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev);
}
15277
 
7144 serge 15278
/*
15279
 * Calculate what we think the watermarks should be for the state we've read
15280
 * out of the hardware and then immediately program those watermarks so that
15281
 * we ensure the hardware settings match our internal state.
15282
 *
15283
 * We can calculate what we think WM's should be by creating a duplicate of the
15284
 * current state (which was constructed during hardware readout) and running it
15285
 * through the atomic check code to calculate new watermark values in the
15286
 * state object.
15287
 */
15288
static void sanitize_watermarks(struct drm_device *dev)
15289
{
15290
	struct drm_i915_private *dev_priv = to_i915(dev);
15291
	struct drm_atomic_state *state;
15292
	struct drm_crtc *crtc;
15293
	struct drm_crtc_state *cstate;
15294
	struct drm_modeset_acquire_ctx ctx;
15295
	int ret;
15296
	int i;
15297
 
15298
	/* Only supported on platforms that use atomic watermark design */
15299
	if (!dev_priv->display.program_watermarks)
15300
		return;
15301
 
15302
	/*
15303
	 * We need to hold connection_mutex before calling duplicate_state so
15304
	 * that the connector loop is protected.
15305
	 */
15306
	drm_modeset_acquire_init(&ctx, 0);
15307
retry:
15308
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
15309
	if (ret == -EDEADLK) {
15310
		drm_modeset_backoff(&ctx);
15311
		goto retry;
15312
	} else if (WARN_ON(ret)) {
15313
		goto fail;
15314
	}
15315
 
15316
	state = drm_atomic_helper_duplicate_state(dev, &ctx);
15317
	if (WARN_ON(IS_ERR(state)))
15318
		goto fail;
15319
 
15320
	ret = intel_atomic_check(dev, state);
15321
	if (ret) {
15322
		/*
15323
		 * If we fail here, it means that the hardware appears to be
15324
		 * programmed in a way that shouldn't be possible, given our
15325
		 * understanding of watermark requirements.  This might mean a
15326
		 * mistake in the hardware readout code or a mistake in the
15327
		 * watermark calculations for a given platform.  Raise a WARN
15328
		 * so that this is noticeable.
15329
		 *
15330
		 * If this actually happens, we'll have to just leave the
15331
		 * BIOS-programmed watermarks untouched and hope for the best.
15332
		 */
15333
		WARN(true, "Could not determine valid watermarks for inherited state\n");
15334
		goto fail;
15335
	}
15336
 
15337
	/* Write calculated watermark values back */
15338
	to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
15339
	for_each_crtc_in_state(state, crtc, cstate, i) {
15340
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15341
 
15342
		dev_priv->display.program_watermarks(cs);
15343
	}
15344
 
15345
	drm_atomic_state_free(state);
15346
fail:
15347
	drm_modeset_drop_locks(&ctx);
15348
	drm_modeset_acquire_fini(&ctx);
15349
}
15350
 
3031 serge 15351
/*
 * One-time modeset initialization: set up DRM mode_config limits and hooks,
 * apply quirks, init PM and display vtables, create crtcs/planes, disable
 * the VGA plane, register outputs, read back the BIOS-programmed hardware
 * state and take over any BIOS framebuffer.  Order of these steps matters.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Headless (no display pipes) hardware: nothing more to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	intel_init_display(dev);

	/* Framebuffer size limits grow with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor size limits likewise differ on gen2. */
	if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a crtc per pipe plus its sprite planes; sprite init failure
	 * is non-fatal (primary plane still works). */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Read back and sanitize the state the BIOS left the hardware in. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	sanitize_watermarks(dev);
}
15475
 
3031 serge 15476
static void intel_enable_pipe_a(struct drm_device *dev)
2330 Serge 15477
{
3031 serge 15478
	struct intel_connector *connector;
15479
	struct drm_connector *crt = NULL;
15480
	struct intel_load_detect_pipe load_detect_temp;
5060 serge 15481
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
2330 Serge 15482
 
3031 serge 15483
	/* We can't just switch on the pipe A, we need to set things up with a
15484
	 * proper mode and output configuration. As a gross hack, enable pipe A
15485
	 * by enabling the load detect pipe once. */
6084 serge 15486
	for_each_intel_connector(dev, connector) {
3031 serge 15487
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15488
			crt = &connector->base;
15489
			break;
2330 Serge 15490
		}
15491
	}
15492
 
3031 serge 15493
	if (!crt)
15494
		return;
2330 Serge 15495
 
5060 serge 15496
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
6084 serge 15497
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
2327 Serge 15498
}
15499
 
3031 serge 15500
static bool
15501
intel_check_plane_mapping(struct intel_crtc *crtc)
2327 Serge 15502
{
3746 Serge 15503
	struct drm_device *dev = crtc->base.dev;
15504
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 15505
	u32 val;
2327 Serge 15506
 
3746 Serge 15507
	if (INTEL_INFO(dev)->num_pipes == 1)
3031 serge 15508
		return true;
2327 Serge 15509
 
6084 serge 15510
	val = I915_READ(DSPCNTR(!crtc->plane));
2327 Serge 15511
 
3031 serge 15512
	if ((val & DISPLAY_PLANE_ENABLE) &&
15513
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15514
		return false;
2327 Serge 15515
 
3031 serge 15516
	return true;
2327 Serge 15517
}
15518
 
6084 serge 15519
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15520
{
15521
	struct drm_device *dev = crtc->base.dev;
15522
	struct intel_encoder *encoder;
15523
 
15524
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15525
		return true;
15526
 
15527
	return false;
15528
}
15529
 
7144 serge 15530
static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15531
{
15532
	struct drm_device *dev = encoder->base.dev;
15533
	struct intel_connector *connector;
15534
 
15535
	for_each_connector_on_encoder(dev, &encoder->base, connector)
15536
		return true;
15537
 
15538
	return false;
15539
}
15540
 
3031 serge 15541
/*
 * Sanitize one crtc after hardware state readout: clear BIOS debug delays,
 * fix up vblank bookkeeping, disable stray non-primary planes, repair a
 * wrong plane->pipe mapping (gen < 4), apply the pipe A quirk, and force
 * the crtc off when no encoder is attached.  The order of these steps is
 * significant — do not reorder.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);

	/* Clear any frame start delays used for debugging left by the BIOS */
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (!intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	/* Reconcile the software state with what the hardware ended up as. */
	if (crtc->active != crtc->base.state->active) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because of calls to intel_crtc_disable_noatomic,
		 * or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.state->enable ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;
		crtc->base.state->connector_mask = 0;
		crtc->base.state->encoder_mask = 0;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 *  actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
			encoder->base.crtc = NULL;
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
15646
 
3031 serge 15647
/*
 * Sanitize one encoder after hardware state readout: if the encoder has
 * connectors attached but no active pipe behind it (fallout from register
 * restore on resume), manually disable it and clamp its connectors off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			/* disable before post_disable — mirrors the normal
			 * modeset teardown ordering. */
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15690
 
5060 serge 15691
void i915_redisable_vga_power_on(struct drm_device *dev)
3746 Serge 15692
{
15693
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 15694
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
3746 Serge 15695
 
5060 serge 15696
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15697
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15698
		i915_disable_vga(dev);
15699
	}
15700
}
15701
 
15702
/*
 * Power-well-safe wrapper around i915_redisable_vga_power_on(): only
 * touches the VGA registers if the VGA power domain is already enabled,
 * taking and releasing a reference around the access.
 */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	/* Drop the reference taken by the _get_if_enabled() above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15720
 
6084 serge 15721
static bool primary_get_hw_state(struct intel_plane *plane)
5060 serge 15722
{
6084 serge 15723
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5060 serge 15724
 
6084 serge 15725
	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15726
}
5060 serge 15727
 
6084 serge 15728
/* FIXME read out full plane state for all planes */
15729
static void readout_plane_state(struct intel_crtc *crtc)
15730
{
15731
	struct drm_plane *primary = crtc->base.primary;
15732
	struct intel_plane_state *plane_state =
15733
		to_intel_plane_state(primary->state);
15734
 
6937 serge 15735
	plane_state->visible = crtc->active &&
6084 serge 15736
		primary_get_hw_state(to_intel_plane(primary));
15737
 
15738
	if (plane_state->visible)
15739
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
5060 serge 15740
}
15741
 
4104 Serge 15742
/*
 * Read the current modeset configuration out of the hardware into the
 * software state: per-crtc pipe config and minimum pixel clock, shared
 * DPLL usage, encoder->crtc links, connector->encoder links, and finally
 * the derived crtc modes/timestamping constants.  The four loops must run
 * in this order (crtcs before encoders before connectors).
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state = crtc->config;
		int pixclk = 0;

		/* Throw away the stale state and read the pipe config afresh. */
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active) {
			dev_priv->active_crtcs |= 1 << crtc->pipe;

			if (IS_BROADWELL(dev_priv)) {
				pixclk = ilk_pipe_pixel_rate(crtc_state);

				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
				if (crtc_state->ips_enabled)
					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
			} else if (IS_VALLEYVIEW(dev_priv) ||
				   IS_CHERRYVIEW(dev_priv) ||
				   IS_BROXTON(dev_priv))
				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	/* Read back shared DPLL state and count which crtcs use each PLL. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		pll->active = 0;
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		/* Hold the PLLS power domain while any crtc uses this PLL. */
		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	/* Derive the legacy mode fields and scanline bookkeeping from the
	 * freshly read pipe configs. */
	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}
	}
}
2332 Serge 15895
 
6084 serge 15896
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	/* Encoders must be sanitized before crtcs: crtc sanitization uses the
	 * (now clamped) encoder links. */
	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Switch off shared DPLLs that are on but unused by any crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back platform-specific watermark state. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		/* Acquiring power domains here should be a no-op; warn and
		 * release any that get handed back. */
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_fbc_init_pipe_state(dev_priv);
}
3746 Serge 15954
 
6084 serge 15955
/*
 * Restore the display configuration after resume: re-read and sanitize the
 * hardware state, then (if one was saved at suspend and is still usable)
 * commit the saved atomic state, forcing a full recalculation per crtc.
 * Uses the standard EDEADLK backoff/retry dance around the modeset locks.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	bool setup = false;

	dev_priv->modeset_restore_state = NULL;

	/*
	 * This is a cludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);

	/*
	 * With MST, the number of connectors can change between suspend and
	 * resume, which means that the state we want to restore might now be
	 * impossible to use since it'll be pointing to non-existant
	 * connectors.
	 */
	if (ret == 0 && state &&
	    state->num_connector != dev->mode_config.num_connector) {
		drm_atomic_state_free(state);
		state = NULL;
	}

	/* Only run hw-state setup once, even if we loop through "retry". */
	if (ret == 0 && !setup) {
		setup = true;

		intel_modeset_setup_hw_state(dev);
		i915_redisable_vga(dev);
	}

	if (ret == 0 && state) {
		struct drm_crtc_state *crtc_state;
		struct drm_crtc *crtc;
		int i;

		state->acquire_ctx = &ctx;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			/*
			 * Force recalculation even if we restore
			 * current state. With fast modeset this may not result
			 * in a modeset when the state is compatible.
			 */
			crtc_state->mode_changed = true;
		}

		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret) {
		DRM_ERROR("Restoring old state failed with %i\n", ret);
		drm_atomic_state_free(state);
	}
}
16029
 
3031 serge 16030
/*
 * GEM-side modeset init: bring up GT power saving and display hardware,
 * then pin & fence any BIOS framebuffers taken over at boot; on pin
 * failure the fb is dropped from the plane so fbdev can allocate fresh.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	intel_init_gt_powersave(dev);

	intel_modeset_init_hw(dev);

//   intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced.  When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary,
						 c->primary->fb,
						 c->primary->state);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Pinning failed: drop the BIOS fb from the plane and
			 * clear the plane out of the crtc state mask. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}
16070
 
5060 serge 16071
void intel_connector_unregister(struct intel_connector *intel_connector)
16072
{
16073
	struct drm_connector *connector = &intel_connector->base;
16074
 
16075
	intel_panel_destroy_backlight(connector);
16076
	drm_connector_unregister(connector);
16077
}
16078
 
3031 serge 16079
/*
 * Full modeset teardown, mirroring intel_modeset_init. The whole body is
 * compiled out (#if 0) in this port — presumably because the driver is
 * never unloaded here; kept byte-for-byte for comparison with the
 * upstream Linux source. TODO(review): confirm the driver truly never
 * unloads before relying on this.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	for_each_intel_connector(dev, connector)
		connector->unregister(connector);

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	intel_cleanup_gt_powersave(dev);
#endif
}
16120
 
16121
/*
3031 serge 16122
 * Return which encoder is currently attached for connector.
2327 Serge 16123
 */
3031 serge 16124
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
2327 Serge 16125
{
3031 serge 16126
	return &intel_attached_encoder(connector)->base;
16127
}
2327 Serge 16128
 
3031 serge 16129
void intel_connector_attach_encoder(struct intel_connector *connector,
16130
				    struct intel_encoder *encoder)
16131
{
16132
	connector->encoder = encoder;
16133
	drm_mode_connector_attach_encoder(&connector->base,
16134
					  &encoder->base);
2327 Serge 16135
}
16136
 
16137
/*
3031 serge 16138
 * set vga decode state - true == enable VGA decode
2327 Serge 16139
 */
3031 serge 16140
/*
 * set vga decode state - true == enable VGA decode
 *
 * Reads the GMCH control word from the bridge device, flips the
 * VGA-disable bit to match @state, and writes it back only when the
 * value actually changes. Returns 0 on success, -EIO on PCI config
 * access failure.
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl, new_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Desired value: enabling VGA decode clears the disable bit. */
	new_ctrl = state ? gmch_ctrl & ~INTEL_GMCH_VGA_DISABLE
			 : gmch_ctrl | INTEL_GMCH_VGA_DISABLE;

	/* Already in the requested state: skip the config-space write. */
	if (new_ctrl == gmch_ctrl)
		return 0;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, new_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}
16166
 
3031 serge 16167
/*
 * Snapshot of display registers (per pipe/plane/cursor/transcoder),
 * captured by intel_display_capture_error_state() and rendered by
 * intel_display_print_error_state() as part of a GPU error dump.
 */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_CTL2; read on HSW/BDW only */

	int num_transcoders;	/* number of valid entries in transcoder[] */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false => registers below were never read and stay zero */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* false => registers below were never read and stay zero */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];	/* up to 3 pipe transcoders + eDP */
};
2327 Serge 16210
 
3031 serge 16211
/*
 * Capture the current display register state for an error dump.
 *
 * Runs in the error-capture path, hence the GFP_ATOMIC allocation.
 * Pipes and transcoders whose power domain is off are skipped, leaving
 * their zero-initialized fields in place (power_domain_on records which
 * entries are valid).
 *
 * Returns a kzalloc'ed snapshot (caller frees), or NULL when there are
 * no pipes or the allocation fails.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		/* Don't touch registers of a powered-down pipe. */
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			/* size/pos registers only read on gen2/3 */
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* One transcoder per pipe, plus eDP on DDI platforms. */
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		/* Skip registers of a powered-down transcoder. */
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
2327 Serge 16290
 
4104 Serge 16291
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16292
 
3031 serge 16293
/*
 * Pretty-print a display error snapshot into the error state buffer @m.
 * No-op when @error is NULL. Entries whose power domain was off at
 * capture time are still printed (their register fields were left zero
 * by the capture path); the "Power:" line tells the reader which values
 * are meaningful.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			/* size/pos captured on gen2/3 only */
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}