Subversion Repositories Kolibri OS

Rev

Rev 6660 | Rev 6937 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
6084 serge 24
 *	Eric Anholt 
2327 Serge 25
 */
26
 
5097 serge 27
#include 
2327 Serge 28
#include 
6088 serge 29
#include 
2327 Serge 30
#include 
31
#include 
2330 Serge 32
#include 
5354 serge 33
#include 
2342 Serge 34
#include 
3031 serge 35
#include 
2327 Serge 36
#include "intel_drv.h"
3031 serge 37
#include 
2327 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
6084 serge 40
#include 
41
#include 
3031 serge 42
#include 
43
#include 
5060 serge 44
#include 
45
#include 
46
#include 
2327 Serge 47
 
5060 serge 48
/* Primary plane formats for gen <= 3 */
6084 serge 49
static const uint32_t i8xx_primary_formats[] = {
50
	DRM_FORMAT_C8,
51
	DRM_FORMAT_RGB565,
5060 serge 52
	DRM_FORMAT_XRGB1555,
6084 serge 53
	DRM_FORMAT_XRGB8888,
5060 serge 54
};
55
 
56
/* Primary plane formats for gen >= 4 */
6084 serge 57
static const uint32_t i965_primary_formats[] = {
58
	DRM_FORMAT_C8,
59
	DRM_FORMAT_RGB565,
60
	DRM_FORMAT_XRGB8888,
5060 serge 61
	DRM_FORMAT_XBGR8888,
6084 serge 62
	DRM_FORMAT_XRGB2101010,
63
	DRM_FORMAT_XBGR2101010,
64
};
65
 
66
static const uint32_t skl_primary_formats[] = {
67
	DRM_FORMAT_C8,
68
	DRM_FORMAT_RGB565,
69
	DRM_FORMAT_XRGB8888,
70
	DRM_FORMAT_XBGR8888,
71
	DRM_FORMAT_ARGB8888,
5060 serge 72
	DRM_FORMAT_ABGR8888,
73
	DRM_FORMAT_XRGB2101010,
74
	DRM_FORMAT_XBGR2101010,
6084 serge 75
	DRM_FORMAT_YUYV,
76
	DRM_FORMAT_YVYU,
77
	DRM_FORMAT_UYVY,
78
	DRM_FORMAT_VYUY,
5060 serge 79
};
80
 
81
/* Cursor formats */
82
static const uint32_t intel_cursor_formats[] = {
83
	DRM_FORMAT_ARGB8888,
84
};
85
 
86
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

/* Clock readout helpers (defined later in this file). */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

/* Framebuffer, pipe and PLL programming helpers (defined later). */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary(struct drm_crtc *crtc);
4104 Serge 120
 
2327 Serge 121
/* Inclusive [min, max] range for a single PLL divider. */
typedef struct {
	int	min, max;
} intel_range_t;

/* Post divider p2 selection: use p2_slow below dot_limit, p2_fast above. */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
/* Full set of divider ranges for one platform/output combination. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};
135
 
6084 serge 136
/* returns HPLL frequency in kHz */
137
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
138
{
139
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
140
 
141
	/* Obtain SKU information */
142
	mutex_lock(&dev_priv->sb_lock);
143
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
144
		CCK_FUSE_HPLL_FREQ_MASK;
145
	mutex_unlock(&dev_priv->sb_lock);
146
 
147
	return vco_freq[hpll_freq] * 1000;
148
}
149
 
150
/*
 * Derive a CCK-driven clock (in kHz) from the HPLL frequency and the
 * divider programmed in @reg. @name is only used in the warning message.
 */
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	u32 val;
	int divider;

	/* Lazily cache the HPLL rate on first use. */
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	/* Status field should mirror the requested divider once settled. */
	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
171
 
3243 Serge 172
int
173
intel_pch_rawclk(struct drm_device *dev)
174
{
175
	struct drm_i915_private *dev_priv = dev->dev_private;
176
 
177
	WARN_ON(!HAS_PCH_SPLIT(dev));
178
 
179
	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
180
}
181
 
6084 serge 182
/* hrawclock is 1/4 the FSB frequency */
183
int intel_hrawclk(struct drm_device *dev)
184
{
185
	struct drm_i915_private *dev_priv = dev->dev_private;
186
	uint32_t clkcfg;
187
 
188
	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
189
	if (IS_VALLEYVIEW(dev))
190
		return 200;
191
 
192
	clkcfg = I915_READ(CLKCFG);
193
	switch (clkcfg & CLKCFG_FSB_MASK) {
194
	case CLKCFG_FSB_400:
195
		return 100;
196
	case CLKCFG_FSB_533:
197
		return 133;
198
	case CLKCFG_FSB_667:
199
		return 166;
200
	case CLKCFG_FSB_800:
201
		return 200;
202
	case CLKCFG_FSB_1067:
203
		return 266;
204
	case CLKCFG_FSB_1333:
205
		return 333;
206
	/* these two are just a guess; one of them might be right */
207
	case CLKCFG_FSB_1600:
208
	case CLKCFG_FSB_1600_ALT:
209
		return 400;
210
	default:
211
		return 133;
212
	}
213
}
214
 
215
static void intel_update_czclk(struct drm_i915_private *dev_priv)
216
{
217
	if (!IS_VALLEYVIEW(dev_priv))
218
		return;
219
 
220
	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
221
						      CCK_CZ_CLOCK_CONTROL);
222
 
223
	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
224
}
225
 
2327 Serge 226
static inline u32 /* units of 100MHz */
227
intel_fdi_link_freq(struct drm_device *dev)
228
{
229
	if (IS_GEN5(dev)) {
230
		struct drm_i915_private *dev_priv = dev->dev_private;
231
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
232
	} else
233
		return 27;
234
}
235
 
4104 Serge 236
static const intel_limit_t intel_limits_i8xx_dac = {
237
	.dot = { .min = 25000, .max = 350000 },
4560 Serge 238
	.vco = { .min = 908000, .max = 1512000 },
239
	.n = { .min = 2, .max = 16 },
4104 Serge 240
	.m = { .min = 96, .max = 140 },
241
	.m1 = { .min = 18, .max = 26 },
242
	.m2 = { .min = 6, .max = 16 },
243
	.p = { .min = 4, .max = 128 },
244
	.p1 = { .min = 2, .max = 33 },
245
	.p2 = { .dot_limit = 165000,
246
		.p2_slow = 4, .p2_fast = 2 },
247
};
248
 
2327 Serge 249
static const intel_limit_t intel_limits_i8xx_dvo = {
6084 serge 250
	.dot = { .min = 25000, .max = 350000 },
4560 Serge 251
	.vco = { .min = 908000, .max = 1512000 },
252
	.n = { .min = 2, .max = 16 },
6084 serge 253
	.m = { .min = 96, .max = 140 },
254
	.m1 = { .min = 18, .max = 26 },
255
	.m2 = { .min = 6, .max = 16 },
256
	.p = { .min = 4, .max = 128 },
257
	.p1 = { .min = 2, .max = 33 },
2327 Serge 258
	.p2 = { .dot_limit = 165000,
4104 Serge 259
		.p2_slow = 4, .p2_fast = 4 },
2327 Serge 260
};
261
 
262
static const intel_limit_t intel_limits_i8xx_lvds = {
6084 serge 263
	.dot = { .min = 25000, .max = 350000 },
4560 Serge 264
	.vco = { .min = 908000, .max = 1512000 },
265
	.n = { .min = 2, .max = 16 },
6084 serge 266
	.m = { .min = 96, .max = 140 },
267
	.m1 = { .min = 18, .max = 26 },
268
	.m2 = { .min = 6, .max = 16 },
269
	.p = { .min = 4, .max = 128 },
270
	.p1 = { .min = 1, .max = 6 },
2327 Serge 271
	.p2 = { .dot_limit = 165000,
272
		.p2_slow = 14, .p2_fast = 7 },
273
};
274
 
275
static const intel_limit_t intel_limits_i9xx_sdvo = {
6084 serge 276
	.dot = { .min = 20000, .max = 400000 },
277
	.vco = { .min = 1400000, .max = 2800000 },
278
	.n = { .min = 1, .max = 6 },
279
	.m = { .min = 70, .max = 120 },
3480 Serge 280
	.m1 = { .min = 8, .max = 18 },
281
	.m2 = { .min = 3, .max = 7 },
6084 serge 282
	.p = { .min = 5, .max = 80 },
283
	.p1 = { .min = 1, .max = 8 },
2327 Serge 284
	.p2 = { .dot_limit = 200000,
285
		.p2_slow = 10, .p2_fast = 5 },
286
};
287
 
288
static const intel_limit_t intel_limits_i9xx_lvds = {
6084 serge 289
	.dot = { .min = 20000, .max = 400000 },
290
	.vco = { .min = 1400000, .max = 2800000 },
291
	.n = { .min = 1, .max = 6 },
292
	.m = { .min = 70, .max = 120 },
3480 Serge 293
	.m1 = { .min = 8, .max = 18 },
294
	.m2 = { .min = 3, .max = 7 },
6084 serge 295
	.p = { .min = 7, .max = 98 },
296
	.p1 = { .min = 1, .max = 8 },
2327 Serge 297
	.p2 = { .dot_limit = 112000,
298
		.p2_slow = 14, .p2_fast = 7 },
299
};
300
 
301
 
302
static const intel_limit_t intel_limits_g4x_sdvo = {
303
	.dot = { .min = 25000, .max = 270000 },
304
	.vco = { .min = 1750000, .max = 3500000},
305
	.n = { .min = 1, .max = 4 },
306
	.m = { .min = 104, .max = 138 },
307
	.m1 = { .min = 17, .max = 23 },
308
	.m2 = { .min = 5, .max = 11 },
309
	.p = { .min = 10, .max = 30 },
310
	.p1 = { .min = 1, .max = 3},
311
	.p2 = { .dot_limit = 270000,
312
		.p2_slow = 10,
313
		.p2_fast = 10
314
	},
315
};
316
 
317
static const intel_limit_t intel_limits_g4x_hdmi = {
318
	.dot = { .min = 22000, .max = 400000 },
319
	.vco = { .min = 1750000, .max = 3500000},
320
	.n = { .min = 1, .max = 4 },
321
	.m = { .min = 104, .max = 138 },
322
	.m1 = { .min = 16, .max = 23 },
323
	.m2 = { .min = 5, .max = 11 },
324
	.p = { .min = 5, .max = 80 },
325
	.p1 = { .min = 1, .max = 8},
326
	.p2 = { .dot_limit = 165000,
327
		.p2_slow = 10, .p2_fast = 5 },
328
};
329
 
330
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
331
	.dot = { .min = 20000, .max = 115000 },
332
	.vco = { .min = 1750000, .max = 3500000 },
333
	.n = { .min = 1, .max = 3 },
334
	.m = { .min = 104, .max = 138 },
335
	.m1 = { .min = 17, .max = 23 },
336
	.m2 = { .min = 5, .max = 11 },
337
	.p = { .min = 28, .max = 112 },
338
	.p1 = { .min = 2, .max = 8 },
339
	.p2 = { .dot_limit = 0,
340
		.p2_slow = 14, .p2_fast = 14
341
	},
342
};
343
 
344
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
345
	.dot = { .min = 80000, .max = 224000 },
346
	.vco = { .min = 1750000, .max = 3500000 },
347
	.n = { .min = 1, .max = 3 },
348
	.m = { .min = 104, .max = 138 },
349
	.m1 = { .min = 17, .max = 23 },
350
	.m2 = { .min = 5, .max = 11 },
351
	.p = { .min = 14, .max = 42 },
352
	.p1 = { .min = 2, .max = 6 },
353
	.p2 = { .dot_limit = 0,
354
		.p2_slow = 7, .p2_fast = 7
355
	},
356
};
357
 
358
static const intel_limit_t intel_limits_pineview_sdvo = {
6084 serge 359
	.dot = { .min = 20000, .max = 400000},
360
	.vco = { .min = 1700000, .max = 3500000 },
2327 Serge 361
	/* Pineview's Ncounter is a ring counter */
6084 serge 362
	.n = { .min = 3, .max = 6 },
363
	.m = { .min = 2, .max = 256 },
2327 Serge 364
	/* Pineview only has one combined m divider, which we treat as m2. */
6084 serge 365
	.m1 = { .min = 0, .max = 0 },
366
	.m2 = { .min = 0, .max = 254 },
367
	.p = { .min = 5, .max = 80 },
368
	.p1 = { .min = 1, .max = 8 },
2327 Serge 369
	.p2 = { .dot_limit = 200000,
370
		.p2_slow = 10, .p2_fast = 5 },
371
};
372
 
373
static const intel_limit_t intel_limits_pineview_lvds = {
6084 serge 374
	.dot = { .min = 20000, .max = 400000 },
375
	.vco = { .min = 1700000, .max = 3500000 },
376
	.n = { .min = 3, .max = 6 },
377
	.m = { .min = 2, .max = 256 },
378
	.m1 = { .min = 0, .max = 0 },
379
	.m2 = { .min = 0, .max = 254 },
380
	.p = { .min = 7, .max = 112 },
381
	.p1 = { .min = 1, .max = 8 },
2327 Serge 382
	.p2 = { .dot_limit = 112000,
383
		.p2_slow = 14, .p2_fast = 14 },
384
};
385
 
386
/* Ironlake / Sandybridge
387
 *
388
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
389
 * the range value for them is (actual_value - 2).
390
 */
391
static const intel_limit_t intel_limits_ironlake_dac = {
392
	.dot = { .min = 25000, .max = 350000 },
393
	.vco = { .min = 1760000, .max = 3510000 },
394
	.n = { .min = 1, .max = 5 },
395
	.m = { .min = 79, .max = 127 },
396
	.m1 = { .min = 12, .max = 22 },
397
	.m2 = { .min = 5, .max = 9 },
398
	.p = { .min = 5, .max = 80 },
399
	.p1 = { .min = 1, .max = 8 },
400
	.p2 = { .dot_limit = 225000,
401
		.p2_slow = 10, .p2_fast = 5 },
402
};
403
 
404
static const intel_limit_t intel_limits_ironlake_single_lvds = {
405
	.dot = { .min = 25000, .max = 350000 },
406
	.vco = { .min = 1760000, .max = 3510000 },
407
	.n = { .min = 1, .max = 3 },
408
	.m = { .min = 79, .max = 118 },
409
	.m1 = { .min = 12, .max = 22 },
410
	.m2 = { .min = 5, .max = 9 },
411
	.p = { .min = 28, .max = 112 },
412
	.p1 = { .min = 2, .max = 8 },
413
	.p2 = { .dot_limit = 225000,
414
		.p2_slow = 14, .p2_fast = 14 },
415
};
416
 
417
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
418
	.dot = { .min = 25000, .max = 350000 },
419
	.vco = { .min = 1760000, .max = 3510000 },
420
	.n = { .min = 1, .max = 3 },
421
	.m = { .min = 79, .max = 127 },
422
	.m1 = { .min = 12, .max = 22 },
423
	.m2 = { .min = 5, .max = 9 },
424
	.p = { .min = 14, .max = 56 },
425
	.p1 = { .min = 2, .max = 8 },
426
	.p2 = { .dot_limit = 225000,
427
		.p2_slow = 7, .p2_fast = 7 },
428
};
429
 
430
/* LVDS 100mhz refclk limits. */
431
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
432
	.dot = { .min = 25000, .max = 350000 },
433
	.vco = { .min = 1760000, .max = 3510000 },
434
	.n = { .min = 1, .max = 2 },
435
	.m = { .min = 79, .max = 126 },
436
	.m1 = { .min = 12, .max = 22 },
437
	.m2 = { .min = 5, .max = 9 },
438
	.p = { .min = 28, .max = 112 },
2342 Serge 439
	.p1 = { .min = 2, .max = 8 },
2327 Serge 440
	.p2 = { .dot_limit = 225000,
441
		.p2_slow = 14, .p2_fast = 14 },
442
};
443
 
444
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
445
	.dot = { .min = 25000, .max = 350000 },
446
	.vco = { .min = 1760000, .max = 3510000 },
447
	.n = { .min = 1, .max = 3 },
448
	.m = { .min = 79, .max = 126 },
449
	.m1 = { .min = 12, .max = 22 },
450
	.m2 = { .min = 5, .max = 9 },
451
	.p = { .min = 14, .max = 42 },
2342 Serge 452
	.p1 = { .min = 2, .max = 6 },
2327 Serge 453
	.p2 = { .dot_limit = 225000,
454
		.p2_slow = 7, .p2_fast = 7 },
455
};
456
 
4560 Serge 457
static const intel_limit_t intel_limits_vlv = {
458
	 /*
459
	  * These are the data rate limits (measured in fast clocks)
460
	  * since those are the strictest limits we have. The fast
461
	  * clock and actual rate limits are more relaxed, so checking
462
	  * them would make no difference.
463
	  */
464
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
3031 serge 465
	.vco = { .min = 4000000, .max = 6000000 },
466
	.n = { .min = 1, .max = 7 },
467
	.m1 = { .min = 2, .max = 3 },
468
	.m2 = { .min = 11, .max = 156 },
469
	.p1 = { .min = 2, .max = 3 },
4560 Serge 470
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
3031 serge 471
};
472
 
5060 serge 473
static const intel_limit_t intel_limits_chv = {
474
	/*
475
	 * These are the data rate limits (measured in fast clocks)
476
	 * since those are the strictest limits we have.  The fast
477
	 * clock and actual rate limits are more relaxed, so checking
478
	 * them would make no difference.
479
	 */
480
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
6084 serge 481
	.vco = { .min = 4800000, .max = 6480000 },
5060 serge 482
	.n = { .min = 1, .max = 1 },
483
	.m1 = { .min = 2, .max = 2 },
484
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
485
	.p1 = { .min = 2, .max = 4 },
486
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
487
};
488
 
6084 serge 489
static const intel_limit_t intel_limits_bxt = {
490
	/* FIXME: find real dot limits */
491
	.dot = { .min = 0, .max = INT_MAX },
492
	.vco = { .min = 4800000, .max = 6700000 },
493
	.n = { .min = 1, .max = 1 },
494
	.m1 = { .min = 2, .max = 2 },
495
	/* FIXME: find real m2 limits */
496
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
497
	.p1 = { .min = 2, .max = 4 },
498
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
499
};
500
 
501
static bool
502
needs_modeset(struct drm_crtc_state *state)
4560 Serge 503
{
6084 serge 504
	return drm_atomic_crtc_needs_modeset(state);
4560 Serge 505
}
3031 serge 506
 
4560 Serge 507
/**
508
 * Returns whether any output on the specified pipe is of the specified type
509
 */
5354 serge 510
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
4560 Serge 511
{
5354 serge 512
	struct drm_device *dev = crtc->base.dev;
4560 Serge 513
	struct intel_encoder *encoder;
514
 
5354 serge 515
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4560 Serge 516
		if (encoder->type == type)
517
			return true;
518
 
519
	return false;
520
}
521
 
5354 serge 522
/**
523
 * Returns whether any output on the specified pipe will have the specified
524
 * type after a staged modeset is complete, i.e., the same as
525
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
526
 * encoder->crtc.
527
 */
6084 serge 528
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
529
				      int type)
5354 serge 530
{
6084 serge 531
	struct drm_atomic_state *state = crtc_state->base.state;
532
	struct drm_connector *connector;
533
	struct drm_connector_state *connector_state;
5354 serge 534
	struct intel_encoder *encoder;
6084 serge 535
	int i, num_connectors = 0;
5354 serge 536
 
6084 serge 537
	for_each_connector_in_state(state, connector, connector_state, i) {
538
		if (connector_state->crtc != crtc_state->base.crtc)
539
			continue;
540
 
541
		num_connectors++;
542
 
543
		encoder = to_intel_encoder(connector_state->best_encoder);
544
		if (encoder->type == type)
5354 serge 545
			return true;
6084 serge 546
	}
5354 serge 547
 
6084 serge 548
	WARN_ON(num_connectors == 0);
549
 
5354 serge 550
	return false;
551
}
552
 
6084 serge 553
static const intel_limit_t *
554
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
2327 Serge 555
{
6084 serge 556
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 557
	const intel_limit_t *limit;
558
 
6084 serge 559
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
3480 Serge 560
		if (intel_is_dual_link_lvds(dev)) {
2327 Serge 561
			if (refclk == 100000)
562
				limit = &intel_limits_ironlake_dual_lvds_100m;
563
			else
564
				limit = &intel_limits_ironlake_dual_lvds;
565
		} else {
566
			if (refclk == 100000)
567
				limit = &intel_limits_ironlake_single_lvds_100m;
568
			else
569
				limit = &intel_limits_ironlake_single_lvds;
570
		}
4104 Serge 571
	} else
2327 Serge 572
		limit = &intel_limits_ironlake_dac;
573
 
574
	return limit;
575
}
576
 
6084 serge 577
static const intel_limit_t *
578
intel_g4x_limit(struct intel_crtc_state *crtc_state)
2327 Serge 579
{
6084 serge 580
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 581
	const intel_limit_t *limit;
582
 
6084 serge 583
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
3480 Serge 584
		if (intel_is_dual_link_lvds(dev))
2327 Serge 585
			limit = &intel_limits_g4x_dual_channel_lvds;
586
		else
587
			limit = &intel_limits_g4x_single_channel_lvds;
6084 serge 588
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
589
		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
2327 Serge 590
		limit = &intel_limits_g4x_hdmi;
6084 serge 591
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
2327 Serge 592
		limit = &intel_limits_g4x_sdvo;
593
	} else /* The option is for other outputs */
594
		limit = &intel_limits_i9xx_sdvo;
595
 
596
	return limit;
597
}
598
 
6084 serge 599
static const intel_limit_t *
600
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
2327 Serge 601
{
6084 serge 602
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 603
	const intel_limit_t *limit;
604
 
6084 serge 605
	if (IS_BROXTON(dev))
606
		limit = &intel_limits_bxt;
607
	else if (HAS_PCH_SPLIT(dev))
608
		limit = intel_ironlake_limit(crtc_state, refclk);
2327 Serge 609
	else if (IS_G4X(dev)) {
6084 serge 610
		limit = intel_g4x_limit(crtc_state);
2327 Serge 611
	} else if (IS_PINEVIEW(dev)) {
6084 serge 612
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
2327 Serge 613
			limit = &intel_limits_pineview_lvds;
614
		else
615
			limit = &intel_limits_pineview_sdvo;
5060 serge 616
	} else if (IS_CHERRYVIEW(dev)) {
617
		limit = &intel_limits_chv;
3031 serge 618
	} else if (IS_VALLEYVIEW(dev)) {
4560 Serge 619
		limit = &intel_limits_vlv;
2327 Serge 620
	} else if (!IS_GEN2(dev)) {
6084 serge 621
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
2327 Serge 622
			limit = &intel_limits_i9xx_lvds;
623
		else
624
			limit = &intel_limits_i9xx_sdvo;
625
	} else {
6084 serge 626
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
2327 Serge 627
			limit = &intel_limits_i8xx_lvds;
6084 serge 628
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
4104 Serge 629
			limit = &intel_limits_i8xx_dvo;
2327 Serge 630
		else
4104 Serge 631
			limit = &intel_limits_i8xx_dac;
2327 Serge 632
	}
633
	return limit;
634
}
635
 
6084 serge 636
/*
637
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
638
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
639
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
640
 * The helpers' return value is the rate of the clock that is fed to the
641
 * display engine's pipe which can be the above fast dot clock rate or a
642
 * divided-down version of it.
643
 */
2327 Serge 644
/* m1 is reserved as 0 in Pineview, n is a ring counter */
6084 serge 645
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
2327 Serge 646
{
647
	clock->m = clock->m2 + 2;
648
	clock->p = clock->p1 * clock->p2;
4560 Serge 649
	if (WARN_ON(clock->n == 0 || clock->p == 0))
6084 serge 650
		return 0;
4560 Serge 651
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
652
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
6084 serge 653
 
654
	return clock->dot;
2327 Serge 655
}
656
 
4104 Serge 657
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
2327 Serge 658
{
4104 Serge 659
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
660
}
661
 
6084 serge 662
/* Fill in m/p/vco/dot for an i9xx-style PLL; n is encoded as (n - 2). */
static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* Guard the divisions below against bogus zero divisors. */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
673
 
6084 serge 674
/* Fill in m/p/vco/dot for a VLV PLL; pipe clock is the fast clock / 5. */
static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
685
 
686
/* Fill in m/p/vco/dot for a CHV PLL; m2 carries 22 fractional bits. */
int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	/* 64-bit math: refclk * m overflows 32 bits before the >>22 shift. */
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
698
 
2327 Serge 699
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
700
/**
701
 * Returns whether the given set of divisors are valid for a given refclk with
702
 * the given connectors.
703
 */
704
 
705
static bool intel_PLL_is_valid(struct drm_device *dev,
706
			       const intel_limit_t *limit,
707
			       const intel_clock_t *clock)
708
{
4560 Serge 709
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
710
		INTELPllInvalid("n out of range\n");
2327 Serge 711
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
2342 Serge 712
		INTELPllInvalid("p1 out of range\n");
2327 Serge 713
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
2342 Serge 714
		INTELPllInvalid("m2 out of range\n");
2327 Serge 715
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
2342 Serge 716
		INTELPllInvalid("m1 out of range\n");
4560 Serge 717
 
6084 serge 718
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
4560 Serge 719
		if (clock->m1 <= clock->m2)
6084 serge 720
			INTELPllInvalid("m1 <= m2\n");
4560 Serge 721
 
6084 serge 722
	if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
4560 Serge 723
		if (clock->p < limit->p.min || limit->p.max < clock->p)
724
			INTELPllInvalid("p out of range\n");
6084 serge 725
		if (clock->m < limit->m.min || limit->m.max < clock->m)
726
			INTELPllInvalid("m out of range\n");
4560 Serge 727
	}
728
 
2327 Serge 729
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
2342 Serge 730
		INTELPllInvalid("vco out of range\n");
2327 Serge 731
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
732
	 * connector, etc., rather than just a single range.
733
	 */
734
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
2342 Serge 735
		INTELPllInvalid("dot out of range\n");
2327 Serge 736
 
737
	return true;
738
}
739
 
6084 serge 740
static int
741
i9xx_select_p2_div(const intel_limit_t *limit,
742
		   const struct intel_crtc_state *crtc_state,
743
		   int target)
2327 Serge 744
{
6084 serge 745
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 746
 
6084 serge 747
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
2327 Serge 748
		/*
3480 Serge 749
		 * For LVDS just rely on its current settings for dual-channel.
750
		 * We haven't figured out how to reliably set up different
751
		 * single/dual channel state, if we even can.
2327 Serge 752
		 */
3480 Serge 753
		if (intel_is_dual_link_lvds(dev))
6084 serge 754
			return limit->p2.p2_fast;
2327 Serge 755
		else
6084 serge 756
			return limit->p2.p2_slow;
2327 Serge 757
	} else {
758
		if (target < limit->p2.dot_limit)
6084 serge 759
			return limit->p2.p2_slow;
2327 Serge 760
		else
6084 serge 761
			return limit->p2.p2_fast;
2327 Serge 762
	}
6084 serge 763
}
2327 Serge 764
 
6084 serge 765
static bool
766
i9xx_find_best_dpll(const intel_limit_t *limit,
767
		    struct intel_crtc_state *crtc_state,
768
		    int target, int refclk, intel_clock_t *match_clock,
769
		    intel_clock_t *best_clock)
770
{
771
	struct drm_device *dev = crtc_state->base.crtc->dev;
772
	intel_clock_t clock;
773
	int err = target;
774
 
2342 Serge 775
	memset(best_clock, 0, sizeof(*best_clock));
2327 Serge 776
 
6084 serge 777
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
778
 
2327 Serge 779
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
780
	     clock.m1++) {
781
		for (clock.m2 = limit->m2.min;
782
		     clock.m2 <= limit->m2.max; clock.m2++) {
4104 Serge 783
			if (clock.m2 >= clock.m1)
2327 Serge 784
				break;
785
			for (clock.n = limit->n.min;
786
			     clock.n <= limit->n.max; clock.n++) {
787
				for (clock.p1 = limit->p1.min;
788
					clock.p1 <= limit->p1.max; clock.p1++) {
789
					int this_err;
790
 
6084 serge 791
					i9xx_calc_dpll_params(refclk, &clock);
2327 Serge 792
					if (!intel_PLL_is_valid(dev, limit,
793
								&clock))
794
						continue;
3031 serge 795
					if (match_clock &&
796
					    clock.p != match_clock->p)
797
						continue;
2327 Serge 798
 
799
					this_err = abs(clock.dot - target);
800
					if (this_err < err) {
801
						*best_clock = clock;
802
						err = this_err;
803
					}
804
				}
805
			}
806
		}
807
	}
808
 
809
	return (err != target);
810
}
811
 
812
static bool
6084 serge 813
pnv_find_best_dpll(const intel_limit_t *limit,
814
		   struct intel_crtc_state *crtc_state,
4104 Serge 815
		   int target, int refclk, intel_clock_t *match_clock,
816
		   intel_clock_t *best_clock)
817
{
6084 serge 818
	struct drm_device *dev = crtc_state->base.crtc->dev;
4104 Serge 819
	intel_clock_t clock;
820
	int err = target;
821
 
822
	memset(best_clock, 0, sizeof(*best_clock));
823
 
6084 serge 824
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
825
 
4104 Serge 826
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
827
	     clock.m1++) {
828
		for (clock.m2 = limit->m2.min;
829
		     clock.m2 <= limit->m2.max; clock.m2++) {
830
			for (clock.n = limit->n.min;
831
			     clock.n <= limit->n.max; clock.n++) {
832
				for (clock.p1 = limit->p1.min;
833
					clock.p1 <= limit->p1.max; clock.p1++) {
834
					int this_err;
835
 
6084 serge 836
					pnv_calc_dpll_params(refclk, &clock);
4104 Serge 837
					if (!intel_PLL_is_valid(dev, limit,
838
								&clock))
839
						continue;
840
					if (match_clock &&
841
					    clock.p != match_clock->p)
842
						continue;
843
 
844
					this_err = abs(clock.dot - target);
845
					if (this_err < err) {
846
						*best_clock = clock;
847
						err = this_err;
848
					}
849
				}
850
			}
851
		}
852
	}
853
 
854
	return (err != target);
855
}
856
 
857
static bool
6084 serge 858
g4x_find_best_dpll(const intel_limit_t *limit,
859
		   struct intel_crtc_state *crtc_state,
860
		   int target, int refclk, intel_clock_t *match_clock,
861
		   intel_clock_t *best_clock)
2327 Serge 862
{
6084 serge 863
	struct drm_device *dev = crtc_state->base.crtc->dev;
2327 Serge 864
	intel_clock_t clock;
865
	int max_n;
6084 serge 866
	bool found = false;
2327 Serge 867
	/* approximately equals target * 0.00585 */
868
	int err_most = (target >> 8) + (target >> 9);
869
 
6084 serge 870
	memset(best_clock, 0, sizeof(*best_clock));
2327 Serge 871
 
6084 serge 872
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
873
 
2327 Serge 874
	max_n = limit->n.max;
875
	/* based on hardware requirement, prefer smaller n to precision */
876
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
877
		/* based on hardware requirement, prefere larger m1,m2 */
878
		for (clock.m1 = limit->m1.max;
879
		     clock.m1 >= limit->m1.min; clock.m1--) {
880
			for (clock.m2 = limit->m2.max;
881
			     clock.m2 >= limit->m2.min; clock.m2--) {
882
				for (clock.p1 = limit->p1.max;
883
				     clock.p1 >= limit->p1.min; clock.p1--) {
884
					int this_err;
885
 
6084 serge 886
					i9xx_calc_dpll_params(refclk, &clock);
2327 Serge 887
					if (!intel_PLL_is_valid(dev, limit,
888
								&clock))
889
						continue;
890
 
891
					this_err = abs(clock.dot - target);
892
					if (this_err < err_most) {
893
						*best_clock = clock;
894
						err_most = this_err;
895
						max_n = clock.n;
896
						found = true;
897
					}
898
				}
899
			}
900
		}
901
	}
902
	return found;
903
}
904
 
6084 serge 905
/*
906
 * Check if the calculated PLL configuration is more optimal compared to the
907
 * best configuration and error found so far. Return the calculated error.
908
 */
909
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
910
			       const intel_clock_t *calculated_clock,
911
			       const intel_clock_t *best_clock,
912
			       unsigned int best_error_ppm,
913
			       unsigned int *error_ppm)
914
{
915
	/*
916
	 * For CHV ignore the error and consider only the P value.
917
	 * Prefer a bigger P value based on HW requirements.
918
	 */
919
	if (IS_CHERRYVIEW(dev)) {
920
		*error_ppm = 0;
921
 
922
		return calculated_clock->p > best_clock->p;
923
	}
924
 
925
	if (WARN_ON_ONCE(!target_freq))
926
		return false;
927
 
928
	*error_ppm = div_u64(1000000ULL *
929
				abs(target_freq - calculated_clock->dot),
930
			     target_freq);
931
	/*
932
	 * Prefer a better P value over a better (smaller) error if the error
933
	 * is small. Ensure this preference for future configurations too by
934
	 * setting the error to 0.
935
	 */
936
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
937
		*error_ppm = 0;
938
 
939
		return true;
940
	}
941
 
942
	return *error_ppm + 10 < best_error_ppm;
943
}
944
 
2327 Serge 945
/*
 * VLV variant of the DPLL divider search.  The comparison happens at the
 * 5x "fast clock" rate; m2 is computed directly for each n/p1/p2/m1
 * combination rather than scanned, and candidates are ranked by
 * vlv_PLL_is_optimal().  Returns true iff a valid candidate was written
 * to @best_clock.  Note: @match_clock is not used on this path.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* Solve m2 from the target directly instead of scanning. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
998
 
5060 serge 999
static bool
6084 serge 1000
chv_find_best_dpll(const intel_limit_t *limit,
1001
		   struct intel_crtc_state *crtc_state,
5060 serge 1002
		   int target, int refclk, intel_clock_t *match_clock,
1003
		   intel_clock_t *best_clock)
1004
{
6084 serge 1005
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5354 serge 1006
	struct drm_device *dev = crtc->base.dev;
6084 serge 1007
	unsigned int best_error_ppm;
5060 serge 1008
	intel_clock_t clock;
1009
	uint64_t m2;
1010
	int found = false;
1011
 
1012
	memset(best_clock, 0, sizeof(*best_clock));
6084 serge 1013
	best_error_ppm = 1000000;
5060 serge 1014
 
1015
	/*
1016
	 * Based on hardware doc, the n always set to 1, and m1 always
1017
	 * set to 2.  If requires to support 200Mhz refclk, we need to
1018
	 * revisit this because n may not 1 anymore.
1019
	 */
1020
	clock.n = 1, clock.m1 = 2;
1021
	target *= 5;	/* fast clock */
1022
 
1023
	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1024
		for (clock.p2 = limit->p2.p2_fast;
1025
				clock.p2 >= limit->p2.p2_slow;
1026
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
6084 serge 1027
			unsigned int error_ppm;
5060 serge 1028
 
1029
			clock.p = clock.p1 * clock.p2;
1030
 
1031
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1032
					clock.n) << 22, refclk * clock.m1);
1033
 
1034
			if (m2 > INT_MAX/clock.m1)
1035
				continue;
1036
 
1037
			clock.m2 = m2;
1038
 
6084 serge 1039
			chv_calc_dpll_params(refclk, &clock);
5060 serge 1040
 
1041
			if (!intel_PLL_is_valid(dev, limit, &clock))
1042
				continue;
1043
 
6084 serge 1044
			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1045
						best_error_ppm, &error_ppm))
1046
				continue;
1047
 
1048
			*best_clock = clock;
1049
			best_error_ppm = error_ppm;
1050
			found = true;
5060 serge 1051
		}
1052
	}
1053
 
1054
	return found;
1055
}
1056
 
6084 serge 1057
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1058
			intel_clock_t *best_clock)
1059
{
1060
	int refclk = i9xx_get_refclk(crtc_state, 0);
1061
 
1062
	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1063
				  target_clock, refclk, NULL, best_clock);
1064
}
1065
 
4560 Serge 1066
bool intel_crtc_active(struct drm_crtc *crtc)
1067
{
1068
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1069
 
1070
	/* Be paranoid as we can arrive here with only partial
1071
	 * state retrieved from the hardware during setup.
1072
	 *
1073
	 * We can ditch the adjusted_mode.crtc_clock check as soon
1074
	 * as Haswell has gained clock readout/fastboot support.
1075
	 *
5060 serge 1076
	 * We can ditch the crtc->primary->fb check as soon as we can
4560 Serge 1077
	 * properly reconstruct framebuffers.
6084 serge 1078
	 *
1079
	 * FIXME: The intel_crtc->active here should be switched to
1080
	 * crtc->state->active once we have proper CRTC states wired up
1081
	 * for atomic.
4560 Serge 1082
	 */
6084 serge 1083
	return intel_crtc->active && crtc->primary->state->fb &&
1084
		intel_crtc->config->base.adjusted_mode.crtc_clock;
4560 Serge 1085
}
1086
 
3243 Serge 1087
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1088
					     enum pipe pipe)
1089
{
1090
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1091
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1092
 
6084 serge 1093
	return intel_crtc->config->cpu_transcoder;
3243 Serge 1094
}
1095
 
4560 Serge 1096
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1097
{
1098
	struct drm_i915_private *dev_priv = dev->dev_private;
1099
	u32 reg = PIPEDSL(pipe);
1100
	u32 line1, line2;
1101
	u32 line_mask;
1102
 
1103
	if (IS_GEN2(dev))
1104
		line_mask = DSL_LINEMASK_GEN2;
1105
	else
1106
		line_mask = DSL_LINEMASK_GEN3;
1107
 
1108
	line1 = I915_READ(reg) & line_mask;
6084 serge 1109
	msleep(5);
4560 Serge 1110
	line2 = I915_READ(reg) & line_mask;
1111
 
1112
	return line1 == line2;
1113
}
1114
 
2327 Serge 1115
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off (100 ms timeout) */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle (100 ms timeout) */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}
1151
 
1152
/* Human-readable on/off string for the assertion messages below. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
1156
 
1157
/* Only for pre-ILK configs */
4104 Serge 1158
void assert_pll(struct drm_i915_private *dev_priv,
6084 serge 1159
		enum pipe pipe, bool state)
2327 Serge 1160
{
1161
	u32 val;
1162
	bool cur_state;
1163
 
6084 serge 1164
	val = I915_READ(DPLL(pipe));
2327 Serge 1165
	cur_state = !!(val & DPLL_VCO_ENABLE);
6084 serge 1166
	I915_STATE_WARN(cur_state != state,
2327 Serge 1167
	     "PLL state assertion failure (expected %s, current %s)\n",
1168
	     state_string(state), state_string(cur_state));
1169
}
1170
 
4560 Serge 1171
/* XXX: the dsi pll is shared between MIPI DSI ports */
1172
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1173
{
1174
	u32 val;
1175
	bool cur_state;
1176
 
6084 serge 1177
	mutex_lock(&dev_priv->sb_lock);
4560 Serge 1178
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
6084 serge 1179
	mutex_unlock(&dev_priv->sb_lock);
4560 Serge 1180
 
1181
	cur_state = val & DSI_PLL_VCO_EN;
6084 serge 1182
	I915_STATE_WARN(cur_state != state,
4560 Serge 1183
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1184
	     state_string(state), state_string(cur_state));
1185
}
1186
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1187
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1188
 
4104 Serge 1189
struct intel_shared_dpll *
1190
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1191
{
1192
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1193
 
6084 serge 1194
	if (crtc->config->shared_dpll < 0)
4104 Serge 1195
		return NULL;
1196
 
6084 serge 1197
	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
4104 Serge 1198
}
1199
 
2327 Serge 1200
/* For ILK+ */
/*
 * WARN unless the shared DPLL @pll is in the expected @state, as reported
 * by the pll's own get_hw_state() hook.  A NULL @pll is itself a WARN.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN (!pll,
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     pll->name, state_string(state), state_string(cur_state));
}
1217
 
1218
/* WARN unless the FDI TX for @pipe is in the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	bool cur_state;

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		cur_state = !!(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
			       TRANS_DDI_FUNC_ENABLE);
	} else {
		cur_state = !!(I915_READ(FDI_TX_CTL(pipe)) & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1239
 
1240
/* WARN unless the FDI RX for @pipe is in the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state = !!(I915_READ(FDI_RX_CTL(pipe)) & FDI_RX_ENABLE);

	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1254
 
1255
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1256
				      enum pipe pipe)
1257
{
1258
	u32 val;
1259
 
1260
	/* ILK FDI PLL is always enabled */
5060 serge 1261
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
2327 Serge 1262
		return;
1263
 
3031 serge 1264
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
3480 Serge 1265
	if (HAS_DDI(dev_priv->dev))
3031 serge 1266
		return;
1267
 
6084 serge 1268
	val = I915_READ(FDI_TX_CTL(pipe));
1269
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
2327 Serge 1270
}
1271
 
4104 Serge 1272
/* WARN unless the FDI RX PLL for @pipe is in the expected @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	bool cur_state = !!(I915_READ(FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE);

	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
1284
 
5354 serge 1285
/*
 * WARN if the panel power sequencer registers for the panel attached to
 * @pipe are still write-locked while the panel is powered on.  Which PP
 * control register to read, and which pipe the panel actually drives, is
 * determined per platform (PCH split, VLV, or legacy gen2/3).
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* DDI platforms are not handled here. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	/* Unlocked means panel off, or the unlock pattern in the lock bits. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1326
 
4560 Serge 1327
/* WARN unless the cursor on @pipe is in the expected @state. */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	/* 845/865 have a single cursor register with its own enable bit. */
	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = !!(I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE);
	else
		cur_state = !!(I915_READ(CURCNTR(pipe)) & CURSOR_MODE);

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1344
 
2342 Serge 1345
/*
 * WARN unless @pipe is in the expected @state.  Force-enabled quirk pipes
 * are always expected on, and a pipe whose transcoder power domain is off
 * is reported as disabled without touching its registers.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	/* Don't read PIPECONF when its power well is down. */
	if (!intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
1369
 
3031 serge 1370
/* WARN unless the primary plane @plane is in the expected @state. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	bool cur_state = !!(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);

	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1385
 
2327 Serge 1386
/*
 * WARN if any primary plane that could scan out @pipe is still enabled.
 * On gen4+ the primary plane is fixed to its pipe, so only one register
 * needs checking; on older gens every plane's pipe-select field must be
 * inspected.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1411
 
3746 Serge 1412
/*
 * WARN if any sprite plane on @pipe is still enabled.  The sprite register
 * layout differs per generation: universal planes on gen9+, per-sprite
 * SPCNTR on VLV, SPRCTL on gen7/8, DVSCNTR on gen5/6; older gens have no
 * sprites to check.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1444
 
5354 serge 1445
/*
 * WARN if vblank interrupts for @crtc are not disabled.
 * drm_crtc_vblank_get() returning 0 means a vblank reference was
 * successfully taken (i.e. vblanks were not off as expected), so we must
 * drop that reference again with drm_crtc_vblank_put().
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1450
 
4560 Serge 1451
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
2327 Serge 1452
{
1453
	u32 val;
1454
	bool enabled;
1455
 
6084 serge 1456
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
3031 serge 1457
 
2327 Serge 1458
	val = I915_READ(PCH_DREF_CONTROL);
1459
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1460
			    DREF_SUPERSPREAD_SOURCE_MASK));
6084 serge 1461
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
2327 Serge 1462
}
1463
 
4104 Serge 1464
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
6084 serge 1465
					   enum pipe pipe)
2327 Serge 1466
{
1467
	u32 val;
1468
	bool enabled;
1469
 
6084 serge 1470
	val = I915_READ(PCH_TRANSCONF(pipe));
2327 Serge 1471
	enabled = !!(val & TRANS_ENABLE);
6084 serge 1472
	I915_STATE_WARN(enabled,
2327 Serge 1473
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1474
	     pipe_name(pipe));
1475
}
1476
 
1477
/*
 * Decode @val (a DP port register value) and report whether that port is
 * enabled and currently routed to @pipe; the routing encoding differs on
 * CPT, CHV and older parts.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		/* On CPT the routing lives in the transcoder's DP control. */
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));

		return (trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) == port_sel;
	}

	if (IS_CHERRYVIEW(dev_priv->dev))
		return (val & DP_PIPE_MASK_CHV) == DP_PIPE_SELECT_CHV(pipe);

	return (val & DP_PIPE_MASK) == (pipe << 30);
}
1497
 
1498
/*
 * Decode @val (an SDVO/HDMI port register value) and report whether that
 * port is enabled and routed to @pipe, using the platform's encoding.
 */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & SDVO_PIPE_SEL_MASK_CPT) == SDVO_PIPE_SEL_CPT(pipe);

	if (IS_CHERRYVIEW(dev_priv->dev))
		return (val & SDVO_PIPE_SEL_MASK_CHV) == SDVO_PIPE_SEL_CHV(pipe);

	return (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(pipe);
}
1516
 
1517
/*
 * Decode @val (the LVDS port register value) and report whether the port
 * is enabled and routed to @pipe.
 */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & LVDS_PIPE_MASK) == LVDS_PIPE(pipe);
}
1532
 
1533
/*
 * Decode @val (the ADPA/VGA DAC register value) and report whether the
 * DAC is enabled and routed to @pipe.
 */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & ADPA_PIPE_SELECT_MASK) == ADPA_PIPE_SELECT(pipe);
}
1547
 
1548
/*
 * WARN if the PCH DP port at @reg is enabled on @pipe, and additionally
 * WARN if an IBX DP port, although disabled, is still left selecting
 * transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1560
 
1561
/*
 * WARN if the PCH HDMI port at @reg is enabled on @pipe, and additionally
 * WARN if an IBX HDMI port, although disabled, is still left selecting
 * pipe/transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
1573
 
1574
/*
 * WARN if any PCH-side port (DP B/C/D, VGA, LVDS, HDMI B/C/D) is still
 * enabled on @pipe's transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
1597
 
5354 serge 1598
/*
 * Program and enable the DPLL for @crtc on VLV using the precomputed
 * dpll_hw_state from @pipe_config.  The pipe must still be disabled.
 * The write/wait sequence (including the triple rewrite at the end) is
 * deliberate and must not be reordered.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	/* 1 ms timeout on the lock bit */
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1636
 
5354 serge 1637
/*
 * Program and enable the DPLL for @crtc on CHV: first enable the 10-bit
 * clock to the display controller through the DPIO sideband (under
 * sb_lock), then enable the PLL and wait for lock.  The pipe must still
 * be disabled.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked (1 ms timeout) */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);

	/* not sure when this should be written */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1675
 
5354 serge 1676
static int intel_num_dvo_pipes(struct drm_device *dev)
1677
{
1678
	struct intel_crtc *crtc;
1679
	int count = 0;
1680
 
1681
	for_each_intel_crtc(dev, crtc)
6084 serge 1682
		count += crtc->base.state->active &&
5354 serge 1683
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1684
 
1685
	return count;
1686
}
1687
 
4104 Serge 1688
/*
 * i9xx_enable_pll - enable the pipe DPLL on pre-ILK (gen2-gen4) hardware.
 * @crtc: CRTC whose pipe PLL should be enabled
 *
 * Follows the documented enable sequence: VGA-mode write first, then the
 * real divider value, then repeated writes with warmup delays.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ has a separate register for the pixel multiplier. */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1753
 
1754
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: CRTC whose pipe PLL should be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
5354 serge 1763
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		/* Last active DVO pipe is going away: clear 2x mode everywhere. */
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Leave only VGA-mode-disable set; this turns the PLL off. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1790
 
4539 Serge 1791
/*
 * vlv_disable_pll - disable the DPLL for a pipe on Valleyview.
 * @dev_priv: i915 device
 * @pipe: pipe whose PLL should be disabled
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	val = DPLL_VGA_MODE_DIS;
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

}
1809
 
5060 serge 1810
/*
 * chv_disable_pll - disable the DPLL for a pipe on Cherryview.
 * @dev_priv: i915 device
 * @pipe: pipe whose PLL should be disabled
 *
 * Clears the PLL enable bit, then turns off the 10-bit DCLKP clock via
 * sideband DPIO (mirrors the enable sequence in chv_enable_pll()).
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* Sideband (DPIO) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1835
 
4560 Serge 1836
/*
 * vlv_wait_port_ready - wait for a VLV/CHV digital port to report ready.
 * @dev_priv: i915 device
 * @dport: digital port to wait for
 * @expected_mask: ready bits expected within the port's status field
 *
 * Polls the per-port ready bits (in DPLL(0) for ports B/C, DPIO_PHY_STATUS
 * for port D) for up to 1 ms and WARNs on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	int dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits sit 4 bits above port B's. */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1865
 
5060 serge 1866
/*
 * intel_prepare_shared_dpll - run mode_set for a CRTC's shared DPLL.
 * @crtc: CRTC whose shared DPLL should be prepared
 *
 * Programs the PLL (via pll->mode_set) only when no other CRTC is
 * currently using it (active count is zero); the PLL must still be off.
 */
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	/* The PLL should already be claimed by at least one CRTC. */
	WARN_ON(!pll->config.crtc_mask);
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}
1884
 
2327 Serge 1885
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc: CRTC whose shared DPLL should be enabled
 *
 * Enable the shared DPLL used by @crtc. The PLL is reference counted, and
 * it needs to be enabled before the PCH transcoder, since it drives the
 * transcoder clock.
 */
5060 serge 1893
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->config.crtc_mask == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* Reference counting: only the first user actually enables the PLL. */
	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	/* Hold the PLL power domain while the PLL is on (released on disable). */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}
1922
 
5354 serge 1923
/*
 * intel_disable_shared_dpll - drop a CRTC's reference on its shared DPLL.
 * @crtc: CRTC releasing the PLL
 *
 * Decrements the PLL's active count and actually disables the hardware
 * (and releases the PLL power domain) only when the last user goes away.
 */
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	if (pll == NULL)
		return;

	/* This CRTC must actually be listed as a user of the PLL. */
	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;

	/* Matches the power_get in intel_enable_shared_dpll(). */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
1959
 
3243 Serge 1960
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
6084 serge 1961
					   enum pipe pipe)
2327 Serge 1962
{
3243 Serge 1963
	struct drm_device *dev = dev_priv->dev;
3031 serge 1964
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4104 Serge 1965
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3243 Serge 1966
	uint32_t reg, val, pipeconf_val;
2327 Serge 1967
 
1968
	/* PCH only available on ILK+ */
5354 serge 1969
	BUG_ON(!HAS_PCH_SPLIT(dev));
2327 Serge 1970
 
1971
	/* Make sure PCH DPLL is enabled */
4104 Serge 1972
	assert_shared_dpll_enabled(dev_priv,
1973
				   intel_crtc_to_shared_dpll(intel_crtc));
2327 Serge 1974
 
1975
	/* FDI must be feeding us bits for PCH ports */
1976
	assert_fdi_tx_enabled(dev_priv, pipe);
1977
	assert_fdi_rx_enabled(dev_priv, pipe);
1978
 
3243 Serge 1979
	if (HAS_PCH_CPT(dev)) {
1980
		/* Workaround: Set the timing override bit before enabling the
1981
		 * pch transcoder. */
1982
		reg = TRANS_CHICKEN2(pipe);
1983
		val = I915_READ(reg);
1984
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1985
		I915_WRITE(reg, val);
3031 serge 1986
	}
3243 Serge 1987
 
4104 Serge 1988
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1989
	val = I915_READ(reg);
3031 serge 1990
	pipeconf_val = I915_READ(PIPECONF(pipe));
2327 Serge 1991
 
1992
	if (HAS_PCH_IBX(dev_priv->dev)) {
1993
		/*
6084 serge 1994
		 * Make the BPC in transcoder be consistent with
1995
		 * that in pipeconf reg. For HDMI we must use 8bpc
1996
		 * here for both 8bpc and 12bpc.
2327 Serge 1997
		 */
3480 Serge 1998
		val &= ~PIPECONF_BPC_MASK;
6084 serge 1999
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
2000
			val |= PIPECONF_8BPC;
2001
		else
2002
			val |= pipeconf_val & PIPECONF_BPC_MASK;
2327 Serge 2003
	}
3031 serge 2004
 
2005
	val &= ~TRANS_INTERLACE_MASK;
2006
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
2007
		if (HAS_PCH_IBX(dev_priv->dev) &&
5354 serge 2008
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
3031 serge 2009
			val |= TRANS_LEGACY_INTERLACED_ILK;
2010
		else
2011
			val |= TRANS_INTERLACED;
2012
	else
2013
		val |= TRANS_PROGRESSIVE;
2014
 
2327 Serge 2015
	I915_WRITE(reg, val | TRANS_ENABLE);
2016
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
4104 Serge 2017
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2327 Serge 2018
}
2019
 
3243 Serge 2020
/*
 * lpt_enable_pch_transcoder - enable the single LPT PCH transcoder.
 * @dev_priv: i915 device
 * @cpu_transcoder: CPU transcoder feeding the PCH
 *
 * LPT has only one PCH transcoder (accessed via LPT_TRANSCONF); FDI RX is
 * always transcoder A. Sets the timing-override workaround bit first.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Mirror the CPU transcoder's interlace mode. */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
2050
 
2051
/*
 * ironlake_disable_pch_transcoder - disable the PCH transcoder for a pipe.
 * @dev_priv: i915 device
 * @pipe: pipe whose PCH transcoder should be disabled
 *
 * FDI and PCH ports must already be off. On non-IBX the timing-override
 * workaround bit set at enable time is cleared again.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t reg, val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (!HAS_PCH_IBX(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
2080
 
3243 Serge 2081
/*
 * lpt_disable_pch_transcoder - disable the single LPT PCH transcoder.
 * @dev_priv: i915 device
 *
 * Clears TRANS_ENABLE, waits for the transcoder to report off, then clears
 * the timing-override workaround bit set in lpt_enable_pch_transcoder().
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
2097
 
2327 Serge 2098
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum pipe pch_transcoder;
	int reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	/* Planes, cursor and sprites must be off before the pipe toggles. */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* LPT has a single PCH transcoder, always fed as transcoder A. */
	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv->dev))
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* Already on: only legal under the force-pipe quirks. */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);
}
2158
 
2159
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	int reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for shutdown if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
2208
 
6084 serge 2209
static bool need_vtd_wa(struct drm_device *dev)
2327 Serge 2210
{
6084 serge 2211
#ifdef CONFIG_INTEL_IOMMU
2212
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2213
		return true;
2214
#endif
2215
	return false;
2327 Serge 2216
}
2217
 
6084 serge 2218
unsigned int
2219
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
2220
		  uint64_t fb_format_modifier, unsigned int plane)
2327 Serge 2221
{
6084 serge 2222
	unsigned int tile_height;
2223
	uint32_t pixel_bytes;
2327 Serge 2224
 
6084 serge 2225
	switch (fb_format_modifier) {
2226
	case DRM_FORMAT_MOD_NONE:
2227
		tile_height = 1;
2228
		break;
2229
	case I915_FORMAT_MOD_X_TILED:
2230
		tile_height = IS_GEN2(dev) ? 16 : 8;
2231
		break;
2232
	case I915_FORMAT_MOD_Y_TILED:
2233
		tile_height = 32;
2234
		break;
2235
	case I915_FORMAT_MOD_Yf_TILED:
2236
		pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
2237
		switch (pixel_bytes) {
2238
		default:
2239
		case 1:
2240
			tile_height = 64;
2241
			break;
2242
		case 2:
2243
		case 4:
2244
			tile_height = 32;
2245
			break;
2246
		case 8:
2247
			tile_height = 16;
2248
			break;
2249
		case 16:
2250
			WARN_ONCE(1,
2251
				  "128-bit pixels are not supported for display!");
2252
			tile_height = 16;
2253
			break;
2254
		}
2255
		break;
2256
	default:
2257
		MISSING_CASE(fb_format_modifier);
2258
		tile_height = 1;
2259
		break;
2260
	}
2327 Serge 2261
 
6084 serge 2262
	return tile_height;
2263
}
4560 Serge 2264
 
6084 serge 2265
unsigned int
2266
intel_fb_align_height(struct drm_device *dev, unsigned int height,
2267
		      uint32_t pixel_format, uint64_t fb_format_modifier)
2268
{
2269
	return ALIGN(height, intel_tile_height(dev, pixel_format,
2270
					       fb_format_modifier, 0));
2327 Serge 2271
}
2272
 
6084 serge 2273
/*
 * intel_fill_fb_ggtt_view - pick the GGTT view for a plane's framebuffer.
 * @view: output; set to the normal or rotated GGTT view
 * @fb: framebuffer being scanned out
 * @plane_state: plane state carrying the rotation (may be NULL)
 *
 * For 90/270-degree rotation the rotated view's geometry (tile pages for
 * the main and, for NV12, the UV plane) is filled in from @fb.
 * Always returns 0.
 */
static int
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct intel_rotation_info *info = &view->rotation_info;
	unsigned int tile_height, tile_pitch;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return 0;

	if (!intel_rotation_90_or_270(plane_state->rotation))
		return 0;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->uv_offset = fb->offsets[1];
	info->fb_modifier = fb->modifier[0];

	/* One tile occupies a full page; derive pages per row/column. */
	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
					fb->modifier[0], 0);
	tile_pitch = PAGE_SIZE / tile_height;
	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
	info->size = info->width_pages * info->height_pages * PAGE_SIZE;

	if (info->pixel_format == DRM_FORMAT_NV12) {
		/* NV12: UV plane is half height and has its own tile height. */
		tile_height = intel_tile_height(fb->dev, fb->pixel_format,
						fb->modifier[0], 1);
		tile_pitch = PAGE_SIZE / tile_height;
		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
						     tile_height);
		info->size_uv = info->width_pages_uv * info->height_pages_uv *
				PAGE_SIZE;
	}

	return 0;
}
2316
 
6084 serge 2317
static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
5060 serge 2318
{
6084 serge 2319
	if (INTEL_INFO(dev_priv)->gen >= 9)
2320
		return 256 * 1024;
2321
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2322
		 IS_VALLEYVIEW(dev_priv))
2323
		return 128 * 1024;
2324
	else if (INTEL_INFO(dev_priv)->gen >= 4)
2325
		return 4 * 1024;
2326
	else
2327
		return 0;
5060 serge 2328
}
2329
 
2335 Serge 2330
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer for scanout and fence it.
 * @plane: plane that will scan out the fb
 * @fb: framebuffer to pin
 * @plane_state: plane state, used to select normal vs rotated GGTT view
 * @pipelined: engine the pin may be pipelined against
 * @pipelined_request: request used for pipelining
 *
 * Picks the display alignment from the fb tiling, pins the object into the
 * display plane and (for the normal view) installs a fence register.
 * Returns 0 on success or a negative error code; on error nothing stays
 * pinned. Caller must hold struct_mutex.
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state,
			   struct intel_engine_cs *pipelined,
			   struct drm_i915_gem_request **pipelined_request)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Alignment requirement depends on the tiling layout. */
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		alignment = intel_linear_alignment(dev_priv);
		break;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else {
			/* pin() will align the object as required by fence */
			alignment = 0;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
			  "Y tiling bo slipped through, driver bug!\n"))
			return -EINVAL;
		alignment = 1 * 1024 * 1024;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
		return -EINVAL;
	}

	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
	if (ret)
		return ret;

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
						   pipelined_request, &view);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2327 Serge 2432
 
6084 serge 2433
/*
 * intel_unpin_fb_obj - undo intel_pin_and_fence_fb_obj().
 * @fb: framebuffer to unpin
 * @plane_state: plane state used to reconstruct the GGTT view that was pinned
 *
 * Releases the fence (normal view only) and unpins the object from the
 * display plane. Caller must hold struct_mutex.
 */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	int ret;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	/* Must compute the same view that was used for pinning. */
	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
	WARN_ONCE(ret, "Couldn't get view from plane state!");

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}
2450
 
2451
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2452
 * is assumed to be a power-of-two. */
6084 serge 2453
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
2454
					     int *x, int *y,
3480 Serge 2455
					     unsigned int tiling_mode,
2456
					     unsigned int cpp,
6084 serge 2457
					     unsigned int pitch)
3031 serge 2458
{
3480 Serge 2459
	if (tiling_mode != I915_TILING_NONE) {
2460
		unsigned int tile_rows, tiles;
3031 serge 2461
 
6084 serge 2462
		tile_rows = *y / 8;
2463
		*y %= 8;
3031 serge 2464
 
3480 Serge 2465
		tiles = *x / (512/cpp);
2466
		*x %= 512/cpp;
2467
 
6084 serge 2468
		return tile_rows * pitch * 8 + tiles * 4096;
3480 Serge 2469
	} else {
6084 serge 2470
		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
3480 Serge 2471
		unsigned int offset;
2472
 
2473
		offset = *y * pitch + *x * cpp;
6084 serge 2474
		*y = (offset & alignment) / pitch;
2475
		*x = ((offset & alignment) - *y * pitch) / cpp;
2476
		return offset & ~alignment;
3480 Serge 2477
	}
3031 serge 2478
}
2479
 
6084 serge 2480
static int i9xx_format_to_fourcc(int format)
2327 Serge 2481
{
5060 serge 2482
	switch (format) {
2483
	case DISPPLANE_8BPP:
2484
		return DRM_FORMAT_C8;
2485
	case DISPPLANE_BGRX555:
2486
		return DRM_FORMAT_XRGB1555;
2487
	case DISPPLANE_BGRX565:
2488
		return DRM_FORMAT_RGB565;
2489
	default:
2490
	case DISPPLANE_BGRX888:
2491
		return DRM_FORMAT_XRGB8888;
2492
	case DISPPLANE_RGBX888:
2493
		return DRM_FORMAT_XBGR8888;
2494
	case DISPPLANE_BGRX101010:
2495
		return DRM_FORMAT_XRGB2101010;
2496
	case DISPPLANE_RGBX101010:
2497
		return DRM_FORMAT_XBGR2101010;
2498
	}
2499
}
2500
 
6084 serge 2501
/*
 * Translate a Skylake PLANE_CTL_FORMAT_* value (plus the channel-order and
 * alpha-enable bits) into a DRM fourcc. Unrecognized formats are treated
 * as XRGB_8888, matching the hardware default.
 */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	if (format == PLANE_CTL_FORMAT_RGB_565)
		return DRM_FORMAT_RGB565;

	if (format == PLANE_CTL_FORMAT_XRGB_2101010)
		return rgb_order ? DRM_FORMAT_XBGR2101010 :
				   DRM_FORMAT_XRGB2101010;

	/* PLANE_CTL_FORMAT_XRGB_8888 and any unknown value. */
	if (rgb_order)
		return alpha ? DRM_FORMAT_ABGR8888 : DRM_FORMAT_XBGR8888;

	return alpha ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888;
}
2526
 
2527
/*
 * intel_alloc_initial_plane_obj - wrap the BIOS framebuffer in a GEM object.
 * @crtc: CRTC whose firmware-programmed plane config is being taken over
 * @plane_config: config read back from the hardware (base, size, tiling, fb)
 *
 * Creates a stolen-memory GEM object over the preallocated BIOS scanout
 * range and initializes plane_config->fb around it. Returns true on
 * success, false if the range is unusable (too big, zero-sized, or the
 * fb init fails).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
		return false;

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj)
		return false;

	/* Propagate the tiling the BIOS programmed into the new object. */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	mutex_lock(&dev->struct_mutex);
	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2585
 
6084 serge 2586
/* Update plane->state->fb to match plane->fb after driver-internal updates */
2587
static void
2588
update_state_fb(struct drm_plane *plane)
5060 serge 2589
{
6084 serge 2590
	if (plane->fb == plane->state->fb)
2591
		return;
2592
 
2593
	if (plane->state->fb)
2594
		drm_framebuffer_unreference(plane->state->fb);
2595
	plane->state->fb = plane->fb;
2596
	if (plane->state->fb)
2597
		drm_framebuffer_reference(plane->state->fb);
2598
}
2599
 
2600
/*
 * Take over the BIOS-programmed primary plane framebuffer, if possible.
 *
 * First tries to wrap the preallocated BIOS fb described by @plane_config
 * in a GEM object; failing that, looks for another active CRTC scanning
 * out from the same GGTT offset and shares its fb.  If neither works the
 * primary plane is disabled so we are not left with a visible plane that
 * has a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct drm_framebuffer *fb;

	/* BIOS left the plane without an fb: nothing to reconstruct. */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* Alloc failed; the intel_framebuffer wrapper is no longer needed. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* Same GGTT base as the BIOS config: share that fb. */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Program a 1:1 full-fb source -> crtc mapping at (0,0). */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	obj = intel_fb_obj(fb);
	/* A tiled BIOS fb means the BIOS swizzle setup must be kept. */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2684
 
2685
/*
 * Program the gen2-4 / VLV / CHV primary plane registers for @fb scanning
 * out from pixel position (@x, @y), or disable the plane when it is not
 * visible or has no fb.  Assumes the fb object is already pinned.
 */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	/* Invisible or fb-less plane: clear control and base, then flush. */
	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Translate the DRM fourcc into DISPPLANE pixel-format bits. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Unsupported formats must have been rejected earlier. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ splits the base into a page-aligned surface address
		 * plus an intra-page (x, y) offset; the helper adjusts x/y. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(dev_priv,
						       &x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2814
 
5060 serge 2815
/*
 * Program the ILK+ (through HSW/BDW) primary plane registers for @fb
 * scanning out from pixel position (@x, @y), or disable the plane when it
 * is not visible or has no fb.  Assumes the fb object is already pinned.
 */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	/* Invisible or fb-less plane: clear control and surface, then flush. */
	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the DRM fourcc into DISPPLANE pixel-format bits. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Unsupported formats must have been rejected earlier. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	/* Split the base into a page-aligned surface address plus an
	 * intra-page (x, y) offset; the helper adjusts x/y in place. */
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(dev_priv,
					       &x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW take only the rotate bit; older gens also need the
		 * start offset moved to the last pixel. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2918
 
6084 serge 2919
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2920
			      uint32_t pixel_format)
2921
{
2922
	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2923
 
2924
	/*
2925
	 * The stride is either expressed as a multiple of 64 bytes
2926
	 * chunks for linear buffers or in number of tiles for tiled
2927
	 * buffers.
2928
	 */
2929
	switch (fb_modifier) {
2930
	case DRM_FORMAT_MOD_NONE:
2931
		return 64;
2932
	case I915_FORMAT_MOD_X_TILED:
2933
		if (INTEL_INFO(dev)->gen == 2)
2934
			return 128;
2935
		return 512;
2936
	case I915_FORMAT_MOD_Y_TILED:
2937
		/* No need to check for old gens and Y tiling since this is
2938
		 * about the display engine and those will be blocked before
2939
		 * we get here.
2940
		 */
2941
		return 128;
2942
	case I915_FORMAT_MOD_Yf_TILED:
2943
		if (bits_per_pixel == 8)
2944
			return 64;
2945
		else
2946
			return 128;
2947
	default:
2948
		MISSING_CASE(fb_modifier);
2949
		return 64;
2950
	}
2951
}
2952
 
6660 serge 2953
/*
 * Return the low 32 bits of the GGTT offset of @obj's display mapping for
 * the given @plane (plane 1 being the chroma/UV plane of a planar format).
 * Uses the rotated GGTT view when the plane state is rotated 90/270.
 * Returns -1 (0xffffffff, since the return type is u32) when no display
 * vma exists for the object.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
				     struct drm_i915_gem_object *obj,
				     unsigned int plane)
{
	const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
	struct i915_vma *vma;
	u64 offset;

	if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
		view = &i915_ggtt_view_rotated;

	vma = i915_gem_obj_to_ggtt_view(obj, view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view->type))
		return -1;

	offset = vma->node.start;

	/* UV plane of a rotated planar fb starts at its own page offset. */
	if (plane == 1) {
		offset += vma->ggtt_view.rotation_info.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display registers take a 32-bit surface address. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2980
 
2981
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2982
{
2983
	struct drm_device *dev = intel_crtc->base.dev;
2984
	struct drm_i915_private *dev_priv = dev->dev_private;
2985
 
2986
	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2987
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2988
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2989
}
2990
 
2991
/*
2992
 * This function detaches (aka. unbinds) unused scalers in hardware
2993
 */
2994
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2995
{
2996
	struct intel_crtc_scaler_state *scaler_state;
2997
	int i;
2998
 
2999
	scaler_state = &intel_crtc->config->scaler_state;
3000
 
3001
	/* loop through and disable scalers that aren't in use */
3002
	for (i = 0; i < intel_crtc->num_scalers; i++) {
3003
		if (!scaler_state->scalers[i].in_use)
3004
			skl_detach_scaler(intel_crtc, i);
3005
	}
3006
}
3007
 
3008
u32 skl_plane_ctl_format(uint32_t pixel_format)
3009
{
3010
	switch (pixel_format) {
3011
	case DRM_FORMAT_C8:
3012
		return PLANE_CTL_FORMAT_INDEXED;
3013
	case DRM_FORMAT_RGB565:
3014
		return PLANE_CTL_FORMAT_RGB_565;
3015
	case DRM_FORMAT_XBGR8888:
3016
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3017
	case DRM_FORMAT_XRGB8888:
3018
		return PLANE_CTL_FORMAT_XRGB_8888;
3019
	/*
3020
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3021
	 * to be already pre-multiplied. We need to add a knob (or a different
3022
	 * DRM_FORMAT) for user-space to configure that.
3023
	 */
3024
	case DRM_FORMAT_ABGR8888:
3025
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
3026
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3027
	case DRM_FORMAT_ARGB8888:
3028
		return PLANE_CTL_FORMAT_XRGB_8888 |
3029
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3030
	case DRM_FORMAT_XRGB2101010:
3031
		return PLANE_CTL_FORMAT_XRGB_2101010;
3032
	case DRM_FORMAT_XBGR2101010:
3033
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3034
	case DRM_FORMAT_YUYV:
3035
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3036
	case DRM_FORMAT_YVYU:
3037
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3038
	case DRM_FORMAT_UYVY:
3039
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3040
	case DRM_FORMAT_VYUY:
3041
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3042
	default:
3043
		MISSING_CASE(pixel_format);
3044
	}
3045
 
3046
	return 0;
3047
}
3048
 
3049
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3050
{
3051
	switch (fb_modifier) {
3052
	case DRM_FORMAT_MOD_NONE:
3053
		break;
3054
	case I915_FORMAT_MOD_X_TILED:
3055
		return PLANE_CTL_TILED_X;
3056
	case I915_FORMAT_MOD_Y_TILED:
3057
		return PLANE_CTL_TILED_Y;
3058
	case I915_FORMAT_MOD_Yf_TILED:
3059
		return PLANE_CTL_TILED_YF;
3060
	default:
3061
		MISSING_CASE(fb_modifier);
3062
	}
3063
 
3064
	return 0;
3065
}
3066
 
3067
u32 skl_plane_ctl_rotation(unsigned int rotation)
3068
{
3069
	switch (rotation) {
3070
	case BIT(DRM_ROTATE_0):
3071
		break;
3072
	/*
3073
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
3074
	 * while i915 HW rotation is clockwise, thats why this swapping.
3075
	 */
3076
	case BIT(DRM_ROTATE_90):
3077
		return PLANE_CTL_ROTATE_270;
3078
	case BIT(DRM_ROTATE_180):
3079
		return PLANE_CTL_ROTATE_180;
3080
	case BIT(DRM_ROTATE_270):
3081
		return PLANE_CTL_ROTATE_90;
3082
	default:
3083
		MISSING_CASE(rotation);
3084
	}
3085
 
3086
	return 0;
3087
}
3088
 
5354 serge 3089
/*
 * Program the SKL+ universal plane 0 (primary) registers for @fb, or
 * disable the plane when it is not visible or has no fb.  Also programs
 * the pipe scaler when the plane state has one assigned.
 */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = crtc->primary;
	bool visible = to_intel_plane_state(plane->state)->visible;
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	struct intel_crtc_state *crtc_state = intel_crtc->config;
	struct intel_plane_state *plane_state;
	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
	int scaler_id = -1;

	plane_state = to_intel_plane_state(plane->state);

	/* Invisible or fb-less plane: clear control and surface, then flush. */
	if (!visible || !fb) {
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

	rotation = plane->state->rotation;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	obj = intel_fb_obj(fb);
	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	/* src coordinates are 16.16 fixed point; dst are integer pixels. */
	scaler_id = plane_state->scaler_id;
	src_x = plane_state->src.x1 >> 16;
	src_y = plane_state->src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->src) >> 16;
	src_h = drm_rect_height(&plane_state->src) >> 16;
	dst_x = plane_state->dst.x1;
	dst_y = plane_state->dst.y1;
	dst_w = drm_rect_width(&plane_state->dst);
	dst_h = drm_rect_height(&plane_state->dst);

	WARN_ON(x != src_x || y != src_y);

	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev, fb->pixel_format,
						fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* 90/270 scanout walks the rotated view, so swap x/y. */
		x_offset = stride * tile_height - y - src_h;
		y_offset = x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = x;
		y_offset = y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		/* Route the plane through the assigned pipe scaler; the
		 * scaler window then carries the dst rectangle. */
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3193
 
2327 Serge 3194
/* Assume fb object is pinned & idle & fenced and just update base pointers */
3195
static int
3196
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3197
			   int x, int y, enum mode_set_atomic state)
3198
{
3199
	struct drm_device *dev = crtc->dev;
3200
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3201
 
6084 serge 3202
	if (dev_priv->fbc.disable_fbc)
3203
		dev_priv->fbc.disable_fbc(dev_priv);
3031 serge 3204
 
5060 serge 3205
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
3206
 
3207
	return 0;
3031 serge 3208
}
3209
 
5354 serge 3210
static void intel_complete_page_flips(struct drm_device *dev)
4104 Serge 3211
{
3212
	struct drm_crtc *crtc;
3213
 
5060 serge 3214
	for_each_crtc(dev, crtc) {
4104 Serge 3215
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3216
		enum plane plane = intel_crtc->plane;
3217
 
3218
		intel_prepare_page_flip(dev, plane);
3219
		intel_finish_page_flip_plane(dev, plane);
3220
	}
5354 serge 3221
}
4104 Serge 3222
 
5354 serge 3223
static void intel_update_primary_planes(struct drm_device *dev)
3224
{
3225
	struct drm_crtc *crtc;
3226
 
5060 serge 3227
	for_each_crtc(dev, crtc) {
6084 serge 3228
		struct intel_plane *plane = to_intel_plane(crtc->primary);
3229
		struct intel_plane_state *plane_state;
4104 Serge 3230
 
6084 serge 3231
		drm_modeset_lock_crtc(crtc, &plane->base);
3232
 
3233
		plane_state = to_intel_plane_state(plane->base.state);
3234
 
3235
		if (plane_state->base.fb)
3236
			plane->commit_plane(&plane->base, plane_state);
3237
 
3238
		drm_modeset_unlock_crtc(crtc);
4104 Serge 3239
	}
3240
}
3241
 
5354 serge 3242
/*
 * Quiesce the display before a GPU reset on platforms where the reset
 * clobbers the display.  Takes all modeset locks and leaves them held;
 * intel_finish_reset() is responsible for dropping them.
 */
void intel_prepare_reset(struct drm_device *dev)
{
	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		return;

	drm_modeset_lock_all(dev);
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	intel_display_suspend(dev);
}
3259
 
3260
/*
 * Bring the display back after a GPU reset.  Completes pending page
 * flips, then either restores the plane base addresses (platforms where
 * reset leaves the display alone) or reinitializes the whole display
 * hardware.  Drops the modeset locks taken by intel_prepare_reset().
 */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev);

	intel_hpd_init(dev_priv);

	drm_modeset_unlock_all(dev);
}
3310
 
6084 serge 3311
/*
 * Wait for all outstanding rendering on @old_fb's backing object before
 * it is unpinned.  The wait is made non-interruptible for its duration
 * and the previous interruptible setting is restored afterwards.
 */
static void
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer. Note that we rely on userspace rendering
	 * into the buffer attached to the pipe they are waiting
	 * on. If not, userspace generates a GPU hang with IPEHR
	 * point to the MI_WAIT_FOR_EVENT.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_wait_rendering(obj, true);
	dev_priv->mm.interruptible = was_interruptible;

	WARN_ON(ret);
}
4104 Serge 3336
 
5060 serge 3337
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
4104 Serge 3338
{
3339
	struct drm_device *dev = crtc->dev;
5060 serge 3340
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3341
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 3342
	bool pending;
4104 Serge 3343
 
5060 serge 3344
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3345
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3346
		return false;
4104 Serge 3347
 
5354 serge 3348
	spin_lock_irq(&dev->event_lock);
5060 serge 3349
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
5354 serge 3350
	spin_unlock_irq(&dev->event_lock);
4104 Serge 3351
 
5060 serge 3352
	return pending;
4104 Serge 3353
}
2327 Serge 3354
 
6084 serge 3355
/*
 * Apply pipe source size and panel-fitter changes for a flip that skips a
 * full modeset (fastboot path), based on the delta between
 * @old_crtc_state and the CRTC's current state.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	if (HAS_DDI(dev))
		intel_set_pipe_csc(&crtc->base);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3399
 
3400
/*
 * Switch the FDI TX and RX link from the training patterns to the normal
 * (active) pattern after link training completed, and enable enhanced
 * framing.  IVB additionally gets FDI error correction enabled.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3440
 
3441
/* The FDI link training functions for ILK/Ibexpeak. */
3442
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3443
{
6084 serge 3444
	struct drm_device *dev = crtc->dev;
3445
	struct drm_i915_private *dev_priv = dev->dev_private;
3446
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3447
	int pipe = intel_crtc->pipe;
3448
	u32 reg, temp, tries;
2327 Serge 3449
 
5060 serge 3450
	/* FDI needs bits from pipe first */
6084 serge 3451
	assert_pipe_enabled(dev_priv, pipe);
2327 Serge 3452
 
6084 serge 3453
	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3454
	   for train result */
3455
	reg = FDI_RX_IMR(pipe);
3456
	temp = I915_READ(reg);
3457
	temp &= ~FDI_RX_SYMBOL_LOCK;
3458
	temp &= ~FDI_RX_BIT_LOCK;
3459
	I915_WRITE(reg, temp);
3460
	I915_READ(reg);
3461
	udelay(150);
2327 Serge 3462
 
6084 serge 3463
	/* enable CPU FDI TX and PCH FDI RX */
3464
	reg = FDI_TX_CTL(pipe);
3465
	temp = I915_READ(reg);
4104 Serge 3466
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
6084 serge 3467
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3468
	temp &= ~FDI_LINK_TRAIN_NONE;
3469
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3470
	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2327 Serge 3471
 
6084 serge 3472
	reg = FDI_RX_CTL(pipe);
3473
	temp = I915_READ(reg);
3474
	temp &= ~FDI_LINK_TRAIN_NONE;
3475
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3476
	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2327 Serge 3477
 
6084 serge 3478
	POSTING_READ(reg);
3479
	udelay(150);
2327 Serge 3480
 
6084 serge 3481
	/* Ironlake workaround, enable clock pointer after FDI enable*/
3482
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3483
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3484
		   FDI_RX_PHASE_SYNC_POINTER_EN);
2327 Serge 3485
 
6084 serge 3486
	reg = FDI_RX_IIR(pipe);
3487
	for (tries = 0; tries < 5; tries++) {
3488
		temp = I915_READ(reg);
3489
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2327 Serge 3490
 
6084 serge 3491
		if ((temp & FDI_RX_BIT_LOCK)) {
3492
			DRM_DEBUG_KMS("FDI train 1 done.\n");
3493
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3494
			break;
3495
		}
3496
	}
3497
	if (tries == 5)
3498
		DRM_ERROR("FDI train 1 fail!\n");
2327 Serge 3499
 
6084 serge 3500
	/* Train 2 */
3501
	reg = FDI_TX_CTL(pipe);
3502
	temp = I915_READ(reg);
3503
	temp &= ~FDI_LINK_TRAIN_NONE;
3504
	temp |= FDI_LINK_TRAIN_PATTERN_2;
3505
	I915_WRITE(reg, temp);
2327 Serge 3506
 
6084 serge 3507
	reg = FDI_RX_CTL(pipe);
3508
	temp = I915_READ(reg);
3509
	temp &= ~FDI_LINK_TRAIN_NONE;
3510
	temp |= FDI_LINK_TRAIN_PATTERN_2;
3511
	I915_WRITE(reg, temp);
2327 Serge 3512
 
6084 serge 3513
	POSTING_READ(reg);
3514
	udelay(150);
2327 Serge 3515
 
6084 serge 3516
	reg = FDI_RX_IIR(pipe);
3517
	for (tries = 0; tries < 5; tries++) {
3518
		temp = I915_READ(reg);
3519
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2327 Serge 3520
 
6084 serge 3521
		if (temp & FDI_RX_SYMBOL_LOCK) {
3522
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3523
			DRM_DEBUG_KMS("FDI train 2 done.\n");
3524
			break;
3525
		}
3526
	}
3527
	if (tries == 5)
3528
		DRM_ERROR("FDI train 2 fail!\n");
2327 Serge 3529
 
6084 serge 3530
	DRM_DEBUG_KMS("FDI train done\n");
2327 Serge 3531
 
3532
}
3533
 
2342 Serge 3534
static const int snb_b_fdi_train_param[] = {
6084 serge 3535
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3536
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3537
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3538
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2327 Serge 3539
};
3540
 
3541
/* The FDI link training functions for SNB/Cougarpoint. */
3542
static void gen6_fdi_link_train(struct drm_crtc *crtc)
3543
{
6084 serge 3544
	struct drm_device *dev = crtc->dev;
3545
	struct drm_i915_private *dev_priv = dev->dev_private;
3546
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3547
	int pipe = intel_crtc->pipe;
3031 serge 3548
	u32 reg, temp, i, retry;
2327 Serge 3549
 
6084 serge 3550
	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3551
	   for train result */
3552
	reg = FDI_RX_IMR(pipe);
3553
	temp = I915_READ(reg);
3554
	temp &= ~FDI_RX_SYMBOL_LOCK;
3555
	temp &= ~FDI_RX_BIT_LOCK;
3556
	I915_WRITE(reg, temp);
2327 Serge 3557
 
6084 serge 3558
	POSTING_READ(reg);
3559
	udelay(150);
2327 Serge 3560
 
6084 serge 3561
	/* enable CPU FDI TX and PCH FDI RX */
3562
	reg = FDI_TX_CTL(pipe);
3563
	temp = I915_READ(reg);
4104 Serge 3564
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
6084 serge 3565
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3566
	temp &= ~FDI_LINK_TRAIN_NONE;
3567
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3568
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3569
	/* SNB-B */
3570
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3571
	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2327 Serge 3572
 
3243 Serge 3573
	I915_WRITE(FDI_RX_MISC(pipe),
3574
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3575
 
6084 serge 3576
	reg = FDI_RX_CTL(pipe);
3577
	temp = I915_READ(reg);
3578
	if (HAS_PCH_CPT(dev)) {
3579
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3580
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3581
	} else {
3582
		temp &= ~FDI_LINK_TRAIN_NONE;
3583
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3584
	}
3585
	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2327 Serge 3586
 
6084 serge 3587
	POSTING_READ(reg);
3588
	udelay(150);
2327 Serge 3589
 
2342 Serge 3590
	for (i = 0; i < 4; i++) {
6084 serge 3591
		reg = FDI_TX_CTL(pipe);
3592
		temp = I915_READ(reg);
3593
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3594
		temp |= snb_b_fdi_train_param[i];
3595
		I915_WRITE(reg, temp);
2327 Serge 3596
 
6084 serge 3597
		POSTING_READ(reg);
3598
		udelay(500);
2327 Serge 3599
 
3031 serge 3600
		for (retry = 0; retry < 5; retry++) {
6084 serge 3601
			reg = FDI_RX_IIR(pipe);
3602
			temp = I915_READ(reg);
3603
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3604
			if (temp & FDI_RX_BIT_LOCK) {
3605
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3606
				DRM_DEBUG_KMS("FDI train 1 done.\n");
3607
				break;
3608
			}
3031 serge 3609
			udelay(50);
3610
		}
3611
		if (retry < 5)
3612
			break;
6084 serge 3613
	}
3614
	if (i == 4)
3615
		DRM_ERROR("FDI train 1 fail!\n");
2327 Serge 3616
 
6084 serge 3617
	/* Train 2 */
3618
	reg = FDI_TX_CTL(pipe);
3619
	temp = I915_READ(reg);
3620
	temp &= ~FDI_LINK_TRAIN_NONE;
3621
	temp |= FDI_LINK_TRAIN_PATTERN_2;
3622
	if (IS_GEN6(dev)) {
3623
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3624
		/* SNB-B */
3625
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3626
	}
3627
	I915_WRITE(reg, temp);
2327 Serge 3628
 
6084 serge 3629
	reg = FDI_RX_CTL(pipe);
3630
	temp = I915_READ(reg);
3631
	if (HAS_PCH_CPT(dev)) {
3632
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3633
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3634
	} else {
3635
		temp &= ~FDI_LINK_TRAIN_NONE;
3636
		temp |= FDI_LINK_TRAIN_PATTERN_2;
3637
	}
3638
	I915_WRITE(reg, temp);
2327 Serge 3639
 
6084 serge 3640
	POSTING_READ(reg);
3641
	udelay(150);
2327 Serge 3642
 
2342 Serge 3643
	for (i = 0; i < 4; i++) {
6084 serge 3644
		reg = FDI_TX_CTL(pipe);
3645
		temp = I915_READ(reg);
3646
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3647
		temp |= snb_b_fdi_train_param[i];
3648
		I915_WRITE(reg, temp);
2327 Serge 3649
 
6084 serge 3650
		POSTING_READ(reg);
3651
		udelay(500);
2327 Serge 3652
 
3031 serge 3653
		for (retry = 0; retry < 5; retry++) {
6084 serge 3654
			reg = FDI_RX_IIR(pipe);
3655
			temp = I915_READ(reg);
3656
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3657
			if (temp & FDI_RX_SYMBOL_LOCK) {
3658
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3659
				DRM_DEBUG_KMS("FDI train 2 done.\n");
3660
				break;
3661
			}
3031 serge 3662
			udelay(50);
3663
		}
3664
		if (retry < 5)
3665
			break;
6084 serge 3666
	}
3667
	if (i == 4)
3668
		DRM_ERROR("FDI train 2 fail!\n");
2327 Serge 3669
 
6084 serge 3670
	DRM_DEBUG_KMS("FDI train done.\n");
2327 Serge 3671
}
3672
 
3673
/* Manual link training for Ivy Bridge A0 parts */
3674
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3675
{
6084 serge 3676
	struct drm_device *dev = crtc->dev;
3677
	struct drm_i915_private *dev_priv = dev->dev_private;
3678
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3679
	int pipe = intel_crtc->pipe;
4104 Serge 3680
	u32 reg, temp, i, j;
2327 Serge 3681
 
6084 serge 3682
	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3683
	   for train result */
3684
	reg = FDI_RX_IMR(pipe);
3685
	temp = I915_READ(reg);
3686
	temp &= ~FDI_RX_SYMBOL_LOCK;
3687
	temp &= ~FDI_RX_BIT_LOCK;
3688
	I915_WRITE(reg, temp);
2327 Serge 3689
 
6084 serge 3690
	POSTING_READ(reg);
3691
	udelay(150);
2327 Serge 3692
 
3243 Serge 3693
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3694
		      I915_READ(FDI_RX_IIR(pipe)));
3695
 
4104 Serge 3696
	/* Try each vswing and preemphasis setting twice before moving on */
3697
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3698
		/* disable first in case we need to retry */
3699
		reg = FDI_TX_CTL(pipe);
3700
		temp = I915_READ(reg);
3701
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3702
		temp &= ~FDI_TX_ENABLE;
3703
		I915_WRITE(reg, temp);
3704
 
3705
		reg = FDI_RX_CTL(pipe);
3706
		temp = I915_READ(reg);
3707
		temp &= ~FDI_LINK_TRAIN_AUTO;
3708
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3709
		temp &= ~FDI_RX_ENABLE;
3710
		I915_WRITE(reg, temp);
3711
 
6084 serge 3712
		/* enable CPU FDI TX and PCH FDI RX */
3713
		reg = FDI_TX_CTL(pipe);
3714
		temp = I915_READ(reg);
3715
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
3716
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3717
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3718
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4104 Serge 3719
		temp |= snb_b_fdi_train_param[j/2];
6084 serge 3720
		temp |= FDI_COMPOSITE_SYNC;
3721
		I915_WRITE(reg, temp | FDI_TX_ENABLE);
2327 Serge 3722
 
6084 serge 3723
		I915_WRITE(FDI_RX_MISC(pipe),
3724
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3243 Serge 3725
 
6084 serge 3726
		reg = FDI_RX_CTL(pipe);
3727
		temp = I915_READ(reg);
3728
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3729
		temp |= FDI_COMPOSITE_SYNC;
3730
		I915_WRITE(reg, temp | FDI_RX_ENABLE);
2327 Serge 3731
 
6084 serge 3732
		POSTING_READ(reg);
4104 Serge 3733
		udelay(1); /* should be 0.5us */
2327 Serge 3734
 
6084 serge 3735
		for (i = 0; i < 4; i++) {
3736
			reg = FDI_RX_IIR(pipe);
3737
			temp = I915_READ(reg);
3738
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2327 Serge 3739
 
6084 serge 3740
			if (temp & FDI_RX_BIT_LOCK ||
3741
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3742
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4104 Serge 3743
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3744
					      i);
6084 serge 3745
				break;
3746
			}
4104 Serge 3747
			udelay(1); /* should be 0.5us */
3748
		}
3749
		if (i == 4) {
3750
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3751
			continue;
6084 serge 3752
		}
2327 Serge 3753
 
6084 serge 3754
		/* Train 2 */
3755
		reg = FDI_TX_CTL(pipe);
3756
		temp = I915_READ(reg);
3757
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3758
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3759
		I915_WRITE(reg, temp);
2327 Serge 3760
 
6084 serge 3761
		reg = FDI_RX_CTL(pipe);
3762
		temp = I915_READ(reg);
3763
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3764
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3765
		I915_WRITE(reg, temp);
2327 Serge 3766
 
6084 serge 3767
		POSTING_READ(reg);
4104 Serge 3768
		udelay(2); /* should be 1.5us */
2327 Serge 3769
 
6084 serge 3770
		for (i = 0; i < 4; i++) {
3771
			reg = FDI_RX_IIR(pipe);
3772
			temp = I915_READ(reg);
3773
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2327 Serge 3774
 
4104 Serge 3775
			if (temp & FDI_RX_SYMBOL_LOCK ||
3776
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
6084 serge 3777
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4104 Serge 3778
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3779
					      i);
3780
				goto train_done;
6084 serge 3781
			}
4104 Serge 3782
			udelay(2); /* should be 1.5us */
6084 serge 3783
		}
3784
		if (i == 4)
4104 Serge 3785
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3786
	}
2327 Serge 3787
 
4104 Serge 3788
train_done:
6084 serge 3789
	DRM_DEBUG_KMS("FDI train done.\n");
2327 Serge 3790
}
3791
 
3031 serge 3792
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2327 Serge 3793
{
3031 serge 3794
	struct drm_device *dev = intel_crtc->base.dev;
2327 Serge 3795
	struct drm_i915_private *dev_priv = dev->dev_private;
3796
	int pipe = intel_crtc->pipe;
3797
	u32 reg, temp;
3798
 
3799
 
3800
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3801
	reg = FDI_RX_CTL(pipe);
3802
	temp = I915_READ(reg);
4104 Serge 3803
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
6084 serge 3804
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3480 Serge 3805
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3806
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3807
 
3808
	POSTING_READ(reg);
3809
	udelay(200);
3810
 
3811
	/* Switch from Rawclk to PCDclk */
3812
	temp = I915_READ(reg);
3813
	I915_WRITE(reg, temp | FDI_PCDCLK);
3814
 
3815
	POSTING_READ(reg);
3816
	udelay(200);
3817
 
3818
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3819
	reg = FDI_TX_CTL(pipe);
3820
	temp = I915_READ(reg);
3821
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3822
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3823
 
3824
		POSTING_READ(reg);
3825
		udelay(100);
3826
	}
3827
}
3828
 
3031 serge 3829
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3830
{
3831
	struct drm_device *dev = intel_crtc->base.dev;
3832
	struct drm_i915_private *dev_priv = dev->dev_private;
3833
	int pipe = intel_crtc->pipe;
3834
	u32 reg, temp;
3835
 
3836
	/* Switch from PCDclk to Rawclk */
3837
	reg = FDI_RX_CTL(pipe);
3838
	temp = I915_READ(reg);
3839
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3840
 
3841
	/* Disable CPU FDI TX PLL */
3842
	reg = FDI_TX_CTL(pipe);
3843
	temp = I915_READ(reg);
3844
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3845
 
3846
	POSTING_READ(reg);
3847
	udelay(100);
3848
 
3849
	reg = FDI_RX_CTL(pipe);
3850
	temp = I915_READ(reg);
3851
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3852
 
3853
	/* Wait for the clocks to turn off. */
3854
	POSTING_READ(reg);
3855
	udelay(100);
3856
}
3857
 
2327 Serge 3858
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3859
{
3860
	struct drm_device *dev = crtc->dev;
3861
	struct drm_i915_private *dev_priv = dev->dev_private;
3862
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3863
	int pipe = intel_crtc->pipe;
3864
	u32 reg, temp;
3865
 
3866
	/* disable CPU FDI tx and PCH FDI rx */
3867
	reg = FDI_TX_CTL(pipe);
3868
	temp = I915_READ(reg);
3869
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3870
	POSTING_READ(reg);
3871
 
3872
	reg = FDI_RX_CTL(pipe);
3873
	temp = I915_READ(reg);
3874
	temp &= ~(0x7 << 16);
3480 Serge 3875
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3876
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3877
 
3878
	POSTING_READ(reg);
3879
	udelay(100);
3880
 
3881
	/* Ironlake workaround, disable clock pointer after downing FDI */
5060 serge 3882
	if (HAS_PCH_IBX(dev))
2327 Serge 3883
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3884
 
3885
	/* still set train pattern 1 */
3886
	reg = FDI_TX_CTL(pipe);
3887
	temp = I915_READ(reg);
3888
	temp &= ~FDI_LINK_TRAIN_NONE;
3889
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3890
	I915_WRITE(reg, temp);
3891
 
3892
	reg = FDI_RX_CTL(pipe);
3893
	temp = I915_READ(reg);
3894
	if (HAS_PCH_CPT(dev)) {
3895
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3896
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3897
	} else {
3898
		temp &= ~FDI_LINK_TRAIN_NONE;
3899
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3900
	}
3901
	/* BPC in FDI rx is consistent with that in PIPECONF */
3902
	temp &= ~(0x07 << 16);
3480 Serge 3903
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3904
	I915_WRITE(reg, temp);
3905
 
3906
	POSTING_READ(reg);
3907
	udelay(100);
3908
}
3909
 
5060 serge 3910
bool intel_has_pending_fb_unpin(struct drm_device *dev)
2327 Serge 3911
{
5060 serge 3912
	struct intel_crtc *crtc;
2327 Serge 3913
 
5060 serge 3914
	/* Note that we don't need to be called with mode_config.lock here
3915
	 * as our list of CRTC objects is static for the lifetime of the
3916
	 * device and so cannot disappear as we iterate. Similarly, we can
3917
	 * happily treat the predicates as racy, atomic checks as userspace
3918
	 * cannot claim and pin a new fb without at least acquring the
3919
	 * struct_mutex and so serialising with us.
3920
	 */
3921
	for_each_intel_crtc(dev, crtc) {
3922
		if (atomic_read(&crtc->unpin_work_count) == 0)
3923
			continue;
2327 Serge 3924
 
5060 serge 3925
		if (crtc->unpin_work)
3926
			intel_wait_for_vblank(dev, crtc->pipe);
3031 serge 3927
 
5060 serge 3928
		return true;
3929
	}
3930
 
3931
	return false;
2327 Serge 3932
}
3933
 
6283 serge 3934
static void page_flip_completed(struct intel_crtc *intel_crtc)
3935
{
3936
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3937
	struct intel_unpin_work *work = intel_crtc->unpin_work;
3938
 
3939
	/* ensure that the unpin work is consistent wrt ->pending. */
3940
	smp_rmb();
3941
	intel_crtc->unpin_work = NULL;
3942
 
3943
	if (work->event)
3944
		drm_send_vblank_event(intel_crtc->base.dev,
3945
				      intel_crtc->pipe,
3946
				      work->event);
3947
 
3948
	drm_crtc_vblank_put(&intel_crtc->base);
3949
 
6320 serge 3950
	wake_up_all(&dev_priv->pending_flip_queue);
3951
	trace_i915_flip_complete(intel_crtc->plane,
3952
				 work->pending_flip_obj);
6935 serge 3953
 
3954
	queue_work(dev_priv->wq, &work->work);
6283 serge 3955
}
6320 serge 3956
 
5060 serge 3957
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2327 Serge 3958
{
3031 serge 3959
	struct drm_device *dev = crtc->dev;
3960
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 3961
 
3480 Serge 3962
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
5354 serge 3963
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3964
				       !intel_crtc_has_pending_flip(crtc),
3965
				       60*HZ) == 0)) {
3966
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3480 Serge 3967
 
5354 serge 3968
		spin_lock_irq(&dev->event_lock);
3969
		if (intel_crtc->unpin_work) {
3970
			WARN_ONCE(1, "Removing stuck page flip\n");
3971
			page_flip_completed(intel_crtc);
3972
		}
3973
		spin_unlock_irq(&dev->event_lock);
3974
	}
3031 serge 3975
 
5354 serge 3976
	if (crtc->primary->fb) {
6084 serge 3977
		mutex_lock(&dev->struct_mutex);
3978
		intel_finish_fb(crtc->primary->fb);
3979
		mutex_unlock(&dev->struct_mutex);
5354 serge 3980
	}
2327 Serge 3981
}
3982
 
3031 serge 3983
/* Program iCLKIP clock to the desired frequency */
3984
static void lpt_program_iclkip(struct drm_crtc *crtc)
3985
{
3986
	struct drm_device *dev = crtc->dev;
3987
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 3988
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3031 serge 3989
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3990
	u32 temp;
3991
 
6084 serge 3992
	mutex_lock(&dev_priv->sb_lock);
3480 Serge 3993
 
3031 serge 3994
	/* It is necessary to ungate the pixclk gate prior to programming
3995
	 * the divisors, and gate it back when it is done.
3996
	 */
3997
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3998
 
3999
	/* Disable SSCCTL */
4000
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3243 Serge 4001
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
4002
				SBI_SSCCTL_DISABLE,
4003
			SBI_ICLK);
3031 serge 4004
 
4005
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
4560 Serge 4006
	if (clock == 20000) {
3031 serge 4007
		auxdiv = 1;
4008
		divsel = 0x41;
4009
		phaseinc = 0x20;
4010
	} else {
4011
		/* The iCLK virtual clock root frequency is in MHz,
4560 Serge 4012
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
4013
		 * divisors, it is necessary to divide one by another, so we
3031 serge 4014
		 * convert the virtual clock precision to KHz here for higher
4015
		 * precision.
4016
		 */
4017
		u32 iclk_virtual_root_freq = 172800 * 1000;
4018
		u32 iclk_pi_range = 64;
4019
		u32 desired_divisor, msb_divisor_value, pi_value;
4020
 
4560 Serge 4021
		desired_divisor = (iclk_virtual_root_freq / clock);
3031 serge 4022
		msb_divisor_value = desired_divisor / iclk_pi_range;
4023
		pi_value = desired_divisor % iclk_pi_range;
4024
 
4025
		auxdiv = 0;
4026
		divsel = msb_divisor_value - 2;
4027
		phaseinc = pi_value;
4028
	}
4029
 
4030
	/* This should not happen with any sane values */
4031
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4032
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4033
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4034
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4035
 
4036
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4560 Serge 4037
			clock,
3031 serge 4038
			auxdiv,
4039
			divsel,
4040
			phasedir,
4041
			phaseinc);
4042
 
4043
	/* Program SSCDIVINTPHASE6 */
3243 Serge 4044
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3031 serge 4045
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4046
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4047
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4048
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4049
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4050
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3243 Serge 4051
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3031 serge 4052
 
4053
	/* Program SSCAUXDIV */
3243 Serge 4054
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3031 serge 4055
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4056
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3243 Serge 4057
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3031 serge 4058
 
4059
	/* Enable modulator and associated divider */
3243 Serge 4060
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3031 serge 4061
	temp &= ~SBI_SSCCTL_DISABLE;
3243 Serge 4062
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3031 serge 4063
 
4064
	/* Wait for initialization time */
4065
	udelay(24);
4066
 
4067
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3480 Serge 4068
 
6084 serge 4069
	mutex_unlock(&dev_priv->sb_lock);
3031 serge 4070
}
4071
 
4104 Serge 4072
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4073
						enum pipe pch_transcoder)
4074
{
4075
	struct drm_device *dev = crtc->base.dev;
4076
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 4077
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
4104 Serge 4078
 
4079
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4080
		   I915_READ(HTOTAL(cpu_transcoder)));
4081
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4082
		   I915_READ(HBLANK(cpu_transcoder)));
4083
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4084
		   I915_READ(HSYNC(cpu_transcoder)));
4085
 
4086
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4087
		   I915_READ(VTOTAL(cpu_transcoder)));
4088
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4089
		   I915_READ(VBLANK(cpu_transcoder)));
4090
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4091
		   I915_READ(VSYNC(cpu_transcoder)));
4092
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4093
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
4094
}
4095
 
6084 serge 4096
/*
 * Set or clear the CPT FDI B/C lane-bifurcation chicken bit. Must not
 * be toggled while FDI RX on pipe B or C is enabled (asserted below).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* nothing to do if the bit already matches the request */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4116
 
4117
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4118
{
4119
	struct drm_device *dev = intel_crtc->base.dev;
4120
 
4121
	switch (intel_crtc->pipe) {
4122
	case PIPE_A:
4123
		break;
4124
	case PIPE_B:
6084 serge 4125
		if (intel_crtc->config->fdi_lanes > 2)
4126
			cpt_set_fdi_bc_bifurcation(dev, false);
4280 Serge 4127
		else
6084 serge 4128
			cpt_set_fdi_bc_bifurcation(dev, true);
4280 Serge 4129
 
4130
		break;
4131
	case PIPE_C:
6084 serge 4132
		cpt_set_fdi_bc_bifurcation(dev, true);
4280 Serge 4133
 
4134
		break;
4135
	default:
4136
		BUG();
4137
	}
4138
}
4139
 
2327 Serge 4140
/*
4141
 * Enable PCH resources required for PCH ports:
4142
 *   - PCH PLLs
4143
 *   - FDI training & RX/TX
4144
 *   - update transcoder timings
4145
 *   - DP transcoding bits
4146
 *   - transcoder
4147
 */
4148
static void ironlake_pch_enable(struct drm_crtc *crtc)
4149
{
4150
	struct drm_device *dev = crtc->dev;
4151
	struct drm_i915_private *dev_priv = dev->dev_private;
4152
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4153
	int pipe = intel_crtc->pipe;
3031 serge 4154
	u32 reg, temp;
2327 Serge 4155
 
4104 Serge 4156
	assert_pch_transcoder_disabled(dev_priv, pipe);
3031 serge 4157
 
4280 Serge 4158
	if (IS_IVYBRIDGE(dev))
4159
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4160
 
3243 Serge 4161
	/* Write the TU size bits before fdi link training, so that error
4162
	 * detection works. */
4163
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
4164
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4165
 
2327 Serge 4166
	/* For PCH output, training FDI link */
4167
	dev_priv->display.fdi_link_train(crtc);
4168
 
4104 Serge 4169
	/* We need to program the right clock selection before writing the pixel
4170
	 * mutliplier into the DPLL. */
3243 Serge 4171
	if (HAS_PCH_CPT(dev)) {
3031 serge 4172
		u32 sel;
2342 Serge 4173
 
2327 Serge 4174
		temp = I915_READ(PCH_DPLL_SEL);
4104 Serge 4175
		temp |= TRANS_DPLL_ENABLE(pipe);
4176
		sel = TRANS_DPLLB_SEL(pipe);
6084 serge 4177
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
3031 serge 4178
			temp |= sel;
4179
		else
4180
			temp &= ~sel;
2327 Serge 4181
		I915_WRITE(PCH_DPLL_SEL, temp);
4182
	}
4183
 
4104 Serge 4184
	/* XXX: pch pll's can be enabled any time before we enable the PCH
4185
	 * transcoder, and we actually should do this to not upset any PCH
4186
	 * transcoder that already use the clock when we share it.
4187
	 *
4188
	 * Note that enable_shared_dpll tries to do the right thing, but
4189
	 * get_shared_dpll unconditionally resets the pll - we need that to have
4190
	 * the right LVDS enable sequence. */
5060 serge 4191
	intel_enable_shared_dpll(intel_crtc);
4104 Serge 4192
 
2327 Serge 4193
	/* set transcoder timing, panel must allow it */
4194
	assert_panel_unlocked(dev_priv, pipe);
4104 Serge 4195
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
2327 Serge 4196
 
4197
	intel_fdi_normal_train(crtc);
4198
 
4199
	/* For PCH DP, enable TRANS_DP_CTL */
6084 serge 4200
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
3480 Serge 4201
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2327 Serge 4202
		reg = TRANS_DP_CTL(pipe);
4203
		temp = I915_READ(reg);
4204
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
4205
			  TRANS_DP_SYNC_MASK |
4206
			  TRANS_DP_BPC_MASK);
6084 serge 4207
		temp |= TRANS_DP_OUTPUT_ENABLE;
2327 Serge 4208
		temp |= bpc << 9; /* same format but at 11:9 */
4209
 
4210
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
4211
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4212
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
4213
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4214
 
4215
		switch (intel_trans_dp_port_sel(crtc)) {
4216
		case PCH_DP_B:
4217
			temp |= TRANS_DP_PORT_SEL_B;
4218
			break;
4219
		case PCH_DP_C:
4220
			temp |= TRANS_DP_PORT_SEL_C;
4221
			break;
4222
		case PCH_DP_D:
4223
			temp |= TRANS_DP_PORT_SEL_D;
4224
			break;
4225
		default:
3243 Serge 4226
			BUG();
2327 Serge 4227
		}
4228
 
4229
		I915_WRITE(reg, temp);
4230
	}
4231
 
3243 Serge 4232
	ironlake_enable_pch_transcoder(dev_priv, pipe);
2327 Serge 4233
}
4234
 
3243 Serge 4235
static void lpt_pch_enable(struct drm_crtc *crtc)
4236
{
4237
	struct drm_device *dev = crtc->dev;
4238
	struct drm_i915_private *dev_priv = dev->dev_private;
4239
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6084 serge 4240
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
3243 Serge 4241
 
4104 Serge 4242
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3243 Serge 4243
 
4244
	lpt_program_iclkip(crtc);
4245
 
4246
	/* Set transcoder timing. */
4104 Serge 4247
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3243 Serge 4248
 
4249
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4250
}
4251
 
6084 serge 4252
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4253
						struct intel_crtc_state *crtc_state)
3031 serge 4254
{
4104 Serge 4255
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
5354 serge 4256
	struct intel_shared_dpll *pll;
6084 serge 4257
	struct intel_shared_dpll_config *shared_dpll;
4104 Serge 4258
	enum intel_dpll_id i;
6084 serge 4259
	int max = dev_priv->num_shared_dpll;
3031 serge 4260
 
6084 serge 4261
	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4262
 
3031 serge 4263
	if (HAS_PCH_IBX(dev_priv->dev)) {
4264
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4104 Serge 4265
		i = (enum intel_dpll_id) crtc->pipe;
4266
		pll = &dev_priv->shared_dplls[i];
3031 serge 4267
 
4104 Serge 4268
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4269
			      crtc->base.base.id, pll->name);
3031 serge 4270
 
6084 serge 4271
		WARN_ON(shared_dpll[i].crtc_mask);
5060 serge 4272
 
3031 serge 4273
		goto found;
4274
	}
4275
 
6084 serge 4276
	if (IS_BROXTON(dev_priv->dev)) {
4277
		/* PLL is attached to port in bxt */
4278
		struct intel_encoder *encoder;
4279
		struct intel_digital_port *intel_dig_port;
4280
 
4281
		encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4282
		if (WARN_ON(!encoder))
4283
			return NULL;
4284
 
4285
		intel_dig_port = enc_to_dig_port(&encoder->base);
4286
		/* 1:1 mapping between ports and PLLs */
4287
		i = (enum intel_dpll_id)intel_dig_port->port;
4104 Serge 4288
		pll = &dev_priv->shared_dplls[i];
6084 serge 4289
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4290
			crtc->base.base.id, pll->name);
4291
		WARN_ON(shared_dpll[i].crtc_mask);
3031 serge 4292
 
6084 serge 4293
		goto found;
4294
	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4295
		/* Do not consider SPLL */
4296
		max = 2;
4297
 
4298
	for (i = 0; i < max; i++) {
4299
		pll = &dev_priv->shared_dplls[i];
4300
 
3031 serge 4301
		/* Only want to check enabled timings first */
6084 serge 4302
		if (shared_dpll[i].crtc_mask == 0)
3031 serge 4303
			continue;
4304
 
6084 serge 4305
		if (memcmp(&crtc_state->dpll_hw_state,
4306
			   &shared_dpll[i].hw_state,
4307
			   sizeof(crtc_state->dpll_hw_state)) == 0) {
5354 serge 4308
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4309
				      crtc->base.base.id, pll->name,
6084 serge 4310
				      shared_dpll[i].crtc_mask,
5354 serge 4311
				      pll->active);
3031 serge 4312
			goto found;
4313
		}
4314
	}
4315
 
4316
	/* Ok no matching timings, maybe there's a free one? */
4104 Serge 4317
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4318
		pll = &dev_priv->shared_dplls[i];
6084 serge 4319
		if (shared_dpll[i].crtc_mask == 0) {
4104 Serge 4320
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4321
				      crtc->base.base.id, pll->name);
3031 serge 4322
			goto found;
4323
		}
4324
	}
4325
 
4326
	return NULL;
4327
 
4328
found:
6084 serge 4329
	if (shared_dpll[i].crtc_mask == 0)
4330
		shared_dpll[i].hw_state =
4331
			crtc_state->dpll_hw_state;
5060 serge 4332
 
6084 serge 4333
	crtc_state->shared_dpll = i;
4104 Serge 4334
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4335
			 pipe_name(crtc->pipe));
4336
 
6084 serge 4337
	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
3031 serge 4338
 
4339
	return pll;
4340
}
4341
 
6084 serge 4342
/*
 * Commit the staged shared-DPLL configuration from an atomic state into the
 * live per-device DPLL bookkeeping.
 *
 * If the atomic state never touched the DPLLs (dpll_set is false) this is a
 * no-op; otherwise every shared DPLL's config is overwritten wholesale by
 * struct copy from the staged array.
 */
static void intel_shared_dpll_commit(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_shared_dpll_config *shared_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	/* Nothing staged any DPLL changes in this atomic commit. */
	if (!to_intel_atomic_state(state)->dpll_set)
		return;

	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		pll = &dev_priv->shared_dplls[i];
		/* Struct copy of the staged config into the live one. */
		pll->config = shared_dpll[i];
	}
}
5354 serge 4358
 
6084 serge 4359
/*
 * Sanity-check that a pipe is actually running after a CPT PCH modeset by
 * watching the pipe's scanline counter (PIPEDSL) advance.
 *
 * Reads the scanline register, waits 500us, then polls (up to 5ms, twice)
 * for the value to change. If it never moves the pipe is stuck and an
 * error is logged; no recovery is attempted here.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* One retry before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
5354 serge 4372
 
6084 serge 4373
/*
 * Stage a scaler request (or release) for one scaler user in crtc_state.
 *
 * @crtc_state:  state whose scaler_state is updated
 * @force_detach: release the scaler regardless of scaling need
 * @scaler_user: bit index identifying the requesting user (crtc or plane)
 * @scaler_id:   in/out; currently assigned scaler id, set to -1 on release
 * @rotation:    plane rotation; 90/270 swaps the src axes for the check
 * @src_w/src_h/dst_w/dst_h: source and destination rectangle sizes
 *
 * Returns 0 on success, -EINVAL if the requested size is outside the SKL
 * scaler limits. Only state is updated here; register programming happens
 * later in plane/panel-fitter programming.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* With 90/270 rotation the source axes are swapped vs. the dest. */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4433
 
6084 serge 4434
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc state whose scaler_state will be updated for pipe scaling
 *         (panel fitting); the pipe src size and adjusted mode are read
 *         from it.
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);

	/*
	 * Detach when the crtc is being deactivated; the crtc scaler has no
	 * rotation, hence DRM_ROTATE_0.
	 */
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
4456
 
6084 serge 4457
/**
4458
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4459
 *
4460
 * @state: crtc's scaler state
4461
 * @plane_state: atomic plane state to update
4462
 *
4463
 * Return
4464
 *     0 - scaler_usage updated successfully
4465
 *    error - requested scaling cannot be supported or other error condition
4466
 */
4467
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4468
				   struct intel_plane_state *plane_state)
5354 serge 4469
{
4470
 
6084 serge 4471
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4472
	struct intel_plane *intel_plane =
4473
		to_intel_plane(plane_state->base.plane);
4474
	struct drm_framebuffer *fb = plane_state->base.fb;
4475
	int ret;
5354 serge 4476
 
6084 serge 4477
	bool force_detach = !fb || !plane_state->visible;
5354 serge 4478
 
6084 serge 4479
	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4480
		      intel_plane->base.base.id, intel_crtc->pipe,
4481
		      drm_plane_index(&intel_plane->base));
4482
 
4483
	ret = skl_update_scaler(crtc_state, force_detach,
4484
				drm_plane_index(&intel_plane->base),
4485
				&plane_state->scaler_id,
4486
				plane_state->base.rotation,
4487
				drm_rect_width(&plane_state->src) >> 16,
4488
				drm_rect_height(&plane_state->src) >> 16,
4489
				drm_rect_width(&plane_state->dst),
4490
				drm_rect_height(&plane_state->dst));
4491
 
4492
	if (ret || plane_state->scaler_id < 0)
4493
		return ret;
4494
 
4495
	/* check colorkey */
4496
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4497
		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4498
			      intel_plane->base.base.id);
4499
		return -EINVAL;
5354 serge 4500
	}
6084 serge 4501
 
4502
	/* Check src format */
4503
	switch (fb->pixel_format) {
4504
	case DRM_FORMAT_RGB565:
4505
	case DRM_FORMAT_XBGR8888:
4506
	case DRM_FORMAT_XRGB8888:
4507
	case DRM_FORMAT_ABGR8888:
4508
	case DRM_FORMAT_ARGB8888:
4509
	case DRM_FORMAT_XRGB2101010:
4510
	case DRM_FORMAT_XBGR2101010:
4511
	case DRM_FORMAT_YUYV:
4512
	case DRM_FORMAT_YVYU:
4513
	case DRM_FORMAT_UYVY:
4514
	case DRM_FORMAT_VYUY:
4515
		break;
4516
	default:
4517
		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4518
			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4519
		return -EINVAL;
4520
	}
4521
 
4522
	return 0;
5354 serge 4523
}
4524
 
6084 serge 4525
static void skylake_scaler_disable(struct intel_crtc *crtc)
2342 Serge 4526
{
6084 serge 4527
	int i;
2342 Serge 4528
 
6084 serge 4529
	for (i = 0; i < crtc->num_scalers; i++)
4530
		skl_detach_scaler(crtc, i);
2342 Serge 4531
}
4532
 
5354 serge 4533
/*
 * Program the SKL panel fitter using the pipe scaler reserved for pch_pfit.
 *
 * Expects crtc->config->scaler_state.scaler_id to have been assigned by the
 * atomic check phase; warns and bails if pfit is requested without a scaler.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		/* Enable the scaler and set its window to the pfit pos/size. */
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4560
 
4104 Serge 4561
/*
 * Program the ILK+ panel fitter (PF_CTL/PF_WIN_*) for this pipe, if the
 * computed config requests pch_pfit.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			/* IVB/HSW additionally select the pipe in PF_CTL. */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4581
 
4560 Serge 4582
/*
 * Enable IPS (Intermediate Pixel Storage) for this crtc, if the computed
 * config enabled it. On Broadwell the enable goes through the pcode
 * mailbox; on Haswell IPS_CTL is written directly and polled.
 *
 * Must be called with the plane enabled; waits one vblank first.
 * Non-static: also called from plane update paths elsewhere in the driver.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4614
 
4615
/*
 * Disable IPS for this crtc (mirror of hsw_enable_ips). Broadwell disables
 * via the pcode mailbox and waits for completion; Haswell clears IPS_CTL
 * directly. Ends with a vblank wait so the plane can then be disabled
 * safely.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4639
 
4640
/**
 * Loads the palette/gamma unit for the CRTC with the prepared values.
 *
 * Writes the crtc's 256-entry lut_r/g/b tables into the legacy palette
 * registers. Requires the pipe clocks to be running; on HSW with split
 * gamma and IPS enabled, IPS is temporarily disabled around the writes
 * (hardware workaround).
 */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	for (i = 0; i < 256; i++) {
		u32 palreg;

		/* GMCH platforms use PALETTE, PCH platforms LGC_PALETTE. */
		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		/* Pack 8-bit R/G/B into one 32-bit palette entry. */
		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
4688
 
6084 serge 4689
/*
 * Turn off the legacy video overlay attached to this crtc, if any.
 *
 * NOTE(review): the actual intel_overlay_switch_off() call is commented out
 * in this KolibriOS port — only the interruptible-flag bracketing under
 * struct_mutex remains. Presumably overlay support is stubbed here; confirm
 * before relying on the overlay being switched off.
 */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
4706
 
6084 serge 4707
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */
	if (IS_BROADWELL(dev))
		intel_wait_for_vblank(dev, pipe);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't raise interrupts, so check manually. */
	if (HAS_GMCH_DISPLAY(dev))
		i9xx_check_fifo_underruns(dev_priv);
}
4755
 
6084 serge 4756
/**
 * intel_pre_disable_primary - Perform operations before disabling primary plane
 * @crtc: the CRTC whose primary plane is to be disabled
 *
 * Performs potentially sleeping operations that must be done before the
 * primary plane is disabled, such as updating FBC and IPS.  Note that this may
 * be called due to an explicit primary plane update, or due to an implicit
 * disable that is caused when a sprite plane completely hides the primary
 * plane.
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
5060 serge 4806
 
6084 serge 4807
/*
 * Run the deferred work staged in crtc->atomic after a plane update has
 * been committed: optional vblank wait, frontbuffer flip notification,
 * cxsr re-allow, watermark/FBC updates, primary-plane post-enable and
 * sprite watermark refresh. Clears the staging struct when done.
 */
static void intel_post_plane_update(struct intel_crtc *crtc)
{
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_plane *plane;

	if (atomic->wait_vblank)
		intel_wait_for_vblank(dev, crtc->pipe);

	intel_frontbuffer_flip(dev, atomic->fb_bits);

	if (atomic->disable_cxsr)
		crtc->wm.cxsr_allowed = true;

	if (crtc->atomic.update_wm_post)
		intel_update_watermarks(&crtc->base);

	if (atomic->update_fbc)
		intel_fbc_update(dev_priv);

	if (atomic->post_enable_primary)
		intel_post_enable_primary(&crtc->base);

	drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
		intel_update_sprite_watermarks(plane, &crtc->base,
					       0, 0, 0, false, false);

	/* All staged work consumed; reset for the next commit. */
	memset(atomic, 0, sizeof(*atomic));
}
4837
 
4838
/*
 * Run the work staged in crtc->atomic before plane updates are committed:
 * fb tracking for planes being disabled, pending-flip waits, FBC/IPS
 * disable, primary-plane pre-disable and cxsr disable.
 */
static void intel_pre_plane_update(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct drm_plane *p;

	/* Track fb's for any planes being disabled */
	drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
		struct intel_plane *plane = to_intel_plane(p);

		mutex_lock(&dev->struct_mutex);
		/* Hand the frontbuffer bit off to "no fb". */
		i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
				  plane->frontbuffer_bit);
		mutex_unlock(&dev->struct_mutex);
	}

	if (atomic->wait_for_flips)
		intel_crtc_wait_for_pending_flips(&crtc->base);

	if (atomic->disable_fbc)
		intel_fbc_disable_crtc(crtc);

	if (crtc->atomic.disable_ips)
		hsw_disable_ips(crtc);

	if (atomic->pre_disable_primary)
		intel_pre_disable_primary(&crtc->base);

	if (atomic->disable_cxsr) {
		crtc->wm.cxsr_allowed = false;
		intel_set_memory_cxsr(dev_priv, false);
	}
}
4872
 
4873
/*
 * Disable the overlay and every plane in plane_mask on this crtc, then
 * notify frontbuffer tracking as if all planes flipped to NULL.
 */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(p, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
4892
 
2327 Serge 4893
/*
 * Full mode-set enable sequence for a pipe on Ironlake-class (PCH-based)
 * hardware. The ordering of the steps below (DPLL prep, timings, M/N,
 * pipeconf, FDI PLL, pfit, LUT, pipe enable, PCH enable, vblank on,
 * encoder enable) follows the hardware's required enable sequence and
 * must not be reordered.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	/* CPT PCH: verify the pipe is actually running after the modeset. */
	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);
}
4961
 
4104 Serge 4962
/* IPS only exists on ULT machines and is tied to pipe A. */
4963
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4964
{
4965
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4966
}
4967
 
3243 Serge 4968
/*
 * Full mode-set enable sequence for a pipe on Haswell+ (DDI-based)
 * hardware, also used for gen9 (Skylake) with the gen>=9 branches below.
 * The step ordering (shared DPLL, timings, pipeconf/CSC, encoder
 * pre-enable, FDI link training, pipe clock, pfit, LUT, transcoder func,
 * pipe enable, PCH/MST, vblank on, encoder enable) mirrors the hardware
 * enable sequence and must not be reordered.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);
	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	/* PIPE_MULT is not valid on the eDP transcoder. */
	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder) {
		/* HSW/BDW only have a PCH transcoder on transcoder A. */
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
		dev_priv->display.fdi_link_train(crtc);
	}

	if (!is_dsi)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	if (!is_dsi)
		intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}
5063
 
6084 serge 5064
/*
 * Disable the ILK+ panel fitter for this pipe by zeroing PF_CTL and the
 * window registers. With @force false, only touches the registers when
 * the config says pfit was in use (see comment below re: HSW power well).
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}
5078
 
2327 Serge 5079
/*
 * Full disable sequence for a pipe on Ironlake-class (PCH-based) hardware:
 * encoders off, vblank off, pipe off, pfit off, FDI off, then PCH
 * transcoder and DPLL-select teardown. The ordering mirrors the hardware
 * disable sequence and must not be reordered.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}
}
5129
 
3243 Serge 5130
/*
 * Full disable sequence for a pipe on Haswell+ (DDI-based) hardware, also
 * used on gen9 via the gen>=9 branch below: encoders off, vblank off,
 * pipe off, transcoder func off, scaler/pfit off, pipe clock off, PCH
 * teardown, encoder post-disable. The ordering mirrors the hardware
 * disable sequence and must not be reordered.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* HSW/BDW only have a PCH transcoder on transcoder A. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);
	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!is_dsi)
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!is_dsi)
		intel_ddi_disable_pipe_clock(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);
}
5175
 
4104 Serge 5176
/*
 * Program and enable the GMCH panel fitter for gen2-4 style hardware.
 * No-op when the precomputed pipe config doesn't use the fitter.
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* Ratios must be written before the control/enable register. */
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
5199
 
5060 serge 5200
/* Map a DDI port to the display power domain that powers its lanes. */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_2_LANES;
	default:
		/* Unknown port: warn and fall back to the generic domain. */
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5218
 
6084 serge 5219
/* Map a port to the power domain required for its AUX channel. */
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_AUX_A;
	}
}
5238
 
5060 serge 5239
/* Iterate over every power domain whose bit is set in @mask. */
#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))

/*
 * Return the power domain an encoder's port depends on, based on the
 * encoder's output type.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fallthrough - treated like a digital port below */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams ride on their primary digital port. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5269
 
6084 serge 5270
/*
 * Return the AUX-channel power domain an encoder depends on, based on the
 * encoder's output type.
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fallthrough - resolve via the digital port like DP/eDP */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}
5299
 
5060 serge 5300
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
5301
{
5302
	struct drm_device *dev = crtc->dev;
5303
	struct intel_encoder *intel_encoder;
5304
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5305
	enum pipe pipe = intel_crtc->pipe;
5306
	unsigned long mask;
5307
	enum transcoder transcoder;
5308
 
6084 serge 5309
	if (!crtc->state->active)
5310
		return 0;
5311
 
5060 serge 5312
	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
5313
 
5314
	mask = BIT(POWER_DOMAIN_PIPE(pipe));
5315
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
6084 serge 5316
	if (intel_crtc->config->pch_pfit.enabled ||
5317
	    intel_crtc->config->pch_pfit.force_thru)
5060 serge 5318
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5319
 
5320
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5321
		mask |= BIT(intel_display_port_power_domain(intel_encoder));
5322
 
5323
	return mask;
5324
}
5325
 
6084 serge 5326
static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
5060 serge 5327
{
6084 serge 5328
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5329
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5330
	enum intel_display_power_domain domain;
5331
	unsigned long domains, new_domains, old_domains;
5060 serge 5332
 
6084 serge 5333
	old_domains = intel_crtc->enabled_power_domains;
5334
	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
5060 serge 5335
 
6084 serge 5336
	domains = new_domains & ~old_domains;
5060 serge 5337
 
6084 serge 5338
	for_each_power_domain(domain, domains)
5339
		intel_display_power_get(dev_priv, domain);
5060 serge 5340
 
6084 serge 5341
	return old_domains & ~new_domains;
5342
}
5060 serge 5343
 
6084 serge 5344
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5345
				      unsigned long domains)
5346
{
5347
	enum intel_display_power_domain domain;
5354 serge 5348
 
6084 serge 5349
	for_each_power_domain(domain, domains)
5350
		intel_display_power_put(dev_priv, domain);
5351
}
5060 serge 5352
 
6084 serge 5353
/*
 * Update power-domain references for an atomic modeset: grab new domains
 * per modified CRTC, commit a cdclk change if the platform needs one, then
 * drop the domains that are no longer required.
 */
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	/* Acquire new domains now; remember the stale ones per pipe. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (needs_modeset(crtc->state))
			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc);
	}

	/* Commit a cdclk change only if the frequency actually differs. */
	if (dev_priv->display.modeset_commit_cdclk) {
		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;

		if (cdclk != dev_priv->cdclk_freq &&
		    !WARN_ON(!state->allow_modeset))
			dev_priv->display.modeset_commit_cdclk(state);
	}

	/* Now it's safe to release the domains the old state held. */
	for (i = 0; i < I915_MAX_PIPES; i++)
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);
}
5380
 
6084 serge 5381
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5060 serge 5382
{
6084 serge 5383
	int max_cdclk_freq = dev_priv->max_cdclk_freq;
4560 Serge 5384
 
6084 serge 5385
	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5386
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5387
		return max_cdclk_freq;
5388
	else if (IS_CHERRYVIEW(dev_priv))
5389
		return max_cdclk_freq*95/100;
5390
	else if (INTEL_INFO(dev_priv)->gen < 4)
5391
		return 2*max_cdclk_freq*90/100;
5392
	else
5393
		return max_cdclk_freq*90/100;
5394
}
4560 Serge 5395
 
6084 serge 5396
/*
 * Determine the platform's maximum cdclk (kHz), cache it in
 * dev_priv->max_cdclk_freq and derive max_dotclk_freq from it.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev)) {
		/* SKL fuses the cdclk limit into the SKL_DFSM register. */
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5443
 
6084 serge 5444
static void intel_update_cdclk(struct drm_device *dev)
5060 serge 5445
{
5446
	struct drm_i915_private *dev_priv = dev->dev_private;
5447
 
6084 serge 5448
	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5354 serge 5449
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
6084 serge 5450
			 dev_priv->cdclk_freq);
5060 serge 5451
 
5452
	/*
5453
	 * Program the gmbus_freq based on the cdclk frequency.
5454
	 * BSpec erroneously claims we should aim for 4MHz, but
5455
	 * in fact 1MHz is the correct frequency.
5456
	 */
6084 serge 5457
	if (IS_VALLEYVIEW(dev)) {
5458
		/*
5459
		 * Program the gmbus_freq based on the cdclk frequency.
5460
		 * BSpec erroneously claims we should aim for 4MHz, but
5461
		 * in fact 1MHz is the correct frequency.
5462
		 */
5463
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5464
	}
5465
 
5466
	if (dev_priv->max_cdclk_freq == 0)
5467
		intel_update_max_cdclk(dev);
5060 serge 5468
}
5469
 
6084 serge 5470
static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5471
{
5472
	struct drm_i915_private *dev_priv = dev->dev_private;
5473
	uint32_t divider;
5474
	uint32_t ratio;
5475
	uint32_t current_freq;
5476
	int ret;
5477
 
5478
	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5479
	switch (frequency) {
5480
	case 144000:
5481
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5482
		ratio = BXT_DE_PLL_RATIO(60);
5483
		break;
5484
	case 288000:
5485
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5486
		ratio = BXT_DE_PLL_RATIO(60);
5487
		break;
5488
	case 384000:
5489
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5490
		ratio = BXT_DE_PLL_RATIO(60);
5491
		break;
5492
	case 576000:
5493
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5494
		ratio = BXT_DE_PLL_RATIO(60);
5495
		break;
5496
	case 624000:
5497
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5498
		ratio = BXT_DE_PLL_RATIO(65);
5499
		break;
5500
	case 19200:
5501
		/*
5502
		 * Bypass frequency with DE PLL disabled. Init ratio, divider
5503
		 * to suppress GCC warning.
5504
		 */
5505
		ratio = 0;
5506
		divider = 0;
5507
		break;
5508
	default:
5509
		DRM_ERROR("unsupported CDCLK freq %d", frequency);
5510
 
5511
		return;
5512
	}
5513
 
5514
	mutex_lock(&dev_priv->rps.hw_lock);
5515
	/* Inform power controller of upcoming frequency change */
5516
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5517
				      0x80000000);
5518
	mutex_unlock(&dev_priv->rps.hw_lock);
5519
 
5520
	if (ret) {
5521
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5522
			  ret, frequency);
5523
		return;
5524
	}
5525
 
5526
	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5527
	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5528
	current_freq = current_freq * 500 + 1000;
5529
 
5530
	/*
5531
	 * DE PLL has to be disabled when
5532
	 * - setting to 19.2MHz (bypass, PLL isn't used)
5533
	 * - before setting to 624MHz (PLL needs toggling)
5534
	 * - before setting to any frequency from 624MHz (PLL needs toggling)
5535
	 */
5536
	if (frequency == 19200 || frequency == 624000 ||
5537
	    current_freq == 624000) {
5538
		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5539
		/* Timeout 200us */
5540
		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5541
			     1))
5542
			DRM_ERROR("timout waiting for DE PLL unlock\n");
5543
	}
5544
 
5545
	if (frequency != 19200) {
5546
		uint32_t val;
5547
 
5548
		val = I915_READ(BXT_DE_PLL_CTL);
5549
		val &= ~BXT_DE_PLL_RATIO_MASK;
5550
		val |= ratio;
5551
		I915_WRITE(BXT_DE_PLL_CTL, val);
5552
 
5553
		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5554
		/* Timeout 200us */
5555
		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5556
			DRM_ERROR("timeout waiting for DE PLL lock\n");
5557
 
5558
		val = I915_READ(CDCLK_CTL);
5559
		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5560
		val |= divider;
5561
		/*
5562
		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5563
		 * enable otherwise.
5564
		 */
5565
		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5566
		if (frequency >= 500000)
5567
			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5568
 
5569
		val &= ~CDCLK_FREQ_DECIMAL_MASK;
5570
		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5571
		val |= (frequency - 1000) / 500;
5572
		I915_WRITE(CDCLK_CTL, val);
5573
	}
5574
 
5575
	mutex_lock(&dev_priv->rps.hw_lock);
5576
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5577
				      DIV_ROUND_UP(frequency, 25000));
5578
	mutex_unlock(&dev_priv->rps.hw_lock);
5579
 
5580
	if (ret) {
5581
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5582
			  ret, frequency);
5583
		return;
5584
	}
5585
 
5586
	intel_update_cdclk(dev);
5587
}
5588
 
5589
/*
 * One-time BXT display clock init: disable the PCH reset handshake,
 * take the PLL power domain, bring up cdclk if needed and enable DBUF.
 */
void broxton_init_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val;

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 for cdclk */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* check if cd clock is enabled */
	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
		DRM_DEBUG_KMS("Display already initialized\n");
		return;
	}

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 * - check if setting the max (or any) cdclk freq is really necessary
	 *   here, it belongs to modeset time
	 */
	broxton_set_cdclk(dev, 624000);

	/* Request DBUF power and wait (10us) for it to come up. */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout!\n");
}
5630
 
5631
/*
 * Reverse of broxton_init_cdclk(): power down DBUF, drop cdclk to the
 * 19.2MHz bypass frequency and release the PLL power domain.
 */
void broxton_uninit_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");

	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
	broxton_set_cdclk(dev, 19200);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5648
 
5649
/*
 * Legal SKL cdclk frequencies (kHz) and the DPLL0 VCO each one requires
 * (8100 or 8640 — presumably MHz; see skl_dpll0_enable()).
 */
static const struct skl_cdclk_entry {
	unsigned int freq;
	unsigned int vco;
} skl_cdclk_frequencies[] = {
	{ .freq = 308570, .vco = 8640 },
	{ .freq = 337500, .vco = 8100 },
	{ .freq = 432000, .vco = 8640 },
	{ .freq = 450000, .vco = 8100 },
	{ .freq = 540000, .vco = 8100 },
	{ .freq = 617140, .vco = 8640 },
	{ .freq = 675000, .vco = 8100 },
};
5661
 
5662
/*
 * Encode a cdclk frequency (kHz) into the CDCLK_CTL decimal field:
 * .1 fixpoint MHz with a -1MHz offset.
 */
static unsigned int skl_cdclk_decimal(unsigned int freq)
{
	unsigned int decimal = (freq - 1000) / 500;

	return decimal;
}
5666
 
5667
static unsigned int skl_cdclk_get_vco(unsigned int freq)
5668
{
5669
	unsigned int i;
5670
 
5671
	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5672
		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5673
 
5674
		if (e->freq == freq)
5675
			return e->vco;
5676
	}
5677
 
5678
	return 8100;
5679
}
5680
 
5681
static void
5682
skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5683
{
5684
	unsigned int min_freq;
5685
	u32 val;
5686
 
5687
	/* select the minimum CDCLK before enabling DPLL 0 */
5688
	val = I915_READ(CDCLK_CTL);
5689
	val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5690
	val |= CDCLK_FREQ_337_308;
5691
 
5692
	if (required_vco == 8640)
5693
		min_freq = 308570;
5694
	else
5695
		min_freq = 337500;
5696
 
5697
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5698
 
5699
	I915_WRITE(CDCLK_CTL, val);
5700
	POSTING_READ(CDCLK_CTL);
5701
 
5702
	/*
5703
	 * We always enable DPLL0 with the lowest link rate possible, but still
5704
	 * taking into account the VCO required to operate the eDP panel at the
5705
	 * desired frequency. The usual DP link rates operate with a VCO of
5706
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5707
	 * The modeset code is responsible for the selection of the exact link
5708
	 * rate later on, with the constraint of choosing a frequency that
5709
	 * works with required_vco.
5710
	 */
5711
	val = I915_READ(DPLL_CTRL1);
5712
 
5713
	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5714
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5715
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5716
	if (required_vco == 8640)
5717
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5718
					    SKL_DPLL0);
5719
	else
5720
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5721
					    SKL_DPLL0);
5722
 
5723
	I915_WRITE(DPLL_CTRL1, val);
5724
	POSTING_READ(DPLL_CTRL1);
5725
 
5726
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5727
 
5728
	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5729
		DRM_ERROR("DPLL0 not locked\n");
5730
}
5731
 
5732
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5733
{
5734
	int ret;
5735
	u32 val;
5736
 
5737
	/* inform PCU we want to change CDCLK */
5738
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5739
	mutex_lock(&dev_priv->rps.hw_lock);
5740
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5741
	mutex_unlock(&dev_priv->rps.hw_lock);
5742
 
5743
	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5744
}
5745
 
5746
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5747
{
5748
	unsigned int i;
5749
 
5750
	for (i = 0; i < 15; i++) {
5751
		if (skl_cdclk_pcu_ready(dev_priv))
5752
			return true;
5753
		udelay(10);
5754
	}
5755
 
5756
	return false;
5757
}
5758
 
5759
/*
 * Change the SKL cdclk to @freq (kHz): wait for PCU readiness, program
 * CDCLK_CTL and ack the PCU with the matching voltage level.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
	struct drm_device *dev = dev_priv->dev;
	u32 freq_select, pcu_ack;

	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch(freq) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308570:
	case 337500:
	default:
		/* default deliberately grouped with the lowest bin */
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617140:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5805
 
5806
/*
 * Tear down the SKL display clock: power down DBUF, disable DPLL0 when
 * DMC firmware is loaded, and release the PLL power domain.
 */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	/* disable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");

	/*
	 * DMC assumes ownership of LCPLL and will get confused if we touch it.
	 */
	if (dev_priv->csr.dmc_payload) {
		/* disable DPLL0 */
		I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
					~LCPLL_PLL_ENABLE);
		if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
			DRM_ERROR("Couldn't disable DPLL0\n");
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5829
}
5830
 
5831
/*
 * One-time SKL display clock init: enable the PCH reset handshake, take
 * the PLL power domain, bring up DPLL0 if the BIOS didn't, restore the
 * BIOS-chosen cdclk and power up DBUF.
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val;
	unsigned int required_vco;

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* DPLL0 not enabled (happens on early BIOS versions) */
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
		/* enable DPLL0 */
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
		skl_dpll0_enable(dev_priv, required_vco);
	}

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}
5862
 
4560 Serge 5863
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached frequency must match the hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	/* Pick the Punit voltage command for the target frequency. */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the voltage change from the Punit and wait for the ack. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}
5928
 
5354 serge 5929
/*
 * Change the CHV cdclk (kHz). Unlike VLV, only the Punit divider write
 * is required; supported values are 200000/266667/320000/333333.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	/* Reject anything outside the supported set. */
	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5969
 
4560 Serge 5970
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5971
				 int max_pixclk)
5972
{
5354 serge 5973
	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
6084 serge 5974
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
4560 Serge 5975
 
5976
	/*
5977
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
5978
	 *   200MHz
5979
	 *   267MHz
5060 serge 5980
	 *   320/333MHz (depends on HPLL freq)
6084 serge 5981
	 *   400MHz (VLV only)
5982
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5983
	 * of the lower bin and adjust if needed.
5060 serge 5984
	 *
5985
	 * We seem to get an unstable or solid color picture at 200MHz.
5986
	 * Not sure what's wrong. For now use 200MHz only when all pipes
5987
	 * are off.
4560 Serge 5988
	 */
6084 serge 5989
	if (!IS_CHERRYVIEW(dev_priv) &&
5990
	    max_pixclk > freq_320*limit/100)
5060 serge 5991
		return 400000;
6084 serge 5992
	else if (max_pixclk > 266667*limit/100)
5060 serge 5993
		return freq_320;
5994
	else if (max_pixclk > 0)
5995
		return 266667;
5996
	else
5997
		return 200000;
4560 Serge 5998
}
5999
 
6084 serge 6000
/* Pick the lowest BXT cdclk whose 90% guardband still covers @max_pixclk. */
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
			      int max_pixclk)
{
	/*
	 * FIXME:
	 * - remove the guardband, it's not needed on BXT
	 * - set 19.2MHz bypass frequency if there are no active pipes
	 */
	static const int cdclk_steps[] = { 144000, 288000, 384000, 576000 };
	int i;
	int nsteps = (int)(sizeof(cdclk_steps) / sizeof(cdclk_steps[0]));

	for (i = 0; i < nsteps; i++) {
		if (max_pixclk <= cdclk_steps[i] * 9 / 10)
			return cdclk_steps[i];
	}

	return 624000;
}
6019
 
6020
/*
 * Compute the max pixel clock (kHz) across all CRTCs enabled in the new
 * configuration, taken from the atomic @state. Returns a negative error
 * code if a CRTC state cannot be acquired.
 */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Disabled CRTCs don't contribute. */
		if (!crtc_state->base.enable)
			continue;

		max_pixclk = max(max_pixclk,
				 crtc_state->base.adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}
6043
 
6084 serge 6044
/*
 * Atomic check hook: compute the VLV/CHV cdclk required for @state and
 * stash it in the intel atomic state. Returns 0 or a negative error.
 */
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev, state);

	/* Negative means intel_mode_max_pixclk() failed. */
	if (max_pixclk < 0)
		return max_pixclk;

	to_intel_atomic_state(state)->cdclk =
		valleyview_calc_cdclk(dev_priv, max_pixclk);

	return 0;
}
6058
 
6084 serge 6059
/*
 * Atomic check hook: compute the BXT cdclk required for @state and stash
 * it in the intel atomic state. Returns 0 or a negative error.
 */
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev, state);

	if (max_pixclk < 0)
		return max_pixclk;

	to_intel_atomic_state(state)->cdclk =
		broxton_calc_cdclk(dev_priv, max_pixclk);

	return 0;
}
6073
 
6074
/*
 * Program the VLV/CHV PFI credits based on the cdclk/czclk relationship.
 * Higher credits are used when cdclk >= czclk.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6109
 
6110
/*
 * Commit-phase CDCLK reprogramming for VLV/CHV: apply the frequency that
 * valleyview_modeset_calc_cdclk() stored in the atomic state, then refresh
 * the PFI credits to match. Wrapped in a PIPE-A power domain reference so
 * the required HW blocks are powered during the writes.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	/* PFI credits depend on the cdclk/czclk ratio; re-derive them. */
	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6136
 
4104 Serge 6137
/*
 * Full mode-set enable sequence for a VLV/CHV pipe. The ordering below is
 * the hardware-mandated bring-up order: timings/pipeconf first, then
 * encoder pre-PLL hooks, PLL, encoder pre-enable, pfit/LUT, pipe, vblank,
 * and finally the encoders themselves. Do not reorder.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	bool is_dsi;

	/* Enabling an already-active crtc is a state-tracking bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/* DSI outputs drive their own PLL; skip the DPLL programming below. */
	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	/* CHV pipe B: reset the blender/background canvas to a known state. */
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev)) {
			chv_prepare_pll(intel_crtc, intel_crtc->config);
			chv_enable_pll(intel_crtc, intel_crtc->config);
		} else {
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
			vlv_enable_pll(intel_crtc, intel_crtc->config);
		}
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_enable_pipe(intel_crtc);

	/* vblank interrupts must only be enabled once the pipe is running. */
	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6199
 
5060 serge 6200
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6201
{
6202
	struct drm_device *dev = crtc->base.dev;
6203
	struct drm_i915_private *dev_priv = dev->dev_private;
6204
 
6084 serge 6205
	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6206
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
5060 serge 6207
}
6208
 
2327 Serge 6209
/*
 * Mode-set enable sequence for gen2-4 (i9xx) pipes. Mirrors
 * valleyview_crtc_enable() but programs the PLL dividers directly and
 * skips underrun reporting on gen2 (not supported there). Order-critical.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc is a state-tracking bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* gen2 has no CPU FIFO underrun reporting. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	/* Watermarks must be valid before the pipe starts fetching. */
	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6253
 
3746 Serge 6254
/*
 * Turn off the GMCH panel fitter for this crtc. No-op when the current
 * state never enabled it; asserts the pipe is already disabled since the
 * fitter must not be reprogrammed under an active pipe.
 */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Nothing to do if the pfit was not enabled in this state. */
	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6268
 
2327 Serge 6269
/*
 * Mode-set disable sequence for gen2-4 / VLV / CHV pipes: encoders off,
 * vblank off, pipe off, pfit off, then the PLL (unless DSI owns it).
 * The ordering is the reverse of the enable path and must be preserved.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI has its own PLL handling; leave the DPLL alone in that case. */
	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	/* gen2 has no CPU FIFO underrun reporting. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6315
 
6084 serge 6316
/*
 * Forcibly disable a crtc outside of the normal atomic commit path
 * (used during HW state sanitization/takeover). Turns off the visible
 * primary plane, runs the platform crtc_disable hook, and drops all
 * power-domain references the crtc was holding.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (!intel_crtc->active)
		return;

	if (to_intel_plane_state(crtc->primary->state)->visible) {
		/* Quiesce any in-flight page flip before touching the plane. */
		intel_crtc_wait_for_pending_flips(crtc);
		intel_pre_disable_primary(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->active = false;
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Release every power domain this crtc had acquired. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;
}
2327 Serge 6344
 
6084 serge 6345
/*
 * Turn all crtc's off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 *
 * Builds and commits an atomic state with every active crtc disabled.
 * After a successful commit, crtc->state->active is flipped back to true
 * for the crtcs that were running, so resume knows which ones to restore.
 * Returns 0 on success or a negative error code.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	unsigned crtc_mask = 0;
	int ret = 0;

	/* Caller must have set up an acquire context. */
	if (WARN_ON(!ctx))
		return 0;

	lockdep_assert_held(&ctx->ww_ctx);
	state = drm_atomic_state_alloc(dev);
	if (WARN_ON(!state))
		return -ENOMEM;

	state->acquire_ctx = ctx;
	state->allow_modeset = true;

	/* Mark every currently-active crtc inactive in the new state. */
	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto free;

		if (!crtc_state->active)
			continue;

		crtc_state->active = false;
		crtc_mask |= 1 << drm_crtc_index(crtc);
	}

	if (crtc_mask) {
		ret = drm_atomic_commit(state);

		if (!ret) {
			/*
			 * Commit consumed the state; record in the now-current
			 * state which crtcs should be re-activated on resume.
			 */
			for_each_crtc(dev, crtc)
				if (crtc_mask & (1 << drm_crtc_index(crtc)))
					crtc->state->active = true;

			return ret;
		}
	}

free:
	/* Reached with ret == 0 only when no crtc was active (nothing to do). */
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	drm_atomic_state_free(state);
	return ret;
}
2327 Serge 6402
 
3031 serge 6403
/*
 * Default encoder destroy hook: detach the encoder from the DRM core and
 * free the containing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* container_of is pure pointer arithmetic, safe after cleanup. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
2327 Serge 6410
 
3031 serge 6411
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). Emits I915_STATE_WARNs; changes nothing. */
static void intel_connector_check_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* HW says the connector is on: crtc/encoder links must agree. */
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* DP MST fans one encoder out to many connectors; skip the
		 * 1:1 encoder checks below for it. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* HW says off: software state must not claim otherwise. */
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
2327 Serge 6449
 
6084 serge 6450
int intel_connector_init(struct intel_connector *connector)
2330 Serge 6451
{
6084 serge 6452
	struct drm_connector_state *connector_state;
2342 Serge 6453
 
6084 serge 6454
	connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
6455
	if (!connector_state)
6456
		return -ENOMEM;
3031 serge 6457
 
6084 serge 6458
	connector->base.state = connector_state;
6459
	return 0;
6460
}
3031 serge 6461
 
6084 serge 6462
struct intel_connector *intel_connector_alloc(void)
6463
{
6464
	struct intel_connector *connector;
3031 serge 6465
 
6084 serge 6466
	connector = kzalloc(sizeof *connector, GFP_KERNEL);
6467
	if (!connector)
6468
		return NULL;
6469
 
6470
	if (intel_connector_init(connector) < 0) {
6471
		kfree(connector);
6472
		return NULL;
6473
	}
6474
 
6475
	return connector;
2330 Serge 6476
}
2327 Serge 6477
 
3031 serge 6478
/* Simple connector->get_hw_state implementation for encoders that support only
6479
 * one connector and no cloning and hence the encoder state determines the state
6480
 * of the connector. */
6481
bool intel_connector_get_hw_state(struct intel_connector *connector)
2330 Serge 6482
{
3031 serge 6483
	enum pipe pipe = 0;
6484
	struct intel_encoder *encoder = connector->encoder;
2330 Serge 6485
 
3031 serge 6486
	return encoder->get_hw_state(encoder, &pipe);
2330 Serge 6487
}
6488
 
6084 serge 6489
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
4104 Serge 6490
{
6084 serge 6491
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6492
		return crtc_state->fdi_lanes;
4104 Serge 6493
 
6084 serge 6494
	return 0;
6495
}
6496
 
6497
/*
 * Validate the FDI lane count for a pipe against the platform limits and,
 * on 3-pipe IVB-class hardware, against the lanes consumed by the sharing
 * pipe (B and C share the FDI link). Returns 0 if the config fits,
 * -EINVAL if it cannot, or a PTR_ERR from fetching the other crtc state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute FDI maximum on any platform. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW cap FDI at 2 lanes and have no pipe sharing to check. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no B/C sharing constraint. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* >2 lanes on B only works if pipe C uses no FDI lanes. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C never gets more than 2 lanes... */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* ...and only if pipe B leaves at least 2 lanes free. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6567
 
6568
/* Sentinel return value asking the caller to recompute the whole config
 * after the bpp reduction performed below. */
#define RETRY 1
/*
 * Compute FDI lane count and M/N values for a PCH-encoder config.
 * If the mode does not fit at the current pipe bpp, the bpp is lowered
 * in steps of 6 (2 bits per channel) and the calculation retried;
 * RETRY is returned so the caller re-runs the full compute pass.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
				       intel_crtc->pipe, pipe_config);
	/* Doesn't fit: shave 2 bits per channel (down to 6bpc) and retry. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6614
 
6084 serge 6615
/*
 * Decide whether IPS (Intermediate Pixel Storage) can be enabled for this
 * pipe config: bpp must be <= 24, and on BDW the pixel rate must stay
 * within 95% of the maximum CDCLK.
 */
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
				     struct intel_crtc_state *pipe_config)
{
	if (pipe_config->pipe_bpp > 24)
		return false;

	/* HSW can handle pixel rate up to cdclk? */
	if (IS_HASWELL(dev_priv->dev))
		return true;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	return ilk_pipe_pixel_rate(pipe_config) <=
		dev_priv->max_cdclk_freq * 95 / 100;
}
4104 Serge 6636
static void hsw_compute_ips_config(struct intel_crtc *crtc,
6084 serge 6637
				   struct intel_crtc_state *pipe_config)
4104 Serge 6638
{
6084 serge 6639
	struct drm_device *dev = crtc->base.dev;
6640
	struct drm_i915_private *dev_priv = dev->dev_private;
6641
 
5060 serge 6642
	pipe_config->ips_enabled = i915.enable_ips &&
6084 serge 6643
		hsw_crtc_supports_ips(crtc) &&
6644
		pipe_config_supports_ips(dev_priv, pipe_config);
4104 Serge 6645
}
6646
 
6647
/*
 * Platform-independent crtc config validation/fixup: pixel-clock limits
 * and double-wide selection on gen2/3, even horizontal size constraints,
 * the Cantiga+ hsync-front-porch erratum, IPS, and finally FDI for PCH
 * encoders. Returns 0, RETRY (from the FDI path), or -EINVAL.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		int clock_limit = dev_priv->max_cdclk_freq;

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		/* Still over the (possibly doubled) limit: mode won't work. */
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6700
 
6084 serge 6701
/*
 * Read back the current CDCLK frequency (in kHz) on SKL from LCPLL1 and
 * CDCLK_CTL. The CDCLK_FREQ_SEL decode depends on which VCO (8640 vs
 * 8100 MHz) DPLL0 is running, inferred from its link rate selection.
 */
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t linkrate;

	if (!(lcpll1 & LCPLL_PLL_ENABLE))
		return 24000; /* 24MHz is the cd freq with NSSC ref */

	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
		return 540000;

	linkrate = (I915_READ(DPLL_CTRL1) &
		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;

	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
		/* vco 8640 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308570;
		case CDCLK_FREQ_675_617:
			return 617140;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	} else {
		/* vco 8100 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	}

	/* error case, do as if DPLL0 isn't enabled */
	return 24000;
}
5060 serge 6747
 
6084 serge 6748
/*
 * Read back the current CDCLK frequency (in kHz) on BXT: the DE PLL runs
 * at 19.2 MHz * ratio, CDCLK is that divided by 2 and then by the CD2X
 * divider selected in CDCLK_CTL.
 */
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
	int cdclk;

	/* PLL off: running straight from the 19.2 MHz reference. */
	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
		return 19200;

	cdclk = 19200 * pll_ratio / 2;

	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		return cdclk;  /* 576MHz or 624MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		return cdclk * 2 / 3; /* 384MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		return cdclk / 2; /* 288MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		return cdclk / 4; /* 144MHz */
	}

	/* error case, do as if DE PLL isn't enabled */
	return 19200;
}
6775
 
6084 serge 6776
/*
 * Read back the current CDCLK frequency (in kHz) on BDW from LCPLL_CTL,
 * taking the FCLK bypass and the fuse-strapped 450 MHz limit into account.
 */
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
		return 540000;
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
		return 337500;
	else
		return 675000;
}
6795
 
6796
/*
 * Read back the current CDCLK frequency (in kHz) on HSW from LCPLL_CTL;
 * ULT parts that are not fused/selected to 450 MHz run at 337.5 MHz,
 * other parts at 540 MHz.
 */
static int haswell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (IS_HSW_ULT(dev))
		return 337500;
	else
		return 540000;
}
6813
 
6814
static int valleyview_get_display_clock_speed(struct drm_device *dev)
6815
{
6816
	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6817
				      CCK_DISPLAY_CLOCK_CONTROL);
6818
}
6819
 
6820
/* ILK: display core clock is a fixed 450 MHz. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}
6824
 
2327 Serge 6825
/* i945: display core clock is a fixed 400 MHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
6829
 
6830
/* i915: display core clock is a fixed 333.333 MHz. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}
6834
 
6835
/* Misc i9xx parts: display core clock is a fixed 200 MHz. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
6839
 
4104 Serge 6840
/*
 * Read the Pineview display core clock (in kHz) from the GCFGC PCI
 * config register.
 */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fallthrough: unknown encodings are treated as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}
6863
 
2327 Serge 6864
static int i915gm_get_display_clock_speed(struct drm_device *dev)
6865
{
6866
	u16 gcfgc = 0;
6867
 
6868
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6869
 
6870
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6084 serge 6871
		return 133333;
2327 Serge 6872
	else {
6873
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6874
		case GC_DISPLAY_CLOCK_333_MHZ:
6084 serge 6875
			return 333333;
2327 Serge 6876
		default:
6877
		case GC_DISPLAY_CLOCK_190_200_MHZ:
6878
			return 190000;
6879
		}
6880
	}
6881
}
6882
 
6883
/* i865: display core clock is a fixed 266.667 MHz. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}
6887
 
6084 serge 6888
/*
 * Determine the i85x display core clock (in kHz) from the HPLLCC
 * register on PCI device 0:0.3.
 * NOTE(review): in this KolibriOS port the PCI config read is stubbed
 * out, so hpllcc stays 0 and the switch always takes the GC_CLOCK_133_200
 * path (200 MHz) — confirm this is the intended behavior for the port.
 */
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (dev->pdev->revision == 0x1)
		return 133333;

//   pci_bus_read_config_word(dev->pdev->bus,
//                PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		return 266667;
	}

	/* Shouldn't happen */
	return 0;
}
6924
 
6925
/* i830: display core clock is a fixed 133.333 MHz. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333;
}
6929
 
6084 serge 6930
/*
 * Read the HPLL VCO frequency (in kHz) for gen3/4 parts. The 3-bit field
 * in the HPLLVCO (or HPLLVCO_MOBILE) register indexes a per-chipset
 * lookup table; unlisted table entries decode to 0, which is reported as
 * an error. Returns 0 for chipsets without a known table.
 */
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Per-chipset VCO decode tables, indexed by the 3-bit register field. */
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	uint8_t tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev))
		vco_table = ctg_vco;
	else if (IS_G4X(dev))
		vco_table = elk_vco;
	else if (IS_CRESTLINE(dev))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev))
		vco_table = pnv_vco;
	else if (IS_G33(dev))
		vco_table = blb_vco;
	else
		return 0;

	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
	else
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);

	return vco;
}
6999
static int gm45_get_display_clock_speed(struct drm_device *dev)
7000
{
7001
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7002
	uint16_t tmp = 0;
7003
 
7004
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7005
 
7006
	cdclk_sel = (tmp >> 12) & 0x1;
7007
 
7008
	switch (vco) {
7009
	case 2666667:
7010
	case 4000000:
7011
	case 5333333:
7012
		return cdclk_sel ? 333333 : 222222;
7013
	case 3200000:
7014
		return cdclk_sel ? 320000 : 228571;
7015
	default:
7016
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7017
		return 222222;
7018
	}
7019
}
7020
 
7021
/*
 * Derive the i965GM CDCLK (in kHz): the HPLL VCO frequency divided by a
 * per-VCO divider selected via GCFGC bits 8-12. Falls back to 200 MHz
 * when the selector or VCO is out of range.
 */
static int i965gm_get_display_clock_speed(struct drm_device *dev)
{
	/* Divider tables indexed by the GCFGC selector, one per VCO. */
	static const uint8_t div_3200[] = { 16, 10,  8 };
	static const uint8_t div_4000[] = { 20, 12, 10 };
	static const uint8_t div_5333[] = { 24, 16, 14 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
	return 200000;
}
7057
 
7058
static int g33_get_display_clock_speed(struct drm_device *dev)
7059
{
7060
	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7061
	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7062
	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7063
	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7064
	const uint8_t *div_table;
7065
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7066
	uint16_t tmp = 0;
7067
 
7068
	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7069
 
7070
	cdclk_sel = (tmp >> 4) & 0x7;
7071
 
7072
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7073
		goto fail;
7074
 
7075
	switch (vco) {
7076
	case 3200000:
7077
		div_table = div_3200;
7078
		break;
7079
	case 4000000:
7080
		div_table = div_4000;
7081
		break;
7082
	case 4800000:
7083
		div_table = div_4800;
7084
		break;
7085
	case 5333333:
7086
		div_table = div_5333;
7087
		break;
7088
	default:
7089
		goto fail;
7090
	}
7091
 
7092
	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7093
 
7094
fail:
7095
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7096
	return 190476;
7097
}
7098
 
2327 Serge 7099
static void
3746 Serge 7100
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2327 Serge 7101
{
3746 Serge 7102
	while (*num > DATA_LINK_M_N_MASK ||
7103
	       *den > DATA_LINK_M_N_MASK) {
2327 Serge 7104
		*num >>= 1;
7105
		*den >>= 1;
7106
	}
7107
}
7108
 
3746 Serge 7109
/*
 * Compute register-sized M/N values approximating the ratio m/n:
 * N is snapped to a power of two (capped at DATA_LINK_N_MAX), M is
 * rescaled to match, then both are reduced to fit the register fields.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	/* Snap N up to a power of two, capped at the register maximum. */
	uint32_t new_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	*ret_n = new_n;
	/* 64-bit intermediate avoids overflow in m * new_n. */
	*ret_m = div_u64((uint64_t) m * new_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
7116
 
3480 Serge 7117
void
7118
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7119
		       int pixel_clock, int link_clock,
7120
		       struct intel_link_m_n *m_n)
2327 Serge 7121
{
3480 Serge 7122
	m_n->tu = 64;
3746 Serge 7123
 
7124
	compute_m_n(bits_per_pixel * pixel_clock,
7125
		    link_clock * nlanes * 8,
7126
		    &m_n->gmch_m, &m_n->gmch_n);
7127
 
7128
	compute_m_n(pixel_clock, link_clock,
7129
		    &m_n->link_m, &m_n->link_n);
2327 Serge 7130
}
7131
 
7132
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7133
{
5060 serge 7134
	if (i915.panel_use_ssc >= 0)
7135
		return i915.panel_use_ssc != 0;
4104 Serge 7136
	return dev_priv->vbt.lvds_use_ssc
2327 Serge 7137
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7138
}
7139
 
6084 serge 7140
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
7141
			   int num_connectors)
3031 serge 7142
{
6084 serge 7143
	struct drm_device *dev = crtc_state->base.crtc->dev;
3031 serge 7144
	struct drm_i915_private *dev_priv = dev->dev_private;
7145
	int refclk;
2327 Serge 7146
 
6084 serge 7147
	WARN_ON(!crtc_state->base.state);
7148
 
7149
	if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
4560 Serge 7150
		refclk = 100000;
6084 serge 7151
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
3031 serge 7152
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 7153
		refclk = dev_priv->vbt.lvds_ssc_freq;
7154
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
3031 serge 7155
	} else if (!IS_GEN2(dev)) {
7156
		refclk = 96000;
7157
	} else {
7158
		refclk = 48000;
7159
	}
2327 Serge 7160
 
3031 serge 7161
	return refclk;
7162
}
2327 Serge 7163
 
4104 Serge 7164
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
3031 serge 7165
{
4104 Serge 7166
	return (1 << dpll->n) << 16 | dpll->m2;
7167
}
3746 Serge 7168
 
4104 Serge 7169
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7170
{
7171
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
3031 serge 7172
}
2327 Serge 7173
 
3746 Serge 7174
/*
 * Compute the FP0/FP1 divisor register values for the crtc's DPLL and
 * store them in crtc_state->dpll_hw_state. FP1 holds the reduced
 * (downclocked) dividers only for LVDS with a valid reduced clock;
 * otherwise it mirrors FP0. Also updates crtc->lowfreq_avail.
 */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	u32 fp, fp2 = 0;

	/* Pineview packs the FP register differently from other i9xx. */
	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}
2327 Serge 7202
 
4560 Serge 7203
/*
 * Recalibrate the PLL B opamp via sideband DPIO writes. Sequence from
 * hardware programming notes; caller holds the sideband lock.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): plain '=' discards the masked read-modify value just
	 * computed above ('|=' would preserve it) — matches the sequence as
	 * shipped; confirm against the DPIO programming notes before changing. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7231
 
7232
/*
 * Program the PCH transcoder data/link M1/N1 registers for the crtc's
 * pipe from the supplied M/N values (TU size is encoded into DATA_M1).
 */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7244
 
7245
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * per-transcoder registers are used (with optional M2/N2 for DRRS);
 * older hardware uses the per-pipe G4X-style registers.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
			crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7278
 
6084 serge 7279
/*
 * Select and program the DP M/N divider set for the crtc. M1_N1 programs
 * both divider sets (M2/N2 used for DRRS where available); M2_N2 reuses
 * the M1/N1 registers on hardware without dedicated M2/N2 registers.
 */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
7303
 
6084 serge 7304
/*
 * Compute the DPLL/DPLL_MD register values for Valleyview and store them
 * in pipe_config->dpll_hw_state (no hardware access here).
 */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	u32 dpll, dpll_md;

	/*
	 * Enable DPIO clock input. We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
	 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (crtc->pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	pipe_config->dpll_hw_state.dpll = dpll;

	/* Pixel multiplier is stored N-1 encoded in DPLL_MD. */
	dpll_md = (pipe_config->pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
}
7326
 
5354 serge 7327
/*
 * Program the Valleyview PLL dividers and analog settings over the DPIO
 * sideband before the PLL is enabled. Takes and releases the sideband
 * lock; the DPIO write sequence/order follows the vbios programming notes.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers first, then the same value with calibration enabled. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock: preserve bits 15:8, set the rest; extra bit for DP/eDP. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
4104 Serge 7417
 
6084 serge 7418
/*
 * Compute the DPLL/DPLL_MD register values for Cherryview and store them
 * in pipe_config->dpll_hw_state (no hardware access here).
 */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
		DPLL_VCO_ENABLE;
	/* Pipes other than A additionally enable the CRI clock. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* Pixel multiplier is stored N-1 encoded in DPLL_MD. */
	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
7430
 
7431
/*
 * Program the Cherryview PLL dividers, loop filter and lock-detect
 * settings over the DPIO sideband before the PLL is enabled. The DPLL
 * register itself is written first (with VCO disabled) to enable the
 * reference clock and SSC; the rest goes through the sideband under
 * sb_lock.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	int dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* M2 is split: fractional part in the low 22 bits, integer above. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* Coarse lock detect only when there is no fractional M2 part. */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7534
 
5354 serge 7535
/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
		      const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	/* Throwaway on-stack state: only the fields the compute/prepare/
	 * enable helpers read are filled in. */
	struct intel_crtc_state pipe_config = {
		.base.crtc = &crtc->base,
		.pixel_multiplier = 1,
		.dpll = *dpll,
	};

	if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, &pipe_config);
		chv_prepare_pll(crtc, &pipe_config);
		chv_enable_pll(crtc, &pipe_config);
	} else {
		vlv_compute_dpll(crtc, &pipe_config);
		vlv_prepare_pll(crtc, &pipe_config);
		vlv_enable_pll(crtc, &pipe_config);
	}
}
7566
 
7567
/**
7568
 * vlv_force_pll_off - forcibly disable just the PLL
7569
 * @dev_priv: i915 private structure
7570
 * @pipe: pipe PLL to disable
7571
 *
7572
 * Disable the PLL for @pipe. To be used in cases where we need
7573
 * the PLL enabled even when @pipe is not going to be enabled.
7574
 */
7575
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7576
{
7577
	if (IS_CHERRYVIEW(dev))
7578
		chv_disable_pll(to_i915(dev), pipe);
7579
	else
7580
		vlv_disable_pll(to_i915(dev), pipe);
7581
}
7582
 
6084 serge 7583
/*
 * Compute the DPLL (and on gen4+ the DPLL_MD) register values for
 * gen3/gen4-style hardware and store them, along with the FP dividers,
 * in crtc_state->dpll_hw_state. No hardware access here.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Only these platforms carry the pixel multiplier in DPLL itself. */
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also needs the high-speed (SDVO) clocking mode. */
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC for single LVDS, else DREF. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
2327 Serge 7659
 
6084 serge 7660
/*
 * Compute the DPLL register value for gen2 hardware and store it, along
 * with the FP dividers, in crtc_state->dpll_hw_state. Gen2 encodes P1/P2
 * differently from i9xx and has no DPLL_MD.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		/* Non-LVDS: P1 of 2 uses a dedicated bit, larger values are
		 * encoded as p1 - 2; P2 of 4 likewise has its own bit. */
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
7697
 
4104 Serge 7698
/*
 * Program the pipe/transcoder timing registers (HTOTAL/HBLANK/HSYNC,
 * VTOTAL/VBLANK/VSYNC, VSYNCSHIFT, PIPESRC) from the crtc's adjusted
 * mode. All register fields are stored N-1 encoded.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}
7765
 
4104 Serge 7766
/*
 * Read back the pipe/transcoder timing registers into pipe_config's
 * adjusted mode (undoing the hardware's N-1 field encoding), including
 * the interlace adjustment and the pipe source size.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Interlaced modes: the hardware added the two halflines, undo it
	 * to mirror what intel_set_pipe_timings() programmed. */
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
7807
 
5060 serge 7808
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
6084 serge 7809
				 struct intel_crtc_state *pipe_config)
4104 Serge 7810
{
6084 serge 7811
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7812
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7813
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7814
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
4104 Serge 7815
 
6084 serge 7816
	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7817
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7818
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7819
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
4104 Serge 7820
 
6084 serge 7821
	mode->flags = pipe_config->base.adjusted_mode.flags;
7822
	mode->type = DRM_MODE_TYPE_DRIVER;
4104 Serge 7823
 
6084 serge 7824
	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7825
	mode->flags |= pipe_config->base.adjusted_mode.flags;
7826
 
7827
	mode->hsync = drm_mode_hsync(mode);
7828
	mode->vrefresh = drm_mode_vrefresh(mode);
7829
	drm_mode_set_name(mode);
4104 Serge 7830
}
7831
 
3746 Serge 7832
/*
 * i9xx_set_pipeconf - program the PIPECONF register for gen2-4/VLV pipes
 *
 * Builds the PIPECONF value from the CRTC's software state (double wide,
 * dither, bpc, interlace, color range) and writes it, with a posting read
 * to flush the write.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/*
	 * Quirked pipes must never be turned off; preserve the current
	 * ENABLE bit instead of starting from zero.
	 */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Older gens and SDVO outputs need the field indication variant. */
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	/* Limited (16-235) color range selection only exists on VLV here. */
	if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7894
 
6084 serge 7895
/*
 * i9xx_crtc_compute_clock - compute DPLL settings for gen2-4/VLV/CHV
 * @crtc: the CRTC being configured
 * @crtc_state: new state to fill with DPLL divisors and hw state
 *
 * Scans the atomic state for connectors on this CRTC, finds PLL divisors
 * for the requested port clock (unless the encoder already set the clock),
 * then dispatches to the platform-specific dpll computation.
 *
 * Returns 0 on success or -EINVAL if no PLL settings fit the mode.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock;
	bool ok;
	bool is_dsi = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	/* Start from a clean DPLL hardware state. */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_DSI:
			is_dsi = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* DSI uses its own PLL; nothing to compute here. */
	if (is_dsi)
		return 0;

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.  The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* Platform-specific translation of divisors into register values. */
	if (IS_GEN2(dev)) {
		i8xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_compute_dpll(crtc, crtc_state);
	} else {
		i9xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	}

	return 0;
}
7974
 
4104 Serge 7975
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6084 serge 7976
				 struct intel_crtc_state *pipe_config)
4104 Serge 7977
{
7978
	struct drm_device *dev = crtc->base.dev;
7979
	struct drm_i915_private *dev_priv = dev->dev_private;
7980
	uint32_t tmp;
7981
 
4560 Serge 7982
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
7983
		return;
7984
 
4104 Serge 7985
	tmp = I915_READ(PFIT_CONTROL);
7986
	if (!(tmp & PFIT_ENABLE))
7987
		return;
7988
 
7989
	/* Check whether the pfit is attached to our pipe. */
7990
	if (INTEL_INFO(dev)->gen < 4) {
7991
		if (crtc->pipe != PIPE_B)
7992
			return;
7993
	} else {
7994
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7995
			return;
7996
	}
7997
 
7998
	pipe_config->gmch_pfit.control = tmp;
7999
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8000
	if (INTEL_INFO(dev)->gen < 5)
8001
		pipe_config->gmch_pfit.lvds_border_bits =
8002
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
8003
}
8004
 
4398 Serge 8005
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6084 serge 8006
			       struct intel_crtc_state *pipe_config)
4398 Serge 8007
{
8008
	struct drm_device *dev = crtc->base.dev;
8009
	struct drm_i915_private *dev_priv = dev->dev_private;
8010
	int pipe = pipe_config->cpu_transcoder;
8011
	intel_clock_t clock;
8012
	u32 mdiv;
8013
	int refclk = 100000;
8014
 
5060 serge 8015
	/* In case of MIPI DPLL will not even be used */
8016
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
8017
		return;
8018
 
6084 serge 8019
	mutex_lock(&dev_priv->sb_lock);
4560 Serge 8020
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6084 serge 8021
	mutex_unlock(&dev_priv->sb_lock);
4398 Serge 8022
 
8023
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8024
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8025
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8026
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8027
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8028
 
6084 serge 8029
	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
4398 Serge 8030
}
8031
 
6084 serge 8032
/*
 * i9xx_get_initial_plane_config - read back the firmware framebuffer
 *
 * If the display plane is already enabled (e.g. by firmware/BIOS),
 * reconstruct an intel_framebuffer description (format, size, stride,
 * tiling, base address) from the plane registers so the boot fb can be
 * inherited. On failure (plane off or allocation failure) plane_config
 * is left without an fb.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling readback only exists on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Gen4+ splits surface base and tile/linear offset; older has one reg. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* PIPESRC encodes (width-1) << 16 | (height-1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8100
 
8101
static void chv_crtc_clock_get(struct intel_crtc *crtc,
6084 serge 8102
			       struct intel_crtc_state *pipe_config)
5060 serge 8103
{
8104
	struct drm_device *dev = crtc->base.dev;
8105
	struct drm_i915_private *dev_priv = dev->dev_private;
8106
	int pipe = pipe_config->cpu_transcoder;
8107
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8108
	intel_clock_t clock;
6084 serge 8109
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
5060 serge 8110
	int refclk = 100000;
8111
 
6084 serge 8112
	mutex_lock(&dev_priv->sb_lock);
5060 serge 8113
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8114
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8115
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8116
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6084 serge 8117
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8118
	mutex_unlock(&dev_priv->sb_lock);
5060 serge 8119
 
8120
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6084 serge 8121
	clock.m2 = (pll_dw0 & 0xff) << 22;
8122
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8123
		clock.m2 |= pll_dw2 & 0x3fffff;
5060 serge 8124
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8125
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8126
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8127
 
6084 serge 8128
	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
5060 serge 8129
}
8130
 
3746 Serge 8131
/*
 * i9xx_get_pipe_config - read back CRTC state on gen2-4/VLV/CHV
 *
 * Fills @pipe_config from the hardware registers: pipe enable, bpp,
 * color range, double-wide, timings, pfit, pixel multiplier, and the
 * DPLL hardware state/clock.
 *
 * Returns true if the pipe is powered and enabled, false otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* Can't trust register reads if the pipe power domain is off. */
	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* Only g4x/VLV expose the bpc in PIPECONF. */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier lives in different places depending on gen. */
	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Platform-specific clock readback. */
	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	return true;
}
8228
 
3243 Serge 8229
/*
 * ironlake_init_pch_refclk - configure the PCH display reference clock
 *
 * Surveys the attached encoders (LVDS/eDP) and currently-enabled PCH
 * DPLLs, computes the desired PCH_DREF_CONTROL value, then steps the
 * hardware to it with the required ordering (SSC on before outputs;
 * posting reads and 200us delays between steps).
 *
 * NOTE(review): in the final else-branch the statements after
 * "if (!using_ssc_source) {" are under-indented, but the braces are
 * balanced — the SSC-source disable really is conditional on
 * !using_ssc_source. Indentation left as-is to keep the diff minimal.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* IBX: external CK505 clock chip per VBT; others can always SSC. */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC alive for any DPLL still referencing it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
	}

	/* The stepped writes above must land exactly on the computed state. */
	BUG_ON(val != final);
}
8396
 
4104 Serge 8397
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
3243 Serge 8398
{
4104 Serge 8399
	uint32_t tmp;
3243 Serge 8400
 
6084 serge 8401
	tmp = I915_READ(SOUTH_CHICKEN2);
8402
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8403
	I915_WRITE(SOUTH_CHICKEN2, tmp);
3243 Serge 8404
 
6084 serge 8405
	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
8406
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8407
		DRM_ERROR("FDI mPHY reset assert timeout\n");
3243 Serge 8408
 
6084 serge 8409
	tmp = I915_READ(SOUTH_CHICKEN2);
8410
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8411
	I915_WRITE(SOUTH_CHICKEN2, tmp);
3243 Serge 8412
 
6084 serge 8413
	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
4104 Serge 8414
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
6084 serge 8415
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
4539 Serge 8416
}
3243 Serge 8417
 
4104 Serge 8418
/* WaMPhyProgramming:hsw */
/*
 * lpt_program_fdi_mphy - apply the HSW FDI mPHY programming workaround
 *
 * Performs the prescribed read-modify-write sequence on the mPHY
 * registers over the SBI (sideband interface). The addresses and values
 * are magic numbers from the workaround; register pairs (0x2xxx/0x21xx)
 * correspond to the two FDI lanes and receive identical programming.
 * The write order follows the workaround sequence and must not be
 * rearranged.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
3243 Serge 8492
 
4104 Serge 8493
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* Sanitize contradictory parameter combinations (warn and correct). */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* SBI accesses require the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable SSC and park the clock on the alternate path first. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Release the alternate path to get the spread clock out. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* LP PCH uses SBI_GEN0, big-core PCH uses SBI_DBUFF0. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8537
 
4104 Serge 8538
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* SBI accesses require the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* LP PCH uses SBI_GEN0, big-core PCH uses SBI_DBUFF0. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Park the clock on the alternate path before disabling SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8564
 
8565
static void lpt_init_pch_refclk(struct drm_device *dev)
8566
{
8567
	struct intel_encoder *encoder;
8568
	bool has_vga = false;
8569
 
5354 serge 8570
	for_each_intel_encoder(dev, encoder) {
4104 Serge 8571
		switch (encoder->type) {
8572
		case INTEL_OUTPUT_ANALOG:
8573
			has_vga = true;
8574
			break;
5354 serge 8575
		default:
8576
			break;
4104 Serge 8577
		}
8578
	}
8579
 
8580
	if (has_vga)
8581
		lpt_enable_clkout_dp(dev, true, true);
8582
	else
8583
		lpt_disable_clkout_dp(dev);
8584
}
8585
 
3243 Serge 8586
/*
 * Initialize reference clocks when the driver loads.
 *
 * Dispatches to the PCH-specific refclk setup: IBX/CPT use the Ironlake
 * PCH_DREF_CONTROL sequence, LPT uses the CLKOUT_DP/SBI sequence. Other
 * (or no) PCH types need no setup here.
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
8596
 
6084 serge 8597
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
2342 Serge 8598
{
6084 serge 8599
	struct drm_device *dev = crtc_state->base.crtc->dev;
2342 Serge 8600
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 8601
	struct drm_atomic_state *state = crtc_state->base.state;
8602
	struct drm_connector *connector;
8603
	struct drm_connector_state *connector_state;
2342 Serge 8604
	struct intel_encoder *encoder;
6084 serge 8605
	int num_connectors = 0, i;
2342 Serge 8606
	bool is_lvds = false;
8607
 
6084 serge 8608
	for_each_connector_in_state(state, connector, connector_state, i) {
8609
		if (connector_state->crtc != crtc_state->base.crtc)
5354 serge 8610
			continue;
8611
 
6084 serge 8612
		encoder = to_intel_encoder(connector_state->best_encoder);
8613
 
2342 Serge 8614
		switch (encoder->type) {
8615
		case INTEL_OUTPUT_LVDS:
8616
			is_lvds = true;
8617
			break;
5354 serge 8618
		default:
8619
			break;
2342 Serge 8620
		}
8621
		num_connectors++;
8622
	}
8623
 
8624
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 8625
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
4104 Serge 8626
			      dev_priv->vbt.lvds_ssc_freq);
4560 Serge 8627
		return dev_priv->vbt.lvds_ssc_freq;
2342 Serge 8628
	}
8629
 
8630
	return 120000;
8631
}
8632
 
4104 Serge 8633
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
3031 serge 8634
{
8635
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8636
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8637
	int pipe = intel_crtc->pipe;
8638
	uint32_t val;
8639
 
4104 Serge 8640
	val = 0;
3031 serge 8641
 
6084 serge 8642
	switch (intel_crtc->config->pipe_bpp) {
3031 serge 8643
	case 18:
3480 Serge 8644
		val |= PIPECONF_6BPC;
3031 serge 8645
		break;
8646
	case 24:
3480 Serge 8647
		val |= PIPECONF_8BPC;
3031 serge 8648
		break;
8649
	case 30:
3480 Serge 8650
		val |= PIPECONF_10BPC;
3031 serge 8651
		break;
8652
	case 36:
3480 Serge 8653
		val |= PIPECONF_12BPC;
3031 serge 8654
		break;
8655
	default:
3243 Serge 8656
		/* Case prevented by intel_choose_pipe_bpp_dither. */
8657
		BUG();
3031 serge 8658
	}
8659
 
6084 serge 8660
	if (intel_crtc->config->dither)
3031 serge 8661
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8662
 
6084 serge 8663
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3031 serge 8664
		val |= PIPECONF_INTERLACED_ILK;
8665
	else
8666
		val |= PIPECONF_PROGRESSIVE;
8667
 
6084 serge 8668
	if (intel_crtc->config->limited_color_range)
3480 Serge 8669
		val |= PIPECONF_COLOR_RANGE_SELECT;
8670
 
3031 serge 8671
	I915_WRITE(PIPECONF(pipe), val);
8672
	POSTING_READ(PIPECONF(pipe));
8673
}
8674
 
3480 Serge 8675
/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	/* Scale down to the 16-235 range when limited color range is on. */
	if (intel_crtc->config->limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	/* No pre-offsets: input is treated as full-range RGB. */
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		/* Gen7+: explicit post-offset registers (add 16/255 for limited). */
		uint16_t postoff = 0;

		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		/* Gen6: the black-screen-offset mode bit supplies the offset. */
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}
8738
 
4104 Serge 8739
static void haswell_set_pipeconf(struct drm_crtc *crtc)
3243 Serge 8740
{
4560 Serge 8741
	struct drm_device *dev = crtc->dev;
8742
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 8743
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4560 Serge 8744
	enum pipe pipe = intel_crtc->pipe;
6084 serge 8745
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
3243 Serge 8746
	uint32_t val;
8747
 
4104 Serge 8748
	val = 0;
3243 Serge 8749
 
6084 serge 8750
	if (IS_HASWELL(dev) && intel_crtc->config->dither)
3243 Serge 8751
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8752
 
6084 serge 8753
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3243 Serge 8754
		val |= PIPECONF_INTERLACED_ILK;
8755
	else
8756
		val |= PIPECONF_PROGRESSIVE;
8757
 
8758
	I915_WRITE(PIPECONF(cpu_transcoder), val);
8759
	POSTING_READ(PIPECONF(cpu_transcoder));
4104 Serge 8760
 
8761
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
8762
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
4560 Serge 8763
 
5354 serge 8764
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
4560 Serge 8765
		val = 0;
8766
 
6084 serge 8767
		switch (intel_crtc->config->pipe_bpp) {
4560 Serge 8768
		case 18:
8769
			val |= PIPEMISC_DITHER_6_BPC;
8770
			break;
8771
		case 24:
8772
			val |= PIPEMISC_DITHER_8_BPC;
8773
			break;
8774
		case 30:
8775
			val |= PIPEMISC_DITHER_10_BPC;
8776
			break;
8777
		case 36:
8778
			val |= PIPEMISC_DITHER_12_BPC;
8779
			break;
8780
		default:
8781
			/* Case prevented by pipe_config_set_bpp. */
8782
			BUG();
8783
		}
8784
 
6084 serge 8785
		if (intel_crtc->config->dither)
4560 Serge 8786
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8787
 
8788
		I915_WRITE(PIPEMISC(pipe), val);
8789
	}
3243 Serge 8790
}
8791
 
3031 serge 8792
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6084 serge 8793
				    struct intel_crtc_state *crtc_state,
3031 serge 8794
				    intel_clock_t *clock,
8795
				    bool *has_reduced_clock,
8796
				    intel_clock_t *reduced_clock)
8797
{
8798
	struct drm_device *dev = crtc->dev;
8799
	struct drm_i915_private *dev_priv = dev->dev_private;
8800
	int refclk;
8801
	const intel_limit_t *limit;
6084 serge 8802
	bool ret;
3031 serge 8803
 
6084 serge 8804
	refclk = ironlake_get_refclk(crtc_state);
3031 serge 8805
 
8806
	/*
8807
	 * Returns a set of divisors for the desired target clock with the given
8808
	 * refclk, or FALSE.  The returned values represent the clock equation:
8809
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8810
	 */
6084 serge 8811
	limit = intel_limit(crtc_state, refclk);
8812
	ret = dev_priv->display.find_dpll(limit, crtc_state,
8813
					  crtc_state->port_clock,
4104 Serge 8814
					  refclk, NULL, clock);
3031 serge 8815
	if (!ret)
8816
		return false;
8817
 
8818
	return true;
8819
}
8820
 
3243 Serge 8821
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8822
{
8823
	/*
8824
	 * Account for spread spectrum to avoid
8825
	 * oversubscribing the link. Max center spread
8826
	 * is 2.5%; use 5% for safety's sake.
8827
	 */
8828
	u32 bps = target_clock * bpp * 21 / 20;
5060 serge 8829
	return DIV_ROUND_UP(bps, link_bw * 8);
3243 Serge 8830
}
8831
 
4104 Serge 8832
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
2327 Serge 8833
{
4104 Serge 8834
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
3746 Serge 8835
}
8836
 
3243 Serge 8837
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
6084 serge 8838
				      struct intel_crtc_state *crtc_state,
4104 Serge 8839
				      u32 *fp,
3746 Serge 8840
				      intel_clock_t *reduced_clock, u32 *fp2)
3243 Serge 8841
{
8842
	struct drm_crtc *crtc = &intel_crtc->base;
8843
	struct drm_device *dev = crtc->dev;
8844
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 8845
	struct drm_atomic_state *state = crtc_state->base.state;
8846
	struct drm_connector *connector;
8847
	struct drm_connector_state *connector_state;
8848
	struct intel_encoder *encoder;
3243 Serge 8849
	uint32_t dpll;
6084 serge 8850
	int factor, num_connectors = 0, i;
4104 Serge 8851
	bool is_lvds = false, is_sdvo = false;
3243 Serge 8852
 
6084 serge 8853
	for_each_connector_in_state(state, connector, connector_state, i) {
8854
		if (connector_state->crtc != crtc_state->base.crtc)
5354 serge 8855
			continue;
8856
 
6084 serge 8857
		encoder = to_intel_encoder(connector_state->best_encoder);
8858
 
8859
		switch (encoder->type) {
3243 Serge 8860
		case INTEL_OUTPUT_LVDS:
8861
			is_lvds = true;
8862
			break;
8863
		case INTEL_OUTPUT_SDVO:
8864
		case INTEL_OUTPUT_HDMI:
8865
			is_sdvo = true;
8866
			break;
5354 serge 8867
		default:
8868
			break;
3243 Serge 8869
		}
8870
 
8871
		num_connectors++;
8872
	}
8873
 
6084 serge 8874
	/* Enable autotuning of the PLL clock (if permissible) */
8875
	factor = 21;
8876
	if (is_lvds) {
8877
		if ((intel_panel_use_ssc(dev_priv) &&
4560 Serge 8878
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
3746 Serge 8879
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
6084 serge 8880
			factor = 25;
8881
	} else if (crtc_state->sdvo_tv_clock)
8882
		factor = 20;
2327 Serge 8883
 
6084 serge 8884
	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
3746 Serge 8885
		*fp |= FP_CB_TUNE;
2327 Serge 8886
 
3746 Serge 8887
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
8888
		*fp2 |= FP_CB_TUNE;
8889
 
6084 serge 8890
	dpll = 0;
2327 Serge 8891
 
6084 serge 8892
	if (is_lvds)
8893
		dpll |= DPLLB_MODE_LVDS;
8894
	else
8895
		dpll |= DPLLB_MODE_DAC_SERIAL;
4104 Serge 8896
 
6084 serge 8897
	dpll |= (crtc_state->pixel_multiplier - 1)
8898
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
2327 Serge 8899
 
4104 Serge 8900
	if (is_sdvo)
8901
		dpll |= DPLL_SDVO_HIGH_SPEED;
6084 serge 8902
	if (crtc_state->has_dp_encoder)
4104 Serge 8903
		dpll |= DPLL_SDVO_HIGH_SPEED;
8904
 
6084 serge 8905
	/* compute bitmask from p1 value */
8906
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8907
	/* also FPA1 */
8908
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
2327 Serge 8909
 
6084 serge 8910
	switch (crtc_state->dpll.p2) {
8911
	case 5:
8912
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8913
		break;
8914
	case 7:
8915
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8916
		break;
8917
	case 10:
8918
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8919
		break;
8920
	case 14:
8921
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8922
		break;
8923
	}
2327 Serge 8924
 
4104 Serge 8925
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6084 serge 8926
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8927
	else
8928
		dpll |= PLL_REF_INPUT_DREFCLK;
2327 Serge 8929
 
4104 Serge 8930
	return dpll | DPLL_VCO_ENABLE;
3243 Serge 8931
}
8932
 
6084 serge 8933
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8934
				       struct intel_crtc_state *crtc_state)
3243 Serge 8935
{
5354 serge 8936
	struct drm_device *dev = crtc->base.dev;
3243 Serge 8937
	intel_clock_t clock, reduced_clock;
4104 Serge 8938
	u32 dpll = 0, fp = 0, fp2 = 0;
3243 Serge 8939
	bool ok, has_reduced_clock = false;
3746 Serge 8940
	bool is_lvds = false;
4104 Serge 8941
	struct intel_shared_dpll *pll;
3243 Serge 8942
 
6084 serge 8943
	memset(&crtc_state->dpll_hw_state, 0,
8944
	       sizeof(crtc_state->dpll_hw_state));
8945
 
5354 serge 8946
	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
3243 Serge 8947
 
8948
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
8949
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
8950
 
6084 serge 8951
	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
3243 Serge 8952
				     &has_reduced_clock, &reduced_clock);
6084 serge 8953
	if (!ok && !crtc_state->clock_set) {
3243 Serge 8954
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8955
		return -EINVAL;
8956
	}
3746 Serge 8957
	/* Compat-code for transition, will disappear. */
6084 serge 8958
	if (!crtc_state->clock_set) {
8959
		crtc_state->dpll.n = clock.n;
8960
		crtc_state->dpll.m1 = clock.m1;
8961
		crtc_state->dpll.m2 = clock.m2;
8962
		crtc_state->dpll.p1 = clock.p1;
8963
		crtc_state->dpll.p2 = clock.p2;
3746 Serge 8964
	}
3243 Serge 8965
 
4104 Serge 8966
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
6084 serge 8967
	if (crtc_state->has_pch_encoder) {
8968
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8969
		if (has_reduced_clock)
4104 Serge 8970
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
3243 Serge 8971
 
6084 serge 8972
		dpll = ironlake_compute_dpll(crtc, crtc_state,
4104 Serge 8973
					     &fp, &reduced_clock,
5060 serge 8974
					     has_reduced_clock ? &fp2 : NULL);
3243 Serge 8975
 
6084 serge 8976
		crtc_state->dpll_hw_state.dpll = dpll;
8977
		crtc_state->dpll_hw_state.fp0 = fp;
4104 Serge 8978
		if (has_reduced_clock)
6084 serge 8979
			crtc_state->dpll_hw_state.fp1 = fp2;
4104 Serge 8980
		else
6084 serge 8981
			crtc_state->dpll_hw_state.fp1 = fp;
2327 Serge 8982
 
6084 serge 8983
		pll = intel_get_shared_dpll(crtc, crtc_state);
3031 serge 8984
		if (pll == NULL) {
4104 Serge 8985
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
5354 serge 8986
					 pipe_name(crtc->pipe));
2342 Serge 8987
			return -EINVAL;
6084 serge 8988
		}
5354 serge 8989
	}
2327 Serge 8990
 
6084 serge 8991
	if (is_lvds && has_reduced_clock)
5354 serge 8992
		crtc->lowfreq_avail = true;
4104 Serge 8993
	else
5354 serge 8994
		crtc->lowfreq_avail = false;
2327 Serge 8995
 
5060 serge 8996
	return 0;
4104 Serge 8997
}
3243 Serge 8998
 
4560 Serge 8999
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9000
					 struct intel_link_m_n *m_n)
4104 Serge 9001
{
9002
	struct drm_device *dev = crtc->base.dev;
9003
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 9004
	enum pipe pipe = crtc->pipe;
4104 Serge 9005
 
4560 Serge 9006
	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9007
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9008
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9009
		& ~TU_SIZE_MASK;
9010
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9011
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9012
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9013
}
9014
 
9015
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9016
					 enum transcoder transcoder,
5354 serge 9017
					 struct intel_link_m_n *m_n,
9018
					 struct intel_link_m_n *m2_n2)
4560 Serge 9019
{
9020
	struct drm_device *dev = crtc->base.dev;
9021
	struct drm_i915_private *dev_priv = dev->dev_private;
9022
	enum pipe pipe = crtc->pipe;
9023
 
9024
	if (INTEL_INFO(dev)->gen >= 5) {
9025
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9026
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9027
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
6084 serge 9028
			& ~TU_SIZE_MASK;
4560 Serge 9029
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9030
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
6084 serge 9031
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5354 serge 9032
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
9033
		 * gen < 8) and if DRRS is supported (to make sure the
9034
		 * registers are not unnecessarily read).
9035
		 */
9036
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
6084 serge 9037
			crtc->config->has_drrs) {
5354 serge 9038
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9039
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
9040
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
9041
					& ~TU_SIZE_MASK;
9042
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
9043
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9044
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9045
		}
4560 Serge 9046
	} else {
9047
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9048
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9049
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9050
			& ~TU_SIZE_MASK;
9051
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9052
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9053
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9054
	}
3243 Serge 9055
}
9056
 
4560 Serge 9057
void intel_dp_get_m_n(struct intel_crtc *crtc,
6084 serge 9058
		      struct intel_crtc_state *pipe_config)
4560 Serge 9059
{
6084 serge 9060
	if (pipe_config->has_pch_encoder)
4560 Serge 9061
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9062
	else
9063
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5354 serge 9064
					     &pipe_config->dp_m_n,
9065
					     &pipe_config->dp_m2_n2);
4560 Serge 9066
}
9067
 
9068
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
6084 serge 9069
					struct intel_crtc_state *pipe_config)
4560 Serge 9070
{
9071
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5354 serge 9072
				     &pipe_config->fdi_m_n, NULL);
4560 Serge 9073
}
9074
 
5354 serge 9075
static void skylake_get_pfit_config(struct intel_crtc *crtc,
6084 serge 9076
				    struct intel_crtc_state *pipe_config)
5354 serge 9077
{
9078
	struct drm_device *dev = crtc->base.dev;
9079
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 9080
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9081
	uint32_t ps_ctrl = 0;
9082
	int id = -1;
9083
	int i;
5354 serge 9084
 
6084 serge 9085
	/* find scaler attached to this pipe */
9086
	for (i = 0; i < crtc->num_scalers; i++) {
9087
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9088
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9089
			id = i;
9090
			pipe_config->pch_pfit.enabled = true;
9091
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9092
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9093
			break;
9094
		}
9095
	}
5354 serge 9096
 
6084 serge 9097
	scaler_state->scaler_id = id;
9098
	if (id >= 0) {
9099
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9100
	} else {
9101
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
5354 serge 9102
	}
9103
}
9104
 
6084 serge 9105
static void
9106
skylake_get_initial_plane_config(struct intel_crtc *crtc,
9107
				 struct intel_initial_plane_config *plane_config)
9108
{
9109
	struct drm_device *dev = crtc->base.dev;
9110
	struct drm_i915_private *dev_priv = dev->dev_private;
9111
	u32 val, base, offset, stride_mult, tiling;
9112
	int pipe = crtc->pipe;
9113
	int fourcc, pixel_format;
9114
	unsigned int aligned_height;
9115
	struct drm_framebuffer *fb;
9116
	struct intel_framebuffer *intel_fb;
9117
 
9118
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9119
	if (!intel_fb) {
9120
		DRM_DEBUG_KMS("failed to alloc fb\n");
9121
		return;
9122
	}
9123
 
9124
	fb = &intel_fb->base;
9125
 
9126
	val = I915_READ(PLANE_CTL(pipe, 0));
9127
	if (!(val & PLANE_CTL_ENABLE))
9128
		goto error;
9129
 
9130
	pixel_format = val & PLANE_CTL_FORMAT_MASK;
9131
	fourcc = skl_format_to_fourcc(pixel_format,
9132
				      val & PLANE_CTL_ORDER_RGBX,
9133
				      val & PLANE_CTL_ALPHA_MASK);
9134
	fb->pixel_format = fourcc;
9135
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9136
 
9137
	tiling = val & PLANE_CTL_TILED_MASK;
9138
	switch (tiling) {
9139
	case PLANE_CTL_TILED_LINEAR:
9140
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9141
		break;
9142
	case PLANE_CTL_TILED_X:
9143
		plane_config->tiling = I915_TILING_X;
9144
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9145
		break;
9146
	case PLANE_CTL_TILED_Y:
9147
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9148
		break;
9149
	case PLANE_CTL_TILED_YF:
9150
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9151
		break;
9152
	default:
9153
		MISSING_CASE(tiling);
9154
		goto error;
9155
	}
9156
 
9157
	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9158
	plane_config->base = base;
9159
 
9160
	offset = I915_READ(PLANE_OFFSET(pipe, 0));
9161
 
9162
	val = I915_READ(PLANE_SIZE(pipe, 0));
9163
	fb->height = ((val >> 16) & 0xfff) + 1;
9164
	fb->width = ((val >> 0) & 0x1fff) + 1;
9165
 
9166
	val = I915_READ(PLANE_STRIDE(pipe, 0));
9167
	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
9168
						fb->pixel_format);
6283 serge 9169
	fb->pitches[0] = (val & 0x3ff) * stride_mult;
6084 serge 9170
 
9171
	aligned_height = intel_fb_align_height(dev, fb->height,
9172
					       fb->pixel_format,
9173
					       fb->modifier[0]);
9174
 
6283 serge 9175
	plane_config->size = fb->pitches[0] * aligned_height;
6084 serge 9176
 
9177
	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9178
		      pipe_name(pipe), fb->width, fb->height,
9179
		      fb->bits_per_pixel, base, fb->pitches[0],
9180
		      plane_config->size);
9181
 
9182
	plane_config->fb = intel_fb;
9183
	return;
9184
 
9185
error:
9186
	kfree(fb);
9187
}
9188
 
4104 Serge 9189
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
6084 serge 9190
				     struct intel_crtc_state *pipe_config)
4104 Serge 9191
{
9192
	struct drm_device *dev = crtc->base.dev;
9193
	struct drm_i915_private *dev_priv = dev->dev_private;
9194
	uint32_t tmp;
9195
 
9196
	tmp = I915_READ(PF_CTL(crtc->pipe));
9197
 
9198
	if (tmp & PF_ENABLE) {
9199
		pipe_config->pch_pfit.enabled = true;
9200
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9201
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9202
 
9203
		/* We currently do not free assignements of panel fitters on
9204
		 * ivb/hsw (since we don't use the higher upscaling modes which
9205
		 * differentiates them) so just WARN about this case for now. */
9206
		if (IS_GEN7(dev)) {
9207
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9208
				PF_PIPE_SEL_IVB(crtc->pipe));
9209
		}
9210
	}
9211
}
9212
 
6084 serge 9213
static void
9214
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9215
				  struct intel_initial_plane_config *plane_config)
5060 serge 9216
{
9217
	struct drm_device *dev = crtc->base.dev;
9218
	struct drm_i915_private *dev_priv = dev->dev_private;
9219
	u32 val, base, offset;
6084 serge 9220
	int pipe = crtc->pipe;
5060 serge 9221
	int fourcc, pixel_format;
6084 serge 9222
	unsigned int aligned_height;
9223
	struct drm_framebuffer *fb;
9224
	struct intel_framebuffer *intel_fb;
5060 serge 9225
 
6084 serge 9226
	val = I915_READ(DSPCNTR(pipe));
9227
	if (!(val & DISPLAY_PLANE_ENABLE))
9228
		return;
9229
 
9230
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9231
	if (!intel_fb) {
5060 serge 9232
		DRM_DEBUG_KMS("failed to alloc fb\n");
9233
		return;
9234
	}
9235
 
6084 serge 9236
	fb = &intel_fb->base;
5060 serge 9237
 
6084 serge 9238
	if (INTEL_INFO(dev)->gen >= 4) {
9239
		if (val & DISPPLANE_TILED) {
9240
			plane_config->tiling = I915_TILING_X;
9241
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9242
		}
9243
	}
5060 serge 9244
 
9245
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6084 serge 9246
	fourcc = i9xx_format_to_fourcc(pixel_format);
9247
	fb->pixel_format = fourcc;
9248
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
5060 serge 9249
 
6084 serge 9250
	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
5060 serge 9251
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6084 serge 9252
		offset = I915_READ(DSPOFFSET(pipe));
5060 serge 9253
	} else {
6084 serge 9254
		if (plane_config->tiling)
9255
			offset = I915_READ(DSPTILEOFF(pipe));
5060 serge 9256
		else
6084 serge 9257
			offset = I915_READ(DSPLINOFF(pipe));
5060 serge 9258
	}
9259
	plane_config->base = base;
9260
 
9261
	val = I915_READ(PIPESRC(pipe));
6084 serge 9262
	fb->width = ((val >> 16) & 0xfff) + 1;
9263
	fb->height = ((val >> 0) & 0xfff) + 1;
5060 serge 9264
 
9265
	val = I915_READ(DSPSTRIDE(pipe));
6283 serge 9266
	fb->pitches[0] = val & 0xffffffc0;
5060 serge 9267
 
6084 serge 9268
	aligned_height = intel_fb_align_height(dev, fb->height,
9269
					       fb->pixel_format,
9270
					       fb->modifier[0]);
5060 serge 9271
 
6283 serge 9272
	plane_config->size = fb->pitches[0] * aligned_height;
5060 serge 9273
 
6084 serge 9274
	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9275
		      pipe_name(pipe), fb->width, fb->height,
9276
		      fb->bits_per_pixel, base, fb->pitches[0],
5060 serge 9277
		      plane_config->size);
6084 serge 9278
 
9279
	plane_config->fb = intel_fb;
5060 serge 9280
}
9281
 
3746 Serge 9282
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
6084 serge 9283
				     struct intel_crtc_state *pipe_config)
3746 Serge 9284
{
9285
	struct drm_device *dev = crtc->base.dev;
9286
	struct drm_i915_private *dev_priv = dev->dev_private;
9287
	uint32_t tmp;
9288
 
5354 serge 9289
	if (!intel_display_power_is_enabled(dev_priv,
6084 serge 9290
					    POWER_DOMAIN_PIPE(crtc->pipe)))
5060 serge 9291
		return false;
9292
 
4104 Serge 9293
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9294
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9295
 
3746 Serge 9296
	tmp = I915_READ(PIPECONF(crtc->pipe));
9297
	if (!(tmp & PIPECONF_ENABLE))
9298
		return false;
9299
 
4280 Serge 9300
	switch (tmp & PIPECONF_BPC_MASK) {
9301
	case PIPECONF_6BPC:
9302
		pipe_config->pipe_bpp = 18;
9303
		break;
9304
	case PIPECONF_8BPC:
9305
		pipe_config->pipe_bpp = 24;
9306
		break;
9307
	case PIPECONF_10BPC:
9308
		pipe_config->pipe_bpp = 30;
9309
		break;
9310
	case PIPECONF_12BPC:
9311
		pipe_config->pipe_bpp = 36;
9312
		break;
9313
	default:
9314
		break;
9315
	}
9316
 
5060 serge 9317
	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9318
		pipe_config->limited_color_range = true;
9319
 
4104 Serge 9320
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9321
		struct intel_shared_dpll *pll;
9322
 
3746 Serge 9323
		pipe_config->has_pch_encoder = true;
9324
 
4104 Serge 9325
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9326
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9327
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9328
 
9329
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9330
 
9331
		if (HAS_PCH_IBX(dev_priv->dev)) {
9332
			pipe_config->shared_dpll =
9333
				(enum intel_dpll_id) crtc->pipe;
9334
		} else {
9335
			tmp = I915_READ(PCH_DPLL_SEL);
9336
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9337
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
9338
			else
9339
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
9340
		}
9341
 
9342
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9343
 
9344
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
9345
					   &pipe_config->dpll_hw_state));
9346
 
9347
		tmp = pipe_config->dpll_hw_state.dpll;
9348
		pipe_config->pixel_multiplier =
9349
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9350
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
4560 Serge 9351
 
9352
		ironlake_pch_clock_get(crtc, pipe_config);
4104 Serge 9353
	} else {
9354
		pipe_config->pixel_multiplier = 1;
9355
	}
9356
 
9357
	intel_get_pipe_timings(crtc, pipe_config);
9358
 
9359
	ironlake_get_pfit_config(crtc, pipe_config);
9360
 
3746 Serge 9361
	return true;
9362
}
9363
 
4104 Serge 9364
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9365
{
9366
	struct drm_device *dev = dev_priv->dev;
9367
	struct intel_crtc *crtc;
9368
 
5060 serge 9369
	for_each_intel_crtc(dev, crtc)
6084 serge 9370
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4104 Serge 9371
		     pipe_name(crtc->pipe));
9372
 
6084 serge 9373
	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9374
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9375
	I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9376
	I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9377
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9378
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4104 Serge 9379
	     "CPU PWM1 enabled\n");
5060 serge 9380
	if (IS_HASWELL(dev))
6084 serge 9381
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9382
		     "CPU PWM2 enabled\n");
9383
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4104 Serge 9384
	     "PCH PWM1 enabled\n");
6084 serge 9385
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4104 Serge 9386
	     "Utility pin enabled\n");
6084 serge 9387
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
4104 Serge 9388
 
5060 serge 9389
	/*
9390
	 * In theory we can still leave IRQs enabled, as long as only the HPD
9391
	 * interrupts remain enabled. We used to check for that, but since it's
9392
	 * gen-specific and since we only disable LCPLL after we fully disable
9393
	 * the interrupts, the check below should be enough.
9394
	 */
6084 serge 9395
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4104 Serge 9396
}
9397
 
5060 serge 9398
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9399
{
9400
	struct drm_device *dev = dev_priv->dev;
9401
 
9402
	if (IS_HASWELL(dev))
9403
		return I915_READ(D_COMP_HSW);
9404
	else
9405
		return I915_READ(D_COMP_BDW);
9406
}
9407
 
9408
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9409
{
9410
	struct drm_device *dev = dev_priv->dev;
9411
 
9412
	if (IS_HASWELL(dev)) {
9413
		mutex_lock(&dev_priv->rps.hw_lock);
9414
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9415
					    val))
9416
			DRM_ERROR("Failed to write to D_COMP\n");
9417
		mutex_unlock(&dev_priv->rps.hw_lock);
9418
	} else {
9419
		I915_WRITE(D_COMP_BDW, val);
9420
		POSTING_READ(D_COMP_BDW);
9421
	}
9422
}
9423
 
4104 Serge 9424
/*
9425
 * This function implements pieces of two sequences from BSpec:
9426
 * - Sequence for display software to disable LCPLL
9427
 * - Sequence for display software to allow package C8+
9428
 * The steps implemented here are just the steps that actually touch the LCPLL
9429
 * register. Callers should take care of disabling all the display engine
9430
 * functions, doing the mode unset, fixing interrupts, etc.
9431
 */
4560 Serge 9432
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6084 serge 9433
			      bool switch_to_fclk, bool allow_power_down)
4104 Serge 9434
{
9435
	uint32_t val;
9436
 
9437
	assert_can_disable_lcpll(dev_priv);
9438
 
9439
	val = I915_READ(LCPLL_CTL);
9440
 
9441
	if (switch_to_fclk) {
9442
		val |= LCPLL_CD_SOURCE_FCLK;
9443
		I915_WRITE(LCPLL_CTL, val);
9444
 
9445
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9446
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
9447
			DRM_ERROR("Switching to FCLK failed\n");
9448
 
9449
		val = I915_READ(LCPLL_CTL);
9450
	}
9451
 
9452
	val |= LCPLL_PLL_DISABLE;
9453
	I915_WRITE(LCPLL_CTL, val);
9454
	POSTING_READ(LCPLL_CTL);
9455
 
9456
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9457
		DRM_ERROR("LCPLL still locked\n");
9458
 
5060 serge 9459
	val = hsw_read_dcomp(dev_priv);
4104 Serge 9460
	val |= D_COMP_COMP_DISABLE;
5060 serge 9461
	hsw_write_dcomp(dev_priv, val);
9462
	ndelay(100);
4104 Serge 9463
 
5060 serge 9464
	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9465
		     1))
4104 Serge 9466
		DRM_ERROR("D_COMP RCOMP still in progress\n");
9467
 
9468
	if (allow_power_down) {
9469
		val = I915_READ(LCPLL_CTL);
9470
		val |= LCPLL_POWER_DOWN_ALLOW;
9471
		I915_WRITE(LCPLL_CTL, val);
9472
		POSTING_READ(LCPLL_CTL);
9473
	}
9474
}
9475
 
9476
/*
9477
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9478
 * source.
9479
 */
4560 Serge 9480
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4104 Serge 9481
{
9482
	uint32_t val;
9483
 
9484
	val = I915_READ(LCPLL_CTL);
9485
 
9486
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9487
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9488
		return;
9489
 
5060 serge 9490
	/*
9491
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
9492
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9493
	 */
6084 serge 9494
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4104 Serge 9495
 
9496
	if (val & LCPLL_POWER_DOWN_ALLOW) {
9497
		val &= ~LCPLL_POWER_DOWN_ALLOW;
9498
		I915_WRITE(LCPLL_CTL, val);
9499
		POSTING_READ(LCPLL_CTL);
9500
	}
9501
 
5060 serge 9502
	val = hsw_read_dcomp(dev_priv);
4104 Serge 9503
	val |= D_COMP_COMP_FORCE;
9504
	val &= ~D_COMP_COMP_DISABLE;
5060 serge 9505
	hsw_write_dcomp(dev_priv, val);
4104 Serge 9506
 
9507
	val = I915_READ(LCPLL_CTL);
9508
	val &= ~LCPLL_PLL_DISABLE;
9509
	I915_WRITE(LCPLL_CTL, val);
9510
 
9511
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9512
		DRM_ERROR("LCPLL not locked yet\n");
9513
 
9514
	if (val & LCPLL_CD_SOURCE_FCLK) {
9515
		val = I915_READ(LCPLL_CTL);
9516
		val &= ~LCPLL_CD_SOURCE_FCLK;
9517
		I915_WRITE(LCPLL_CTL, val);
9518
 
9519
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9520
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9521
			DRM_ERROR("Switching back to LCPLL failed\n");
9522
	}
9523
 
6084 serge 9524
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9525
	intel_update_cdclk(dev_priv->dev);
4104 Serge 9526
}
9527
 
5060 serge 9528
/*
9529
 * Package states C8 and deeper are really deep PC states that can only be
9530
 * reached when all the devices on the system allow it, so even if the graphics
9531
 * device allows PC8+, it doesn't mean the system will actually get to these
9532
 * states. Our driver only allows PC8+ when going into runtime PM.
9533
 *
9534
 * The requirements for PC8+ are that all the outputs are disabled, the power
9535
 * well is disabled and most interrupts are disabled, and these are also
9536
 * requirements for runtime PM. When these conditions are met, we manually do
9537
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9538
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9539
 * hang the machine.
9540
 *
9541
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9542
 * the state of some registers, so when we come back from PC8+ we need to
9543
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9544
 * need to take care of the registers kept by RC6. Notice that this happens even
9545
 * if we don't put the device in PCI D3 state (which is what currently happens
9546
 * because of the runtime PM support).
9547
 *
9548
 * For more, read "Display Sequences for Package C8" on the hardware
9549
 * documentation.
9550
 */
9551
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4104 Serge 9552
{
9553
	struct drm_device *dev = dev_priv->dev;
9554
	uint32_t val;
9555
 
9556
	DRM_DEBUG_KMS("Enabling package C8+\n");
9557
 
6084 serge 9558
	if (HAS_PCH_LPT_LP(dev)) {
4104 Serge 9559
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9560
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9561
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9562
	}
9563
 
9564
	lpt_disable_clkout_dp(dev);
9565
	hsw_disable_lcpll(dev_priv, true, true);
9566
}
9567
 
5060 serge 9568
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4104 Serge 9569
{
9570
	struct drm_device *dev = dev_priv->dev;
9571
	uint32_t val;
9572
 
9573
	DRM_DEBUG_KMS("Disabling package C8+\n");
9574
 
9575
	hsw_restore_lcpll(dev_priv);
9576
	lpt_init_pch_refclk(dev);
9577
 
6084 serge 9578
	if (HAS_PCH_LPT_LP(dev)) {
4104 Serge 9579
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9580
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9581
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9582
	}
9583
 
9584
	intel_prepare_ddi(dev);
9585
}
9586
 
6084 serge 9587
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
4104 Serge 9588
{
6084 serge 9589
	struct drm_device *dev = old_state->dev;
9590
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9591
 
9592
	broxton_set_cdclk(dev, req_cdclk);
9593
}
9594
 
9595
/*
 * Compute the maximum pixel rate over all enabled crtcs in the new
 * atomic configuration (used to size the cdclk).  Values are in kHz,
 * matching the cdclk frequencies they are compared against.
 *
 * Returns the max pixel rate, or a negative errno if a crtc state
 * cannot be acquired.
 */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixel_rate = 0;

	for_each_intel_crtc(state->dev, intel_crtc) {
		int pixel_rate;

		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* disabled pipes contribute nothing */
		if (!crtc_state->base.enable)
			continue;

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

		max_pixel_rate = max(max_pixel_rate, pixel_rate);
	}

	return max_pixel_rate;
}
9623
 
9624
/*
 * Reprogram the BDW CD clock to @cdclk kHz.
 *
 * Sequence: ask pcode for permission, temporarily source the CD clock
 * from FCLK, rewrite the frequency select field in LCPLL_CTL, switch
 * back to the LCPLL, then report the new frequency to pcode and mirror
 * it into CDCLK_FREQ.  Only the four frequencies in the switch below
 * are valid; anything else WARNs and bails out.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val, data;
	int ret;

	/* The LCPLL must be fully enabled (only LCPLL_PLL_LOCK set among
	 * these bits) before the frequency field may be changed. */
	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	/* Notify pcode of the upcoming display frequency change */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Park the CD clock on FCLK while the frequency field changes */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* data is the frequency index later handed to pcode */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Switch the CD clock back from FCLK to the LCPLL */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	/* Tell pcode the new display engine frequency */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* CDCLK_FREQ holds (MHz - 1), rounded */
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	/* Re-read the achieved frequency and sanity-check it */
	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
9702
 
9703
/*
 * Check-phase cdclk computation for BDW: pick the smallest supported
 * cdclk frequency that exceeds the max pixel rate of the new state, and
 * stash it in the atomic state for the commit phase.  Always returns 0;
 * ilk_max_pixel_rate() errors are currently folded into the comparison
 * chain rather than propagated (a negative rate selects 337500).
 */
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	int max_pixclk = ilk_max_pixel_rate(state);
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	if (max_pixclk > 540000)
		cdclk = 675000;
	else if (max_pixclk > 450000)
		cdclk = 540000;
	else if (max_pixclk > 337500)
		cdclk = 450000;
	else
		cdclk = 337500;

	/*
	 * FIXME move the cdclk calculation to
	 * compute_config() so we can fail gracefully.
	 */
	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			  cdclk, dev_priv->max_cdclk_freq);
		cdclk = dev_priv->max_cdclk_freq;
	}

	to_intel_atomic_state(state)->cdclk = cdclk;

	return 0;
}
9736
 
9737
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9738
{
9739
	struct drm_device *dev = old_state->dev;
9740
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9741
 
9742
	broadwell_set_cdclk(dev, req_cdclk);
9743
}
9744
 
9745
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9746
				      struct intel_crtc_state *crtc_state)
9747
{
9748
	if (!intel_ddi_pll_select(crtc, crtc_state))
5354 serge 9749
		return -EINVAL;
9750
 
9751
	crtc->lowfreq_avail = false;
9752
 
9753
	return 0;
4104 Serge 9754
}
9755
 
6084 serge 9756
/*
 * Hardware readout of the PLL feeding @port on Broxton.  BXT has a
 * fixed port-to-PLL mapping, so no register read is needed; unknown
 * ports leave @pipe_config untouched after logging an error.
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	switch (port) {
	case PORT_A:
		pipe_config->ddi_pll_sel = SKL_DPLL0;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_B:
		pipe_config->ddi_pll_sel = SKL_DPLL1;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case PORT_C:
		pipe_config->ddi_pll_sel = SKL_DPLL2;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
	}
}
9777
 
5354 serge 9778
/*
 * Hardware readout of the DPLL feeding @port on Skylake, decoded from
 * DPLL_CTRL2.  DPLL0 (used for eDP) lives outside the shared-DPLL
 * framework, so its ctrl1 state is captured directly instead of setting
 * a shared_dpll id.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	/* Each port has a 3-bit clock-select field in DPLL_CTRL2 */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}
9808
 
5354 serge 9809
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9810
				enum port port,
6084 serge 9811
				struct intel_crtc_state *pipe_config)
4104 Serge 9812
{
5354 serge 9813
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
4104 Serge 9814
 
5354 serge 9815
	switch (pipe_config->ddi_pll_sel) {
9816
	case PORT_CLK_SEL_WRPLL1:
9817
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
9818
		break;
9819
	case PORT_CLK_SEL_WRPLL2:
9820
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
9821
		break;
6084 serge 9822
	case PORT_CLK_SEL_SPLL:
9823
		pipe_config->shared_dpll = DPLL_ID_SPLL;
5354 serge 9824
	}
4104 Serge 9825
}
9826
 
5060 serge 9827
/*
 * Hardware readout of the DDI port state for @crtc: which port the
 * transcoder drives, which PLL feeds it (platform specific), and — for
 * port E on pre-gen9 — the FDI/PCH link configuration.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_SKYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* Cross-check the readout against the shared DPLL's own state */
	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is wired to
	 * DDI E.  So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9870
 
3746 Serge 9871
/*
 * Full hardware-state readout for a pipe on HSW+.  Returns false when
 * the relevant power domains are off or the pipe is disabled (meaning
 * there is no state to read), true after @pipe_config is populated.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* If the eDP transcoder is enabled and feeds this pipe, it is the
	 * pipe's transcoder rather than the default per-pipe one. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			/* intentional fallthrough: treat unknown as pipe A */
			WARN(1, "unknown pipe linked to edp transcoder\n");
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_is_enabled(dev_priv,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 9) {
		skl_init_scalers(dev, crtc, pipe_config);
	}

	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);

	/* Default: no scaler assigned to this crtc */
	if (INTEL_INFO(dev)->gen >= 9) {
		pipe_config->scaler_state.scaler_id = -1;
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}

	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
		if (INTEL_INFO(dev)->gen >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	/* PIPE_MULT only exists for non-eDP transcoders */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	return true;
}
9951
 
6084 serge 9952
/*
 * Program the cursor on 845G/865G.  These chips have a single cursor
 * (on pipe A) whose base/size/stride may only be changed while the
 * cursor is disabled, hence the disable-then-reprogram dance below.
 * @on false leaves cntl/size at 0, i.e. disables the cursor.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (on) {
		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
		/* 4 bytes per pixel, rounded up to a power-of-two stride */
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	/* If anything changed while the cursor was enabled, turn it off
	 * first so base/size/stride can be safely rewritten. */
	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	/* Re-enable (or leave disabled) with the new control value */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
2327 Serge 10013
 
6084 serge 10014
/*
 * Program the per-pipe ARGB cursor on i9xx and later.  Only square
 * 64/128/256 cursors are supported.  CURBASE is written last so the
 * new configuration latches on the next vblank.  @on false programs
 * cntl = 0 (cursor off, modulo the rotation bit).
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (on) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (intel_crtc->base.cursor->state->crtc_w) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
				return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;
	}

	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
		cntl |= CURSOR_ROTATE_180;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
2327 Serge 10059
 
3031 serge 10060
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Position the cursor and hand off to the platform-specific update
 * helper.  Negative coordinates are encoded via the CURPOS sign bits;
 * a cursor that is entirely off-screen is forced off (@on -> false)
 * since scanning it out can hang the GPU (see comment above).
 */
void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_plane_state *cursor_state = crtc->cursor->state;
	int x = cursor_state->crtc_x;
	int y = cursor_state->crtc_y;
	u32 base = 0, pos = 0;

	base = intel_crtc->cursor_addr;

	/* Fully off-screen right/bottom: disable */
	if (x >= intel_crtc->config->pipe_src_w)
		on = false;

	if (y >= intel_crtc->config->pipe_src_h)
		on = false;

	if (x < 0) {
		/* Fully off-screen left: disable */
		if (x + cursor_state->crtc_w <= 0)
			on = false;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		/* Fully off-screen top: disable */
		if (y + cursor_state->crtc_h <= 0)
			on = false;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	I915_WRITE(CURPOS(pipe), pos);

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev) &&
	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
		/* Point base at the last pixel for 180 degree rotation */
		base += (cursor_state->crtc_h *
			 cursor_state->crtc_w - 1) * 4;
	}

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base, on);
	else
		i9xx_update_cursor(crtc, base, on);
}
2327 Serge 10113
 
5354 serge 10114
/*
 * Validate a requested cursor size for this platform.  Returns true if
 * the hardware can scan out a cursor of @width x @height.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* width | height matches a case only when width == height
		 * and both are that exact power of two */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fallthrough */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}
10150
 
2330 Serge 10151
/*
 * Cache the 16-bit gamma ramp entries [start, start+size) as 8-bit LUT
 * values on the crtc (clamped to the 256-entry table) and push the LUT
 * to the hardware.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int last = start + size, i;

	if (last > 256)
		last = 256;

	for (i = start; i < last; i++) {
		/* keep only the high byte of each 16-bit component */
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
2327 Serge 10165
 
2330 Serge 10166
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
2327 Serge 10171
 
4560 Serge 10172
/*
 * Allocate an intel_framebuffer wrapping @obj and initialize it from
 * @mode_cmd.  Consumes the caller's reference on @obj: it is dropped on
 * every error path.  Returns the new fb or an ERR_PTR.
 * NOTE(review): callers appear to hold struct_mutex (see
 * intel_framebuffer_create) — confirm before adding new call sites.
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;
err:
	drm_gem_object_unreference(&obj->base);
	kfree(intel_fb);

	return ERR_PTR(ret);
}
2327 Serge 10197
 
5060 serge 10198
/*
 * Locked wrapper around __intel_framebuffer_create(): takes struct_mutex
 * (interruptibly) around fb creation.  Returns the new fb or an ERR_PTR;
 * @obj ownership is passed through to __intel_framebuffer_create().
 */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}
10214
 
2330 Serge 10215
static u32
10216
intel_framebuffer_pitch_for_width(int width, int bpp)
10217
{
10218
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10219
	return ALIGN(pitch, 64);
10220
}
2327 Serge 10221
 
2330 Serge 10222
static u32
10223
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10224
{
10225
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5060 serge 10226
	return PAGE_ALIGN(pitch * mode->vdisplay);
2330 Serge 10227
}
2327 Serge 10228
 
2330 Serge 10229
/*
 * Allocate a GEM object sized for @mode at @bpp/@depth and wrap it in a
 * framebuffer.  Returns the fb or an ERR_PTR; the object reference is
 * owned by the fb (or dropped) via intel_framebuffer_create().
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}
2327 Serge 10250
 
2330 Serge 10251
/*
 * Try to reuse the fbdev framebuffer for load detection.  Returns the
 * fbdev fb when it exists and its pitch/size can accommodate @mode,
 * otherwise NULL.  Always NULL without CONFIG_DRM_FBDEV_EMULATION.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	/* wide enough for the mode at its bpp? */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* tall enough for the mode? */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}
2327 Serge 10282
 
6084 serge 10283
/*
 * Add a state for @crtc's primary plane to the atomic @state: scan out
 * @fb (or detach the plane when @fb is NULL) at source offset @x/@y,
 * sized to @mode's visible area (0x0 when @mode is NULL).  Source
 * coordinates are in the DRM 16.16 fixed-point format.  Returns 0 or a
 * negative errno.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}
10317
 
3031 serge 10318
/*
 * Set up a pipe so @connector can be probed via load detection.
 *
 * Either reuses the crtc the connector's encoder is already bound to
 * (just forcing DPMS on), or picks an unused crtc, builds a minimal
 * atomic state with a scratch framebuffer (reusing fbdev's when it
 * fits) and commits it.  Records undo information in @old for
 * intel_release_load_detect_pipe().  Lock contention (-EDEADLK) is
 * handled with the standard drm_modeset_backoff()/retry loop.
 *
 * Returns true when a pipe is active for detection, false on failure.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
		if (ret)
			goto fail;

		/* Nothing temporary was set up: release only restores DPMS */
		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (possible_crtc->state->enable)
			continue;

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail;
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	/* NOTE(review): returning false here skips the fail path's
	 * backoff handling — relies on the caller dropping the already
	 * acquired locks via @ctx. */
	state = drm_atomic_state_alloc(dev);
	if (!state)
		return false;

	state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	connector_state->crtc = crtc;
	connector_state->best_encoder = &intel_encoder->base;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	drm_mode_copy(&crtc_state->base.mode, mode);

	if (drm_atomic_commit(state)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}
	crtc->primary->crtc = crtc;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	state = NULL;

	/* ww-mutex deadlock: drop everything and start over */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}
2330 Serge 10482
}
2327 Serge 10483
 
3031 serge 10484
/*
 * Undo a previous intel_get_load_detect_pipe(): tear down the temporary
 * pipe/plane state (if one was set up) or restore the connector's original
 * DPMS mode, and release the temporary framebuffer if we created one.
 * Must be called with the same acquire context used for load detection.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = connector->dev;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_atomic_state *state;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		/* We enabled this pipe only for load detection: build an
		 * atomic state that detaches the connector and disables the
		 * crtc/plane again, then commit it. */
		state = drm_atomic_state_alloc(dev);
		if (!state)
			goto fail;

		state->acquire_ctx = ctx;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state))
			goto fail;

		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			goto fail;

		/* Unlink the connector from the temporary encoder/crtc. */
		connector_state->best_encoder = NULL;
		connector_state->crtc = NULL;

		crtc_state->base.enable = crtc_state->base.active = false;

		/* Clear the primary plane (no fb, 0x0). */
		ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
						      0, 0);
		if (ret)
			goto fail;

		ret = drm_atomic_commit(state);
		if (ret)
			goto fail;

		/* Drop the framebuffer we allocated for load detection. */
		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	return;
fail:
	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
	/* NOTE(review): 'state' is NULL here when the alloc itself failed —
	 * presumably drm_atomic_state_free() tolerates NULL; confirm. */
	drm_atomic_state_free(state);
}
2327 Serge 10549
 
4560 Serge 10550
static int i9xx_pll_refclk(struct drm_device *dev,
6084 serge 10551
			   const struct intel_crtc_state *pipe_config)
4560 Serge 10552
{
10553
	struct drm_i915_private *dev_priv = dev->dev_private;
10554
	u32 dpll = pipe_config->dpll_hw_state.dpll;
10555
 
10556
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10557
		return dev_priv->vbt.lvds_ssc_freq;
10558
	else if (HAS_PCH_SPLIT(dev))
10559
		return 120000;
10560
	else if (!IS_GEN2(dev))
10561
		return 96000;
10562
	else
10563
		return 48000;
10564
}
10565
 
2330 Serge 10566
/* Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL and FP divider registers (already captured in
 * pipe_config->dpll_hw_state) back into m/n/p divisors and stores the
 * resulting clock in pipe_config->port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP register the DPLL is currently sourcing from. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Pineview encodes N as a one-hot field, hence the ffs(). */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot on gen3+ as well. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL output mode (DAC vs. LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: divider layout differs and LVDS (pipe B only)
		 * has its own P1/P2 encoding. i830 has no LVDS register. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10655
 
4560 Serge 10656
int intel_dotclock_calculate(int link_freq,
10657
			     const struct intel_link_m_n *m_n)
4104 Serge 10658
{
10659
	/*
10660
	 * The calculation for the data clock is:
4560 Serge 10661
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4104 Serge 10662
	 * But we want to avoid losing precison if possible, so:
4560 Serge 10663
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4104 Serge 10664
	 *
10665
	 * and the link clock is simpler:
4560 Serge 10666
	 * link_clock = (m * link_clock) / n
2330 Serge 10667
	 */
2327 Serge 10668
 
4560 Serge 10669
	if (!m_n->link_n)
10670
		return 0;
4104 Serge 10671
 
4560 Serge 10672
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10673
}
4104 Serge 10674
 
4560 Serge 10675
/*
 * Read back the pixel clock of a PCH-driven pipe: decode the DPLL for
 * port_clock, then derive the dot clock from the FDI M/N values.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}
2327 Serge 10693
 
2330 Serge 10694
/** Returns the currently programmed mode of the given pipe.
 *
 * Reads the pipe timing registers and the DPLL/FP dividers and rebuilds a
 * drm_display_mode from them.  Returns a kzalloc'd mode on success (the
 * caller presumably owns and frees it — standard for this API; confirm),
 * or NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	/* Timing register fields are stored as value-1; low 16 bits are the
	 * "active"/"start" half, high 16 bits the "total"/"end" half. */
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}
10741
 
3031 serge 10742
/*
 * Mark the GPU busy: take a runtime-PM reference, refresh the power
 * measurement state and kick RPS into the busy regime (gen6+).
 * Idempotent — a repeated call while already busy is a no-op, so the
 * runtime-PM get is balanced by exactly one intel_mark_idle().
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}
2327 Serge 10755
 
3031 serge 10756
/*
 * Mark the GPU idle: drop RPS to the idle regime (gen6+) and release the
 * runtime-PM reference taken by intel_mark_busy().  A call while already
 * idle is a no-op, keeping the PM refcount balanced.
 */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

	/* Released last, after RPS has been quiesced. */
	intel_runtime_pm_put(dev_priv);
}
2327 Serge 10770
 
2330 Serge 10771
/*
 * CRTC destroy callback: detach any pending page-flip work under the event
 * lock, synchronously cancel it outside the lock (cancel_work_sync may
 * sleep), then free the DRM bookkeeping and the intel_crtc itself.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Steal the pointer under the lock so the irq path can't race us. */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
2327 Serge 10791
 
3031 serge 10792
/*
 * Deferred completion of a page flip: unpin the old framebuffer, drop the
 * GEM/request/framebuffer references taken when the flip was queued, and
 * release the per-crtc unpin_work slot.  Runs from the workqueue because
 * unpinning needs struct_mutex.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	/* Drop the request reference held while the flip was in flight. */
	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	drm_framebuffer_unreference(work->old_fb);

	/* unpin_work_count throttles how many flips may be outstanding. */
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
2327 Serge 10816
 
3031 serge 10817
/*
 * Common flip-done handler: if the crtc's pending flip has reached the
 * COMPLETE state, finish it (send the event, queue the unpin work).
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ...
	 * (pairs with the smp_wmb() in intel_mark_page_flip_active) */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
2327 Serge 10847
 
3031 serge 10848
void intel_finish_page_flip(struct drm_device *dev, int pipe)
10849
{
5060 serge 10850
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10851
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2327 Serge 10852
 
3031 serge 10853
	do_intel_finish_page_flip(dev, crtc);
10854
}
2327 Serge 10855
 
3031 serge 10856
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10857
{
5060 serge 10858
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10859
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
2327 Serge 10860
 
3031 serge 10861
	do_intel_finish_page_flip(dev, crtc);
10862
}
2327 Serge 10863
 
5060 serge 10864
/* Is 'a' after or equal to 'b'? */
10865
static bool g4x_flip_count_after_eq(u32 a, u32 b)
10866
{
10867
	return !((a - b) & 0x80000000);
10868
}
10869
 
10870
/*
 * Has the crtc's pending CS flip actually landed in the hardware?
 * Returns true when it is safe to treat the flip as finished.
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* After a GPU reset the flip is lost anyway — report it done so it
	 * can be completed and cleaned up. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}
10909
 
3031 serge 10910
/*
 * Flip-pending irq handler: promote the crtc's unpin_work from PENDING to
 * COMPLETE once the hardware confirms (page_flip_finished) that the flip
 * has actually taken effect.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;


	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	/* inc_not_zero: only advance a work item already marked active. */
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
2327 Serge 10931
 
6084 serge 10932
/*
 * Publish a queued flip to the irq handlers by setting work->pending to
 * INTEL_FLIP_PENDING, fenced by write barriers on both sides (the readers
 * pair with smp_rmb() in do_intel_finish_page_flip()).
 */
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}
6320 serge 10940
 
3031 serge 10941
/*
 * Queue a gen2 CS page flip: wait for any outstanding flip on this plane,
 * then emit MI_DISPLAY_FLIP with the new pitch and surface offset.
 * Returns 0 on success or the intel_ring_begin() error.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 10975
 
3031 serge 10976
/*
 * Queue a gen3 CS page flip.  Same shape as gen2, but uses the
 * MI_DISPLAY_FLIP_I915 opcode and pads with a trailing MI_NOOP instead of
 * an aux base dword.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Serialize against any still-pending flip on this plane. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11007
 
3031 serge 11008
/*
 * Queue a gen4 (i965+) CS page flip: only the base address (with tiling
 * bit) needs reprogramming; the final dword carries pf|pipesrc.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11046
 
3031 serge 11047
/*
 * Queue a gen6 CS page flip.  Like gen4 but the tiling mode is packed
 * into the pitch dword rather than the base-address dword.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11082
 
3031 serge 11083
/*
 * Queue a gen7+ (IVB-style) CS page flip.  On the render ring this also
 * emits an LRI/SRM pair to unmask flip-done events in DERRMR and save its
 * value, per the workaround described inline.  Returns 0 on success or a
 * negative errno (-ENODEV for an unknown plane, or a ring error).
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (ring->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
2327 Serge 11177
 
6084 serge 11178
static bool use_mmio_flip(struct intel_engine_cs *ring,
11179
			  struct drm_i915_gem_object *obj)
11180
{
11181
	/*
11182
	 * This is not being used for older platforms, because
11183
	 * non-availability of flip done interrupt forces us to use
11184
	 * CS flips. Older platforms derive flip done using some clever
11185
	 * tricks involving the flip_pending status bits and vblank irqs.
11186
	 * So using MMIO flips there would disrupt this mechanism.
11187
	 */
11188
 
11189
	if (ring == NULL)
11190
		return true;
11191
 
11192
	if (INTEL_INFO(ring->dev)->gen < 5)
11193
		return false;
11194
 
11195
	if (i915.use_mmio_flip < 0)
11196
		return false;
11197
	else if (i915.use_mmio_flip > 0)
11198
		return true;
11199
	else if (i915.enable_execlists)
11200
		return true;
11201
	else
11202
		return ring != i915_gem_request_get_ring(obj->last_write_req);
11203
}
11204
 
11205
/*
 * Perform a gen9 (Skylake-style) MMIO flip: reprogram the universal
 * plane's tiling, stride and surface address for plane 0 of the pipe.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride;

	/* Translate the fb modifier into the PLANE_CTL tiling bits. */
	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	stride = fb->pitches[0] /
		 intel_fb_stride_alignment(dev, fb->modifier[0],
					   fb->pixel_format);

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	/* The PLANE_SURF write latches everything above. */
	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
11250
 
11251
/*
 * Perform a pre-gen9 (Ironlake-style) MMIO flip: update the tiling bit in
 * DSPCNTR and write the new surface address; the DSPSURF write latches
 * the update.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	u32 dspcntr;
	u32 reg;

	reg = DSPCNTR(intel_crtc->plane);
	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}
11275
 
11276
/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 *
 * Picks up the crtc's pending unpin_work (if any), marks the flip active,
 * and performs the platform-appropriate register update inside an
 * intel_pipe_update_start()/end() critical section.
 */
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
{
	struct intel_crtc *crtc = mmio_flip->crtc;
	struct intel_unpin_work *work;

	spin_lock_irq(&crtc->base.dev->event_lock);
	work = crtc->unpin_work;
	spin_unlock_irq(&crtc->base.dev->event_lock);
	if (work == NULL)
		return;

	intel_mark_page_flip_active(work);

	intel_pipe_update_start(crtc);

	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
		skl_do_mmio_flip(crtc, work);
	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc);
}
11303
 
11304
/*
 * Deferred-work handler for an MMIO flip: wait (outside any locks) for
 * the last GPU write to the new framebuffer to finish, then program the
 * flip registers and free the flip descriptor.
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
	struct intel_mmio_flip *mmio_flip =
		container_of(work, struct intel_mmio_flip, work);

	if (mmio_flip->req) {
		/* Block until rendering completes; rps.mmioflips is passed
		 * so the wait can be frequency-boosted. */
		WARN_ON(__i915_wait_request(mmio_flip->req,
					    mmio_flip->crtc->reset_counter,
					    false, NULL,
					    &mmio_flip->i915->rps.mmioflips));
		i915_gem_request_unreference__unlocked(mmio_flip->req);
	}

	intel_do_mmio_flip(mmio_flip);
	kfree(mmio_flip);
}
11320
 
11321
/*
 * Queue an MMIO-based page flip: capture the object's last write request
 * (so the worker can wait for rendering to finish) and schedule the flip
 * work item.
 *
 * @fb, @ring and @flags are unused here; the signature mirrors the CS
 * queue_flip() hooks.
 *
 * Returns 0 on success or -ENOMEM if the flip descriptor allocation fails.
 */
static int intel_queue_mmio_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_mmio_flip *mmio_flip;

	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
	if (mmio_flip == NULL)
		return -ENOMEM;

	mmio_flip->i915 = to_i915(dev);
	/* May be NULL when the object has no outstanding GPU write;
	 * the work func handles that case. */
	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
	mmio_flip->crtc = to_intel_crtc(crtc);

	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
	schedule_work(&mmio_flip->work);

	return 0;
}
11343
 
3031 serge 11344
/*
 * Default queue_flip hook for platforms with no CS flip implementation;
 * always fails with -ENODEV so the caller can fall back or error out.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}
2327 Serge 11353
 
6084 serge 11354
/*
 * Heuristically decide whether the pending page flip on @crtc has stalled.
 *
 * Returns true when the flip is already marked complete, or when it has
 * been ready (rendering finished) for at least three vblanks and the
 * display base register already points at the new surface — i.e. the
 * hardware flipped but we likely missed the interrupt.
 *
 * Both callers in this file invoke this with dev->event_lock held, which
 * protects intel_crtc->unpin_work.
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work = intel_crtc->unpin_work;
	u32 addr;

	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
		return true;

	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
		return false;

	if (!work->enable_stall_check)
		return false;

	if (work->flip_ready_vblank == 0) {
		/* Not "ready" until rendering to the new fb has finished. */
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req, true))
			return false;

		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
	}

	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_INFO(dev)->gen >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}
11395
 
11396
/*
 * Called on vblank for @pipe: complete by hand a page flip that the stall
 * check says already happened (missed interrupt), and rps-boost a flip
 * whose render request has kept it waiting for more than one vblank.
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* NOTE(review): to_intel_crtc() is evaluated before this NULL check;
	 * presumably it is pure container_of() pointer arithmetic, so no
	 * dereference occurs — confirm against the to_intel_crtc definition. */
	if (crtc == NULL)
		return;

	spin_lock(&dev->event_lock);
	work = intel_crtc->unpin_work;
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
		work = NULL;
	}
	/* Flip still pending after more than one vblank: boost the GPU
	 * frequency for the request that's holding it up. */
	if (work != NULL &&
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}
6320 serge 11419
 
3031 serge 11420
/*
 * Legacy page-flip entry point (drm_crtc_funcs.page_flip).
 *
 * Validates that the new fb is flip-compatible with the current one,
 * queues an intel_unpin_work descriptor, pins the new framebuffer, and
 * submits the flip either via MMIO (intel_queue_mmio_flip) or via a
 * command-stream request (display.queue_flip), choosing the ring per
 * platform.  On -EIO / terminal GPU wedging it falls through to a full
 * atomic commit of the new fb (out_hang path) so the flip still lands.
 *
 * Returns 0 on success; -EBUSY if the old fb is gone or a flip is already
 * pending, -EINVAL for incompatible fbs, -ENOMEM, or the error from the
 * pin/queue steps.  All error paths unwind their own state via the goto
 * cleanup chain.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	bool mmio_flip;
	struct drm_i915_gem_request *request = NULL;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe.  In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->unpin_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (__intel_pageflip_stall_check(dev, crtc)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irq(&dev->event_lock);

	/* KolibriOS port: upstream throttles here by flushing the workqueue
	 * when two unpins are already outstanding; disabled in this port. */
//   if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
//       flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	work->pending_flip_obj = obj;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	/* Per-platform choice of engine for the CS flip. */
	if (IS_VALLEYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = i915_gem_request_get_ring(obj->last_write_req);
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	mmio_flip = use_mmio_flip(ring, obj);

	/* When using CS flips, we want to emit semaphores between rings.
	 * However, when using mmio flips we will create a task to do the
	 * synchronisation, so all we want here is to pin the framebuffer
	 * into the display plane and skip any waits.
	 */
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
					 crtc->primary->state,
					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
						  obj, 0);
	work->gtt_offset += intel_crtc->dspaddr_offset;

	if (mmio_flip) {
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
					    page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		/* The MMIO worker waits on the object's last write. */
		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);
	} else {
		if (!request) {
			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
			if (ret)
				goto cleanup_unpin;
		}

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req, request);
	}

	if (request)
		i915_add_request_no_flush(request);

	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
	work->enable_stall_check = true;

	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_fbc_disable_crtc(intel_crtc);
	intel_frontbuffer_flip_prepare(dev,
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

/* Error unwinding: each label undoes the steps taken up to its goto. */
cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
	if (request)
		i915_gem_request_cancel(request);
	atomic_dec(&intel_crtc->unpin_work_count);
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	drm_gem_object_unreference_unlocked(&obj->base);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

/* GPU wedged: fall back to a full atomic commit of the new fb. */
out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		if (ret)
			drm_atomic_state_free(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_send_vblank_event(dev, pipe, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}
11651
 
11652
 
11653
/**
6084 serge 11654
 * intel_wm_need_update - Check whether watermarks need updating
11655
 * @plane: drm plane
11656
 * @state: new plane state
3031 serge 11657
 *
6084 serge 11658
 * Check current plane state versus the new one to determine whether
11659
 * watermarks need to be recalculated.
11660
 *
11661
 * Returns true or false.
3031 serge 11662
 */
6084 serge 11663
static bool intel_wm_need_update(struct drm_plane *plane,
11664
				 struct drm_plane_state *state)
3031 serge 11665
{
6084 serge 11666
	/* Update watermarks on tiling changes. */
11667
	if (!plane->state->fb || !state->fb ||
11668
	    plane->state->fb->modifier[0] != state->fb->modifier[0] ||
11669
	    plane->state->rotation != state->rotation)
11670
		return true;
3031 serge 11671
 
6084 serge 11672
	if (plane->state->crtc_w != state->crtc_w)
11673
		return true;
11674
 
11675
	return false;
11676
}
11677
 
11678
/*
 * Compute the per-plane bookkeeping for an atomic update: derive the
 * visibility transition (turn_on/turn_off) from the old and new plane
 * states and record the required follow-up actions (watermark updates,
 * cxsr disable, vblank waits, fbc/ips handling, frontbuffer bits) in
 * intel_crtc->atomic.
 *
 * Returns 0 on success or the error from skl_update_scaler_plane().
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	int idx = intel_crtc->base.base.id, ret;
	int i = drm_plane_index(plane);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;

	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;

	/* gen9+ non-cursor planes may need a pipe scaler allocated/updated. */
	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	/*
	 * Disabling a plane is always okay; we just need to update
	 * fb tracking in a special way since cleanup_fb() won't
	 * get called by the plane helpers.
	 */
	if (old_plane_state->base.fb && !fb)
		intel_crtc->atomic.disabled_planes |= 1 << i;

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	/* A plane can't be visible on a disabled crtc; warn and correct. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	if (!is_crtc_enabled && WARN_ON(visible))
		visible = false;

	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
			 plane->base.id, fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		intel_crtc->atomic.update_wm_pre = true;
		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
			intel_crtc->atomic.disable_cxsr = true;
			/* to potentially re-enable cxsr */
			intel_crtc->atomic.wait_vblank = true;
			intel_crtc->atomic.update_wm_post = true;
		}
	} else if (turn_off) {
		intel_crtc->atomic.update_wm_post = true;
		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
			if (is_crtc_enabled)
				intel_crtc->atomic.wait_vblank = true;
			intel_crtc->atomic.disable_cxsr = true;
		}
	} else if (intel_wm_need_update(plane, plane_state)) {
		intel_crtc->atomic.update_wm_pre = true;
	}

	if (visible || was_visible)
		intel_crtc->atomic.fb_bits |=
			to_intel_plane(plane)->frontbuffer_bit;

	/* Plane-type specific follow-up actions. */
	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		intel_crtc->atomic.wait_for_flips = true;
		intel_crtc->atomic.pre_disable_primary = turn_off;
		intel_crtc->atomic.post_enable_primary = turn_on;

		if (turn_off) {
			/*
			 * FIXME: Actually if we will still have any other
			 * plane enabled on the pipe we could let IPS enabled
			 * still, but for now lets consider that when we make
			 * primary invisible by setting DSPCNTR to 0 on
			 * update_primary_plane function IPS needs to be
			 * disable.
			 */
			intel_crtc->atomic.disable_ips = true;

			intel_crtc->atomic.disable_fbc = true;
		}

		/*
		 * FBC does not work on some platforms for rotated
		 * planes, so disable it when rotation is not 0 and
		 * update it when rotation is set back to 0.
		 *
		 * FIXME: This is redundant with the fbc update done in
		 * the primary plane enable function except that that
		 * one is done too late. We eventually need to unify
		 * this.
		 */

		if (visible &&
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
		    dev_priv->fbc.crtc == intel_crtc &&
		    plane_state->rotation != BIT(DRM_ROTATE_0))
			intel_crtc->atomic.disable_fbc = true;

		/*
		 * BDW signals flip done immediately if the plane
		 * is disabled, even if the plane enable is already
		 * armed to occur at the next vblank :(
		 */
		if (turn_on && IS_BROADWELL(dev))
			intel_crtc->atomic.wait_vblank = true;

		intel_crtc->atomic.update_fbc |= visible || mode_changed;
		break;
	case DRM_PLANE_TYPE_CURSOR:
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		if (turn_off && !mode_changed) {
			intel_crtc->atomic.wait_vblank = true;
			intel_crtc->atomic.update_sprite_watermarks |=
				1 << i;
		}
	}
	return 0;
}
11819
 
6084 serge 11820
static bool encoders_cloneable(const struct intel_encoder *a,
11821
			       const struct intel_encoder *b)
3031 serge 11822
{
6084 serge 11823
	/* masks could be asymmetric, so check both ways */
11824
	return a == b || (a->cloneable & (1 << b->type) &&
11825
			  b->cloneable & (1 << a->type));
11826
}
11827
 
11828
/*
 * Verify that @encoder can be cloned with every other encoder the atomic
 * @state routes to @crtc.  Returns false on the first illegal pairing.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		/* Only connectors assigned to this crtc matter. */
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}
11849
 
11850
/*
 * Verify that all encoders the atomic @state assigns to @crtc are
 * pairwise cloneable.  Returns false if any combination is illegal.
 */
static bool check_encoder_cloning(struct drm_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (!check_single_encoder_cloning(state, crtc, encoder))
			return false;
	}

	return true;
}
11869
 
11870
/*
 * Atomic .atomic_check hook for intel crtcs: rejects invalid encoder
 * cloning, computes clock/dpll state for a modeset, and sets up gen9+
 * pipe/plane scalers.
 *
 * Returns 0 on success or a negative error code to fail the commit check.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return -EINVAL;
	}

	/* Crtc going inactive: watermarks must be updated after disable. */
	if (mode_changed && !crtc_state->active)
		intel_crtc->atomic.update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	ret = 0;
	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}
11911
 
6084 serge 11912
/* CRTC helper vtable wiring this driver's legacy set_base and atomic
 * begin/flush/check implementations into the DRM helper framework. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
11919
 
11920
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11921
{
11922
	struct intel_connector *connector;
11923
 
11924
	for_each_intel_connector(dev, connector) {
11925
		if (connector->base.encoder) {
11926
			connector->base.state->best_encoder =
11927
				connector->base.encoder;
11928
			connector->base.state->crtc =
11929
				connector->base.encoder->crtc;
11930
		} else {
11931
			connector->base.state->best_encoder = NULL;
11932
			connector->base.state->crtc = NULL;
11933
		}
11934
	}
11935
}
11936
 
4104 Serge 11937
/*
 * Clamp pipe_config->pipe_bpp to what the sink behind @connector accepts:
 * the EDID-reported bpc when present, otherwise a 24 bpp ceiling for
 * screens whose EDID predates 1.4 (bpc == 0).
 */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		connector->base.base.id,
		connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}
11962
 
3746 Serge 11963
/*
 * Pick the platform's baseline pipe bpp (10 bpc on G4X/VLV, 12 bpc on
 * gen5+, otherwise 8 bpc), store it in @pipe_config, then clamp it
 * against every connector that the atomic state routes to @crtc.
 *
 * Returns the unclamped baseline bpp so callers can detect whether any
 * sink forced a reduction of pipe_config->pipe_bpp.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)))
		bpp = 10*3;
	else if (INTEL_INFO(dev)->gen >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;


	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}
11996
 
4560 Serge 11997
/* Dump the hardware (crtc_*) timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
			"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
12007
 
4104 Serge 12008
/*
 * Dump the full contents of @pipe_config to the KMS debug log: link m/n
 * values, modes, pfit/scaler state, the gen-specific DPLL hardware state,
 * and the state of every plane attached to @crtc's pipe.  @context is a
 * caller-supplied tag printed in the header line.  Debug-only; has no
 * side effects on hardware or state.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* Second set of m/n values (m2_n2), e.g. for alternate link config. */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* DPLL hardware state layout differs per platform generation. */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
				"disabled, scaler_id = %d\n",
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
				plane->base.id, intel_plane->pipe,
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
				drm_plane_index(plane), state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
			plane->base.id, intel_plane->pipe,
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
			drm_plane_index(plane));
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
			fb->base.id, fb->width, fb->height, fb->pixel_format);
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
			state->scaler_id,
			state->src.x1 >> 16, state->src.y1 >> 16,
			drm_rect_width(&state->src) >> 16,
			drm_rect_height(&state->src) >> 16,
			state->dst.x1, state->dst.y1,
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
	}
}
12144
 
6084 serge 12145
/*
 * Reject atomic states in which two connectors would end up driving the
 * same digital port.  Returns true when the configuration is conflict-free.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;	/* bitmask of digital ports claimed so far */

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the state tracked in this atomic update; fall back to
		 * the connector's current state when it is not part of it. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			/* UNKNOWN is only expected on DDI platforms. */
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fallthrough - treat like the other digital outputs */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
		default:
			break;
		}
	}

	return true;
}
12193
 
6084 serge 12194
/*
 * Zero @crtc_state while preserving the few fields that must survive a
 * recompute: the base drm state, scaler state, shared DPLL selection and
 * its hw state, the DDI PLL select, and the pch_pfit force-through flag.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	enum intel_dpll_id shared_dpll;
	uint32_t ddi_pll_sel;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Save the survivors... */
	tmp_state = crtc_state->base;
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	ddi_pll_sel = crtc_state->ddi_pll_sel;
	force_thru = crtc_state->pch_pfit.force_thru;

	memset(crtc_state, 0, sizeof *crtc_state);

	/* ...and restore them after the wipe. */
	crtc_state->base = tmp_state;
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->ddi_pll_sel = ddi_pll_sel;
	crtc_state->pch_pfit.force_thru = force_thru;
}
12225
 
12226
/*
 * Compute the pipe configuration for @crtc from the atomic state: sanitize
 * sync flags, pick a baseline bpp, then let every encoder on the pipe adjust
 * (or reject) the mode before the CRTC-level fixup runs.  The fixup may ask
 * for exactly one retry (RETRY) with the port clock defaults reset.
 * Returns 0 on success or a negative errno on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;	/* allow a single RETRY round trip */

	/* Start from a clean state; only whitelisted fields survive. */
	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Only one retry is permitted; a second request is a loop. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
2327 Serge 12330
 
3031 serge 12331
/*
 * After a commit, point each intel_crtc at its new atomic state and refresh
 * crtc->hwmode (consumed by the vblank timestamping code); an inactive crtc
 * gets a zero hwmode clock.
 */
static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	/* Double check state. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

		/* Update hwmode for vblank functions */
		if (crtc->state->active)
			crtc->hwmode = crtc->state->adjusted_mode;
		else
			crtc->hwmode.crtc_clock = 0;
	}
}
2327 Serge 12349
 
4560 Serge 12350
/*
 * Compare two clock values, tolerating roughly 5% of relative difference.
 * Equal clocks always match; a zero clock only matches another zero
 * (caught by the equality test above it).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* delta/sum < 5% expressed in integer arithmetic. */
	return (delta + sum) * 100 / sum < 105;
}
12367
 
3031 serge 12368
/* Iterate over the intel CRTCs of @dev whose pipe bit is set in @mask. */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))
3031 serge 12373
 
3746 Serge 12374
static bool
6084 serge 12375
intel_compare_m_n(unsigned int m, unsigned int n,
12376
		  unsigned int m2, unsigned int n2,
12377
		  bool exact)
12378
{
12379
	if (m == m2 && n == n2)
12380
		return true;
12381
 
12382
	if (exact || !m || !n || !m2 || !n2)
12383
		return false;
12384
 
12385
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12386
 
12387
	if (m > m2) {
12388
		while (m > m2) {
12389
			m2 <<= 1;
12390
			n2 <<= 1;
12391
		}
12392
	} else if (m < m2) {
12393
		while (m < m2) {
12394
			m <<= 1;
12395
			n <<= 1;
12396
		}
12397
	}
12398
 
12399
	return m == m2 && n == n2;
12400
}
12401
 
12402
static bool
12403
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12404
		       struct intel_link_m_n *m2_n2,
12405
		       bool adjust)
12406
{
12407
	if (m_n->tu == m2_n2->tu &&
12408
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12409
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12410
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
12411
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
12412
		if (adjust)
12413
			*m2_n2 = *m_n;
12414
 
12415
		return true;
12416
	}
12417
 
12418
	return false;
12419
}
12420
 
12421
static bool
4104 Serge 12422
intel_pipe_config_compare(struct drm_device *dev,
6084 serge 12423
			  struct intel_crtc_state *current_config,
12424
			  struct intel_crtc_state *pipe_config,
12425
			  bool adjust)
3746 Serge 12426
{
6084 serge 12427
	bool ret = true;
12428
 
12429
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12430
	do { \
12431
		if (!adjust) \
12432
			DRM_ERROR(fmt, ##__VA_ARGS__); \
12433
		else \
12434
			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12435
	} while (0)
12436
 
4104 Serge 12437
#define PIPE_CONF_CHECK_X(name)	\
12438
	if (current_config->name != pipe_config->name) { \
6084 serge 12439
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
4104 Serge 12440
			  "(expected 0x%08x, found 0x%08x)\n", \
12441
			  current_config->name, \
12442
			  pipe_config->name); \
6084 serge 12443
		ret = false; \
3746 Serge 12444
	}
12445
 
4104 Serge 12446
#define PIPE_CONF_CHECK_I(name)	\
12447
	if (current_config->name != pipe_config->name) { \
6084 serge 12448
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
4104 Serge 12449
			  "(expected %i, found %i)\n", \
12450
			  current_config->name, \
12451
			  pipe_config->name); \
6084 serge 12452
		ret = false; \
4104 Serge 12453
	}
12454
 
6084 serge 12455
#define PIPE_CONF_CHECK_M_N(name) \
12456
	if (!intel_compare_link_m_n(¤t_config->name, \
12457
				    &pipe_config->name,\
12458
				    adjust)) { \
12459
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12460
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12461
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12462
			  current_config->name.tu, \
12463
			  current_config->name.gmch_m, \
12464
			  current_config->name.gmch_n, \
12465
			  current_config->name.link_m, \
12466
			  current_config->name.link_n, \
12467
			  pipe_config->name.tu, \
12468
			  pipe_config->name.gmch_m, \
12469
			  pipe_config->name.gmch_n, \
12470
			  pipe_config->name.link_m, \
12471
			  pipe_config->name.link_n); \
12472
		ret = false; \
12473
	}
12474
 
12475
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12476
	if (!intel_compare_link_m_n(¤t_config->name, \
12477
				    &pipe_config->name, adjust) && \
12478
	    !intel_compare_link_m_n(¤t_config->alt_name, \
12479
				    &pipe_config->name, adjust)) { \
12480
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12481
			  "(expected tu %i gmch %i/%i link %i/%i, " \
12482
			  "or tu %i gmch %i/%i link %i/%i, " \
12483
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12484
			  current_config->name.tu, \
12485
			  current_config->name.gmch_m, \
12486
			  current_config->name.gmch_n, \
12487
			  current_config->name.link_m, \
12488
			  current_config->name.link_n, \
12489
			  current_config->alt_name.tu, \
12490
			  current_config->alt_name.gmch_m, \
12491
			  current_config->alt_name.gmch_n, \
12492
			  current_config->alt_name.link_m, \
12493
			  current_config->alt_name.link_n, \
12494
			  pipe_config->name.tu, \
12495
			  pipe_config->name.gmch_m, \
12496
			  pipe_config->name.gmch_n, \
12497
			  pipe_config->name.link_m, \
12498
			  pipe_config->name.link_n); \
12499
		ret = false; \
12500
	}
12501
 
5354 serge 12502
/* This is required for BDW+ where there is only one set of registers for
12503
 * switching between high and low RR.
12504
 * This macro can be used whenever a comparison has to be made between one
12505
 * hw state and multiple sw state variables.
12506
 */
12507
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12508
	if ((current_config->name != pipe_config->name) && \
12509
		(current_config->alt_name != pipe_config->name)) { \
6084 serge 12510
			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
5354 serge 12511
				  "(expected %i or %i, found %i)\n", \
12512
				  current_config->name, \
12513
				  current_config->alt_name, \
12514
				  pipe_config->name); \
6084 serge 12515
			ret = false; \
5354 serge 12516
	}
12517
 
4104 Serge 12518
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12519
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
6084 serge 12520
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
4104 Serge 12521
			  "(expected %i, found %i)\n", \
12522
			  current_config->name & (mask), \
12523
			  pipe_config->name & (mask)); \
6084 serge 12524
		ret = false; \
4104 Serge 12525
	}
12526
 
4560 Serge 12527
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12528
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6084 serge 12529
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
4560 Serge 12530
			  "(expected %i, found %i)\n", \
12531
			  current_config->name, \
12532
			  pipe_config->name); \
6084 serge 12533
		ret = false; \
4560 Serge 12534
	}
12535
 
4104 Serge 12536
#define PIPE_CONF_QUIRK(quirk)	\
12537
	((current_config->quirks | pipe_config->quirks) & (quirk))
12538
 
12539
	PIPE_CONF_CHECK_I(cpu_transcoder);
12540
 
12541
	PIPE_CONF_CHECK_I(has_pch_encoder);
12542
	PIPE_CONF_CHECK_I(fdi_lanes);
6084 serge 12543
	PIPE_CONF_CHECK_M_N(fdi_m_n);
4104 Serge 12544
 
4560 Serge 12545
	PIPE_CONF_CHECK_I(has_dp_encoder);
6084 serge 12546
	PIPE_CONF_CHECK_I(lane_count);
5354 serge 12547
 
12548
	if (INTEL_INFO(dev)->gen < 8) {
6084 serge 12549
		PIPE_CONF_CHECK_M_N(dp_m_n);
4560 Serge 12550
 
6084 serge 12551
		if (current_config->has_drrs)
12552
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12553
	} else
12554
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
5354 serge 12555
 
6084 serge 12556
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12557
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12558
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12559
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12560
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12561
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
4104 Serge 12562
 
6084 serge 12563
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12564
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12565
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12566
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12567
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12568
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
4104 Serge 12569
 
6084 serge 12570
	PIPE_CONF_CHECK_I(pixel_multiplier);
5060 serge 12571
	PIPE_CONF_CHECK_I(has_hdmi_sink);
12572
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12573
	    IS_VALLEYVIEW(dev))
12574
		PIPE_CONF_CHECK_I(limited_color_range);
5354 serge 12575
	PIPE_CONF_CHECK_I(has_infoframe);
4104 Serge 12576
 
5060 serge 12577
	PIPE_CONF_CHECK_I(has_audio);
12578
 
6084 serge 12579
	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12580
			      DRM_MODE_FLAG_INTERLACE);
12581
 
12582
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6084 serge 12583
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12584
				      DRM_MODE_FLAG_PHSYNC);
6084 serge 12585
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12586
				      DRM_MODE_FLAG_NHSYNC);
6084 serge 12587
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12588
				      DRM_MODE_FLAG_PVSYNC);
6084 serge 12589
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
4104 Serge 12590
				      DRM_MODE_FLAG_NVSYNC);
12591
	}
12592
 
6084 serge 12593
	PIPE_CONF_CHECK_X(gmch_pfit.control);
4104 Serge 12594
	/* pfit ratios are autocomputed by the hw on gen4+ */
12595
	if (INTEL_INFO(dev)->gen < 4)
12596
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
6084 serge 12597
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
5060 serge 12598
 
6084 serge 12599
	if (!adjust) {
12600
		PIPE_CONF_CHECK_I(pipe_src_w);
12601
		PIPE_CONF_CHECK_I(pipe_src_h);
12602
 
12603
		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12604
		if (current_config->pch_pfit.enabled) {
12605
			PIPE_CONF_CHECK_X(pch_pfit.pos);
12606
			PIPE_CONF_CHECK_X(pch_pfit.size);
12607
		}
12608
 
12609
		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
4104 Serge 12610
	}
12611
 
4560 Serge 12612
	/* BDW+ don't expose a synchronous way to read the state */
12613
	if (IS_HASWELL(dev))
6084 serge 12614
		PIPE_CONF_CHECK_I(ips_enabled);
4104 Serge 12615
 
4560 Serge 12616
	PIPE_CONF_CHECK_I(double_wide);
12617
 
5060 serge 12618
	PIPE_CONF_CHECK_X(ddi_pll_sel);
12619
 
4104 Serge 12620
	PIPE_CONF_CHECK_I(shared_dpll);
12621
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12622
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12623
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12624
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5060 serge 12625
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6084 serge 12626
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
5354 serge 12627
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12628
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12629
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
4104 Serge 12630
 
4280 Serge 12631
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12632
		PIPE_CONF_CHECK_I(pipe_bpp);
12633
 
6084 serge 12634
	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12635
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
4560 Serge 12636
 
4104 Serge 12637
#undef PIPE_CONF_CHECK_X
12638
#undef PIPE_CONF_CHECK_I
5354 serge 12639
#undef PIPE_CONF_CHECK_I_ALT
4104 Serge 12640
#undef PIPE_CONF_CHECK_FLAGS
4560 Serge 12641
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
4104 Serge 12642
#undef PIPE_CONF_QUIRK
6084 serge 12643
#undef INTEL_ERR_OR_DBG_KMS
4104 Serge 12644
 
6084 serge 12645
	return ret;
3746 Serge 12646
}
12647
 
5354 serge 12648
/*
 * Gen9+ only: cross-check the software copy of the DDB (display buffer)
 * allocation against the hardware state, for every plane and the cursor
 * of each active CRTC.  Mismatches are reported via DRM_ERROR only.
 */
static void check_wm_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct intel_crtc *intel_crtc;
	int plane;

	/* The SKL DDB layout only exists on gen9+. */
	if (INTEL_INFO(dev)->gen < 9)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_ddb_entry *hw_entry, *sw_entry;
		const enum pipe pipe = intel_crtc->pipe;

		if (!intel_crtc->active)
			continue;

		/* planes */
		for_each_plane(dev_priv, pipe, plane) {
			hw_entry = &hw_ddb.plane[pipe][plane];
			sw_entry = &sw_ddb->plane[pipe][plane];

			if (skl_ddb_entry_equal(hw_entry, sw_entry))
				continue;

			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
				  "(expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_entry->start, sw_entry->end,
				  hw_entry->start, hw_entry->end);
		}

		/* cursor */
		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}
12697
 
4104 Serge 12698
/*
 * Verify every connector touched by @old_state: run the per-connector hw
 * state check and make sure the atomic best_encoder agrees with the legacy
 * connector->encoder pointer.
 */
static void
check_connector_state(struct drm_device *dev,
		      struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_connector_state *state = connector->state;

		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(to_intel_connector(connector));

		I915_STATE_WARN(state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
3031 serge 12718
 
4104 Serge 12719
/*
 * Verify encoder software state: each encoder's enabled/crtc bookkeeping
 * must agree with the connectors pointing at it, and a detached encoder
 * must report itself disabled in hardware.
 */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;	/* any connector using this encoder? */
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* A detached encoder must also be off in hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
3031 serge 12758
 
4104 Serge 12759
/*
 * For every CRTC that was modeset (or had a fast pipe update), read the
 * pipe configuration back from hardware into the now-unused old state
 * storage and compare it against the committed software state, warning
 * on any disagreement.
 */
static void
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_crtc_state *pipe_config, *sw_config;
		bool active;

		if (!needs_modeset(crtc->state) &&
		    !to_intel_crtc_state(crtc->state)->update_pipe)
			continue;

		/* Recycle the old state's storage as scratch space for the
		 * hw readback. */
		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
		pipe_config = to_intel_crtc_state(old_crtc_state);
		memset(pipe_config, 0, sizeof(*pipe_config));
		pipe_config->base.crtc = crtc;
		pipe_config->base.state = old_state;

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.id);

		active = dev_priv->display.get_pipe_config(intel_crtc,
							   pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->state->active;

		I915_STATE_WARN(crtc->state->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->state->active, active);

		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
		     "transitional active state does not match atomic hw state "
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);

		for_each_encoder_on_crtc(dev, crtc, encoder) {
			enum pipe pipe;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active != crtc->state->active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active, crtc->state->active);

			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
					"Encoder connected to wrong pipe %c\n",
					pipe_name(pipe));

			/* Let active encoders fill in their part of the
			 * readback config. */
			if (active)
				encoder->get_config(encoder, pipe_config);
		}

		if (!crtc->state->active)
			continue;

		sw_config = to_intel_crtc_state(crtc->state);
		if (!intel_pipe_config_compare(dev, sw_config,
					       pipe_config, false)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(intel_crtc, pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(intel_crtc, sw_config,
					       "[sw state]");
		}
	}
}
12832
 
4104 Serge 12833
/*
 * Verify shared DPLL bookkeeping: reference/active counts, on/off state
 * versus hardware, per-crtc usage, and the cached hw state snapshot.
 */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
		     "more active pll users than references: %i vs %i\n",
		     pll->active, hweight32(pll->config.crtc_mask));
		I915_STATE_WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* Recount users from the crtc list and compare. */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		I915_STATE_WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     hweight32(pll->config.crtc_mask), enabled_crtcs);

		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}
12881
 
6084 serge 12882
/*
 * Top-level post-commit sanity checker: runs the watermark/DDB, connector,
 * encoder, CRTC and shared-DPLL checks, in that order.
 */
static void
intel_modeset_check_state(struct drm_device *dev,
			  struct drm_atomic_state *old_state)
{
	check_wm_state(dev);
	check_connector_state(dev, old_state);
	check_encoder_state(dev);
	check_crtc_state(dev, old_state);
	check_shared_dpll_state(dev);
}
12892
 
6084 serge 12893
/*
 * Fuzzy-compare an encoder-reported @dotclock against the FDI-derived
 * crtc_clock in @pipe_config and WARN on disagreement.
 */
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
	 */
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}
12904
 
5060 serge 12905
/*
 * Recompute crtc->scanline_offset, the per-platform correction between the
 * raw hardware scanline counter and the logical scanline (see below).
 */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}
12942
 
6084 serge 12943
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
5354 serge 12944
{
6084 serge 12945
	struct drm_device *dev = state->dev;
12946
	struct drm_i915_private *dev_priv = to_i915(dev);
12947
	struct intel_shared_dpll_config *shared_dpll = NULL;
12948
	struct intel_crtc *intel_crtc;
12949
	struct intel_crtc_state *intel_crtc_state;
12950
	struct drm_crtc *crtc;
12951
	struct drm_crtc_state *crtc_state;
12952
	int i;
5354 serge 12953
 
6084 serge 12954
	if (!dev_priv->display.crtc_compute_clock)
12955
		return;
5354 serge 12956
 
6084 serge 12957
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12958
		int dpll;
5354 serge 12959
 
6084 serge 12960
		intel_crtc = to_intel_crtc(crtc);
12961
		intel_crtc_state = to_intel_crtc_state(crtc_state);
12962
		dpll = intel_crtc_state->shared_dpll;
12963
 
12964
		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
12965
			continue;
12966
 
12967
		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
12968
 
12969
		if (!shared_dpll)
12970
			shared_dpll = intel_atomic_get_shared_dpll_state(state);
12971
 
12972
		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
5354 serge 12973
	}
12974
}
12975
 
6084 serge 12976
/*
12977
 * This implements the workaround described in the "notes" section of the mode
12978
 * set sequence documentation. When going from no pipes or single pipe to
12979
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
12980
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12981
 */
12982
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
3031 serge 12983
{
6084 serge 12984
	struct drm_crtc_state *crtc_state;
3031 serge 12985
	struct intel_crtc *intel_crtc;
6084 serge 12986
	struct drm_crtc *crtc;
12987
	struct intel_crtc_state *first_crtc_state = NULL;
12988
	struct intel_crtc_state *other_crtc_state = NULL;
12989
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
12990
	int i;
3031 serge 12991
 
6084 serge 12992
	/* look at all crtc's that are going to be enabled in during modeset */
12993
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12994
		intel_crtc = to_intel_crtc(crtc);
3480 Serge 12995
 
6084 serge 12996
		if (!crtc_state->active || !needs_modeset(crtc_state))
12997
			continue;
3031 serge 12998
 
6084 serge 12999
		if (first_crtc_state) {
13000
			other_crtc_state = to_intel_crtc_state(crtc_state);
13001
			break;
13002
		} else {
13003
			first_crtc_state = to_intel_crtc_state(crtc_state);
13004
			first_pipe = intel_crtc->pipe;
13005
		}
13006
	}
3031 serge 13007
 
6084 serge 13008
	/* No workaround needed? */
13009
	if (!first_crtc_state)
13010
		return 0;
4560 Serge 13011
 
6084 serge 13012
	/* w/a possibly needed, check how many crtc's are already enabled. */
13013
	for_each_intel_crtc(state->dev, intel_crtc) {
13014
		struct intel_crtc_state *pipe_config;
4560 Serge 13015
 
6084 serge 13016
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13017
		if (IS_ERR(pipe_config))
13018
			return PTR_ERR(pipe_config);
5354 serge 13019
 
6084 serge 13020
		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
5354 serge 13021
 
6084 serge 13022
		if (!pipe_config->base.active ||
13023
		    needs_modeset(&pipe_config->base))
13024
			continue;
5354 serge 13025
 
6084 serge 13026
		/* 2 or more enabled crtcs means no need for w/a */
13027
		if (enabled_pipe != INVALID_PIPE)
13028
			return 0;
3746 Serge 13029
 
6084 serge 13030
		enabled_pipe = intel_crtc->pipe;
3031 serge 13031
	}
13032
 
6084 serge 13033
	if (enabled_pipe != INVALID_PIPE)
13034
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13035
	else if (other_crtc_state)
13036
		other_crtc_state->hsw_workaround_pipe = first_pipe;
4560 Serge 13037
 
6084 serge 13038
	return 0;
13039
}
2327 Serge 13040
 
6084 serge 13041
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13042
{
13043
	struct drm_crtc *crtc;
13044
	struct drm_crtc_state *crtc_state;
13045
	int ret = 0;
3031 serge 13046
 
6084 serge 13047
	/* add all active pipes to the state */
13048
	for_each_crtc(state->dev, crtc) {
13049
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13050
		if (IS_ERR(crtc_state))
13051
			return PTR_ERR(crtc_state);
3243 Serge 13052
 
6084 serge 13053
		if (!crtc_state->active || needs_modeset(crtc_state))
13054
			continue;
5060 serge 13055
 
6084 serge 13056
		crtc_state->mode_changed = true;
5060 serge 13057
 
6084 serge 13058
		ret = drm_atomic_add_affected_connectors(state, crtc);
13059
		if (ret)
13060
			break;
3031 serge 13061
 
6084 serge 13062
		ret = drm_atomic_add_affected_planes(state, crtc);
13063
		if (ret)
13064
			break;
5060 serge 13065
	}
3031 serge 13066
 
13067
	return ret;
2330 Serge 13068
}
2327 Serge 13069
 
6084 serge 13070
static int intel_modeset_checks(struct drm_atomic_state *state)
3746 Serge 13071
{
6084 serge 13072
	struct drm_device *dev = state->dev;
13073
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 13074
	int ret;
13075
 
6084 serge 13076
	if (!check_digital_port_conflicts(state)) {
13077
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13078
		return -EINVAL;
13079
	}
3746 Serge 13080
 
6084 serge 13081
	/*
13082
	 * See if the config requires any additional preparation, e.g.
13083
	 * to adjust global state with pipes off.  We need to do this
13084
	 * here so we can get the modeset_pipe updated config for the new
13085
	 * mode set on this crtc.  For other crtcs we need to use the
13086
	 * adjusted_mode bits in the crtc directly.
13087
	 */
13088
	if (dev_priv->display.modeset_calc_cdclk) {
13089
		unsigned int cdclk;
3746 Serge 13090
 
6084 serge 13091
		ret = dev_priv->display.modeset_calc_cdclk(state);
3746 Serge 13092
 
6084 serge 13093
		cdclk = to_intel_atomic_state(state)->cdclk;
13094
		if (!ret && cdclk != dev_priv->cdclk_freq)
13095
			ret = intel_modeset_all_pipes(state);
5354 serge 13096
 
6084 serge 13097
		if (ret < 0)
13098
			return ret;
13099
	} else
13100
		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
5354 serge 13101
 
6084 serge 13102
	intel_modeset_clear_plls(state);
5354 serge 13103
 
6084 serge 13104
	if (IS_HASWELL(dev))
13105
		return haswell_mode_set_planes_workaround(state);
5354 serge 13106
 
6084 serge 13107
	return 0;
3480 Serge 13108
}
13109
 
6084 serge 13110
/**
13111
 * intel_atomic_check - validate state object
13112
 * @dev: drm device
13113
 * @state: state to validate
13114
 */
13115
static int intel_atomic_check(struct drm_device *dev,
13116
			      struct drm_atomic_state *state)
3031 serge 13117
{
6084 serge 13118
	struct drm_crtc *crtc;
13119
	struct drm_crtc_state *crtc_state;
13120
	int ret, i;
13121
	bool any_ms = false;
3031 serge 13122
 
6084 serge 13123
	ret = drm_atomic_helper_check_modeset(dev, state);
13124
	if (ret)
13125
		return ret;
3031 serge 13126
 
6084 serge 13127
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13128
		struct intel_crtc_state *pipe_config =
13129
			to_intel_crtc_state(crtc_state);
3031 serge 13130
 
6084 serge 13131
		memset(&to_intel_crtc(crtc)->atomic, 0,
13132
		       sizeof(struct intel_crtc_atomic_commit));
5060 serge 13133
 
6084 serge 13134
		/* Catch I915_MODE_FLAG_INHERITED */
13135
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13136
			crtc_state->mode_changed = true;
3031 serge 13137
 
6084 serge 13138
		if (!crtc_state->enable) {
13139
			if (needs_modeset(crtc_state))
13140
				any_ms = true;
13141
			continue;
13142
		}
3031 serge 13143
 
6084 serge 13144
		if (!needs_modeset(crtc_state))
13145
			continue;
5060 serge 13146
 
6084 serge 13147
		/* FIXME: For only active_changed we shouldn't need to do any
13148
		 * state recomputation at all. */
3031 serge 13149
 
6084 serge 13150
		ret = drm_atomic_add_affected_connectors(state, crtc);
13151
		if (ret)
13152
			return ret;
3031 serge 13153
 
6084 serge 13154
		ret = intel_modeset_pipe_config(crtc, pipe_config);
13155
		if (ret)
13156
			return ret;
3031 serge 13157
 
6084 serge 13158
		if (i915.fastboot &&
13159
		    intel_pipe_config_compare(state->dev,
13160
					to_intel_crtc_state(crtc->state),
13161
					pipe_config, true)) {
13162
			crtc_state->mode_changed = false;
13163
			to_intel_crtc_state(crtc_state)->update_pipe = true;
13164
		}
3031 serge 13165
 
6084 serge 13166
		if (needs_modeset(crtc_state)) {
13167
			any_ms = true;
5060 serge 13168
 
6084 serge 13169
			ret = drm_atomic_add_affected_planes(state, crtc);
13170
			if (ret)
13171
				return ret;
13172
		}
5060 serge 13173
 
6084 serge 13174
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13175
				       needs_modeset(crtc_state) ?
13176
				       "[modeset]" : "[fastset]");
3031 serge 13177
	}
13178
 
6084 serge 13179
	if (any_ms) {
13180
		ret = intel_modeset_checks(state);
3031 serge 13181
 
6084 serge 13182
		if (ret)
13183
			return ret;
13184
	} else
13185
		to_intel_atomic_state(state)->cdclk =
13186
			to_i915(state->dev)->cdclk_freq;
3746 Serge 13187
 
6084 serge 13188
	return drm_atomic_helper_check_planes(state->dev, state);
3746 Serge 13189
}
13190
 
6084 serge 13191
/**
13192
 * intel_atomic_commit - commit validated state object
13193
 * @dev: DRM device
13194
 * @state: the top-level driver state object
13195
 * @async: asynchronous commit
13196
 *
13197
 * This function commits a top-level state object that has been validated
13198
 * with drm_atomic_helper_check().
13199
 *
13200
 * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13201
 * we can only handle plane-related operations and do not yet support
13202
 * asynchronous commit.
13203
 *
13204
 * RETURNS
13205
 * Zero for success or -errno.
13206
 */
13207
static int intel_atomic_commit(struct drm_device *dev,
13208
			       struct drm_atomic_state *state,
13209
			       bool async)
3031 serge 13210
{
6084 serge 13211
	struct drm_i915_private *dev_priv = dev->dev_private;
13212
	struct drm_crtc *crtc;
13213
	struct drm_crtc_state *crtc_state;
13214
	int ret = 0;
13215
	int i;
13216
	bool any_ms = false;
3031 serge 13217
 
6084 serge 13218
	if (async) {
13219
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
13220
		return -EINVAL;
3031 serge 13221
	}
13222
 
6084 serge 13223
	ret = drm_atomic_helper_prepare_planes(dev, state);
13224
	if (ret)
13225
		return ret;
3031 serge 13226
 
6084 serge 13227
	drm_atomic_helper_swap_state(dev, state);
4104 Serge 13228
 
6084 serge 13229
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13230
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 13231
 
6084 serge 13232
		if (!needs_modeset(crtc->state))
13233
			continue;
3031 serge 13234
 
6084 serge 13235
		any_ms = true;
13236
		intel_pre_plane_update(intel_crtc);
3031 serge 13237
 
6084 serge 13238
		if (crtc_state->active) {
13239
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
13240
			dev_priv->display.crtc_disable(crtc);
13241
			intel_crtc->active = false;
13242
			intel_disable_shared_dpll(intel_crtc);
3031 serge 13243
		}
6084 serge 13244
	}
3031 serge 13245
 
6084 serge 13246
	/* Only after disabling all output pipelines that will be changed can we
13247
	 * update the the output configuration. */
13248
	intel_modeset_update_crtc_state(state);
3031 serge 13249
 
6084 serge 13250
	if (any_ms) {
13251
		intel_shared_dpll_commit(state);
3031 serge 13252
 
6084 serge 13253
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13254
		modeset_update_crtc_power_domains(state);
3031 serge 13255
	}
13256
 
6084 serge 13257
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13258
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13259
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13260
		bool modeset = needs_modeset(crtc->state);
13261
		bool update_pipe = !modeset &&
13262
			to_intel_crtc_state(crtc->state)->update_pipe;
13263
		unsigned long put_domains = 0;
5060 serge 13264
 
6084 serge 13265
		if (modeset && crtc->state->active) {
13266
			update_scanline_offset(to_intel_crtc(crtc));
13267
			dev_priv->display.crtc_enable(crtc);
3031 serge 13268
		}
13269
 
6084 serge 13270
		if (update_pipe) {
13271
			put_domains = modeset_get_crtc_power_domains(crtc);
3031 serge 13272
 
6084 serge 13273
			/* make sure intel_modeset_check_state runs */
13274
			any_ms = true;
3031 serge 13275
		}
4560 Serge 13276
 
6084 serge 13277
		if (!modeset)
13278
			intel_pre_plane_update(intel_crtc);
4560 Serge 13279
 
6084 serge 13280
		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
3031 serge 13281
 
6084 serge 13282
		if (put_domains)
13283
			modeset_put_power_domains(dev_priv, put_domains);
5060 serge 13284
 
6084 serge 13285
		intel_post_plane_update(intel_crtc);
5060 serge 13286
	}
13287
 
6084 serge 13288
	/* FIXME: add subpixel order */
3031 serge 13289
 
6088 serge 13290
	drm_atomic_helper_wait_for_vblanks(dev, state);
6084 serge 13291
	drm_atomic_helper_cleanup_planes(dev, state);
5060 serge 13292
 
6084 serge 13293
	if (any_ms)
13294
		intel_modeset_check_state(dev, state);
5060 serge 13295
 
6084 serge 13296
	drm_atomic_state_free(state);
5060 serge 13297
 
6084 serge 13298
	return 0;
5060 serge 13299
}
13300
 
6084 serge 13301
void intel_crtc_restore_mode(struct drm_crtc *crtc)
3031 serge 13302
{
6084 serge 13303
	struct drm_device *dev = crtc->dev;
13304
	struct drm_atomic_state *state;
13305
	struct drm_crtc_state *crtc_state;
3031 serge 13306
	int ret;
13307
 
6084 serge 13308
	state = drm_atomic_state_alloc(dev);
13309
	if (!state) {
13310
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
13311
			      crtc->base.id);
13312
		return;
3031 serge 13313
	}
13314
 
6084 serge 13315
	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
3031 serge 13316
 
6084 serge 13317
retry:
13318
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
13319
	ret = PTR_ERR_OR_ZERO(crtc_state);
13320
	if (!ret) {
13321
		if (!crtc_state->active)
13322
			goto out;
3031 serge 13323
 
6084 serge 13324
		crtc_state->mode_changed = true;
13325
		ret = drm_atomic_commit(state);
5354 serge 13326
	}
13327
 
6084 serge 13328
	if (ret == -EDEADLK) {
13329
		drm_atomic_state_clear(state);
13330
		drm_modeset_backoff(state->acquire_ctx);
13331
		goto retry;
3031 serge 13332
	}
13333
 
6084 serge 13334
	if (ret)
13335
out:
13336
		drm_atomic_state_free(state);
13337
}
3031 serge 13338
 
6084 serge 13339
#undef for_each_intel_crtc_masked
5060 serge 13340
 
2330 Serge 13341
static const struct drm_crtc_funcs intel_crtc_funcs = {
13342
	.gamma_set = intel_crtc_gamma_set,
6084 serge 13343
	.set_config = drm_atomic_helper_set_config,
2330 Serge 13344
	.destroy = intel_crtc_destroy,
6320 serge 13345
	.page_flip = intel_crtc_page_flip,
6084 serge 13346
	.atomic_duplicate_state = intel_crtc_duplicate_state,
13347
	.atomic_destroy_state = intel_crtc_destroy_state,
2330 Serge 13348
};
2327 Serge 13349
 
4104 Serge 13350
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13351
				      struct intel_shared_dpll *pll,
13352
				      struct intel_dpll_hw_state *hw_state)
3031 serge 13353
{
4104 Serge 13354
	uint32_t val;
3031 serge 13355
 
5354 serge 13356
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
5060 serge 13357
		return false;
13358
 
4104 Serge 13359
	val = I915_READ(PCH_DPLL(pll->id));
13360
	hw_state->dpll = val;
13361
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13362
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13363
 
13364
	return val & DPLL_VCO_ENABLE;
13365
}
13366
 
13367
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
13368
				  struct intel_shared_dpll *pll)
13369
{
5354 serge 13370
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
13371
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
4104 Serge 13372
}
13373
 
13374
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
13375
				struct intel_shared_dpll *pll)
13376
{
13377
	/* PCH refclock must be enabled first */
4560 Serge 13378
	ibx_assert_pch_refclk_enabled(dev_priv);
4104 Serge 13379
 
5354 serge 13380
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
4104 Serge 13381
 
13382
	/* Wait for the clocks to stabilize. */
13383
	POSTING_READ(PCH_DPLL(pll->id));
13384
	udelay(150);
13385
 
13386
	/* The pixel multiplier can only be updated once the
13387
	 * DPLL is enabled and the clocks are stable.
13388
	 *
13389
	 * So write it again.
13390
	 */
5354 serge 13391
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
4104 Serge 13392
	POSTING_READ(PCH_DPLL(pll->id));
13393
	udelay(200);
13394
}
13395
 
13396
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
13397
				 struct intel_shared_dpll *pll)
13398
{
13399
	struct drm_device *dev = dev_priv->dev;
13400
	struct intel_crtc *crtc;
13401
 
13402
	/* Make sure no transcoder isn't still depending on us. */
5060 serge 13403
	for_each_intel_crtc(dev, crtc) {
4104 Serge 13404
		if (intel_crtc_to_shared_dpll(crtc) == pll)
13405
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
3031 serge 13406
	}
13407
 
4104 Serge 13408
	I915_WRITE(PCH_DPLL(pll->id), 0);
13409
	POSTING_READ(PCH_DPLL(pll->id));
13410
	udelay(200);
13411
}
13412
 
13413
/* Human-readable names for the two IBX PCH DPLLs, indexed by pll->id. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
13417
 
13418
static void ibx_pch_dpll_init(struct drm_device *dev)
13419
{
13420
	struct drm_i915_private *dev_priv = dev->dev_private;
13421
	int i;
13422
 
13423
	dev_priv->num_shared_dpll = 2;
13424
 
13425
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13426
		dev_priv->shared_dplls[i].id = i;
13427
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
13428
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
13429
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13430
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
13431
		dev_priv->shared_dplls[i].get_hw_state =
13432
			ibx_pch_dpll_get_hw_state;
3031 serge 13433
	}
13434
}
13435
 
4104 Serge 13436
static void intel_shared_dpll_init(struct drm_device *dev)
13437
{
13438
	struct drm_i915_private *dev_priv = dev->dev_private;
13439
 
5060 serge 13440
	if (HAS_DDI(dev))
13441
		intel_ddi_pll_init(dev);
13442
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4104 Serge 13443
		ibx_pch_dpll_init(dev);
13444
	else
13445
		dev_priv->num_shared_dpll = 0;
13446
 
13447
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
13448
}
13449
 
6084 serge 13450
/**
13451
 * intel_prepare_plane_fb - Prepare fb for usage on plane
13452
 * @plane: drm plane to prepare for
13453
 * @fb: framebuffer to prepare for presentation
13454
 *
13455
 * Prepares a framebuffer for usage on a display plane.  Generally this
13456
 * involves pinning the underlying object and updating the frontbuffer tracking
13457
 * bits.  Some older platforms need special physical address handling for
13458
 * cursor planes.
13459
 *
13460
 * Returns 0 on success, negative error code on failure.
13461
 */
13462
int
13463
intel_prepare_plane_fb(struct drm_plane *plane,
13464
		       const struct drm_plane_state *new_state)
5060 serge 13465
{
13466
	struct drm_device *dev = plane->dev;
6084 serge 13467
	struct drm_framebuffer *fb = new_state->fb;
13468
	struct intel_plane *intel_plane = to_intel_plane(plane);
13469
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13470
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
13471
	int ret = 0;
5060 serge 13472
 
6084 serge 13473
	if (!obj)
5060 serge 13474
		return 0;
13475
 
6084 serge 13476
	mutex_lock(&dev->struct_mutex);
5060 serge 13477
 
6084 serge 13478
	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13479
	    INTEL_INFO(dev)->cursor_needs_physical) {
13480
		int align = IS_I830(dev) ? 16 * 1024 : 256;
13481
        ret = 1;
13482
		if (ret)
13483
			DRM_DEBUG_KMS("failed to attach phys object\n");
13484
	} else {
13485
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
13486
	}
5060 serge 13487
 
6084 serge 13488
	if (ret == 0)
13489
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
5060 serge 13490
 
13491
	mutex_unlock(&dev->struct_mutex);
13492
 
6084 serge 13493
	return ret;
5060 serge 13494
}
13495
 
6084 serge 13496
/**
13497
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13498
 * @plane: drm plane to clean up for
13499
 * @fb: old framebuffer that was on plane
13500
 *
13501
 * Cleans up a framebuffer that has just been removed from a plane.
13502
 */
13503
void
13504
intel_cleanup_plane_fb(struct drm_plane *plane,
13505
		       const struct drm_plane_state *old_state)
5060 serge 13506
{
6084 serge 13507
	struct drm_device *dev = plane->dev;
13508
	struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb);
5354 serge 13509
 
6084 serge 13510
	if (!obj)
13511
		return;
13512
 
13513
	if (plane->type != DRM_PLANE_TYPE_CURSOR ||
13514
	    !INTEL_INFO(dev)->cursor_needs_physical) {
13515
		mutex_lock(&dev->struct_mutex);
13516
		intel_unpin_fb_obj(old_state->fb, old_state);
13517
		mutex_unlock(&dev->struct_mutex);
13518
	}
5354 serge 13519
}
13520
 
6084 serge 13521
int
13522
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
5354 serge 13523
{
6084 serge 13524
	int max_scale;
13525
	struct drm_device *dev;
13526
	struct drm_i915_private *dev_priv;
13527
	int crtc_clock, cdclk;
5060 serge 13528
 
6084 serge 13529
	if (!intel_crtc || !crtc_state)
13530
		return DRM_PLANE_HELPER_NO_SCALING;
5060 serge 13531
 
6084 serge 13532
	dev = intel_crtc->base.dev;
13533
	dev_priv = dev->dev_private;
13534
	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13535
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
5060 serge 13536
 
6084 serge 13537
	if (!crtc_clock || !cdclk)
13538
		return DRM_PLANE_HELPER_NO_SCALING;
13539
 
13540
	/*
13541
	 * skl max scale is lower of:
13542
	 *    close to 3 but not 3, -1 is for that purpose
13543
	 *            or
13544
	 *    cdclk/crtc_clock
13545
	 */
13546
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13547
 
13548
	return max_scale;
13549
}
13550
 
13551
static int
13552
intel_check_primary_plane(struct drm_plane *plane,
13553
			  struct intel_crtc_state *crtc_state,
13554
			  struct intel_plane_state *state)
13555
{
13556
	struct drm_crtc *crtc = state->base.crtc;
13557
	struct drm_framebuffer *fb = state->base.fb;
13558
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13559
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13560
	bool can_position = false;
13561
 
6320 serge 13562
	if (INTEL_INFO(plane->dev)->gen >= 9) {
6084 serge 13563
	/* use scaler when colorkey is not required */
6320 serge 13564
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
6084 serge 13565
		min_scale = 1;
13566
		max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
6320 serge 13567
		}
6084 serge 13568
		can_position = true;
5354 serge 13569
	}
5060 serge 13570
 
6084 serge 13571
	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13572
					     &state->dst, &state->clip,
13573
					     min_scale, max_scale,
13574
					     can_position, true,
13575
					     &state->visible);
5354 serge 13576
}
13577
 
13578
static void
13579
intel_commit_primary_plane(struct drm_plane *plane,
13580
			   struct intel_plane_state *state)
13581
{
6084 serge 13582
	struct drm_crtc *crtc = state->base.crtc;
13583
	struct drm_framebuffer *fb = state->base.fb;
13584
	struct drm_device *dev = plane->dev;
5354 serge 13585
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 13586
	struct intel_crtc *intel_crtc;
5354 serge 13587
	struct drm_rect *src = &state->src;
13588
 
6084 serge 13589
	crtc = crtc ? crtc : plane->crtc;
13590
	intel_crtc = to_intel_crtc(crtc);
13591
 
13592
	plane->fb = fb;
5354 serge 13593
	crtc->x = src->x1 >> 16;
13594
	crtc->y = src->y1 >> 16;
13595
 
6084 serge 13596
	if (!crtc->state->active)
13597
		return;
5354 serge 13598
 
6084 serge 13599
	dev_priv->display.update_primary_plane(crtc, fb,
13600
					       state->src.x1 >> 16,
13601
					       state->src.y1 >> 16);
13602
}
5060 serge 13603
 
6084 serge 13604
static void
13605
intel_disable_primary_plane(struct drm_plane *plane,
13606
			    struct drm_crtc *crtc)
13607
{
13608
	struct drm_device *dev = plane->dev;
13609
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 13610
 
6084 serge 13611
	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
13612
}
5060 serge 13613
 
6084 serge 13614
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13615
				    struct drm_crtc_state *old_crtc_state)
13616
{
13617
	struct drm_device *dev = crtc->dev;
13618
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13619
	struct intel_crtc_state *old_intel_state =
13620
		to_intel_crtc_state(old_crtc_state);
13621
	bool modeset = needs_modeset(crtc->state);
5060 serge 13622
 
6084 serge 13623
	if (intel_crtc->atomic.update_wm_pre)
13624
		intel_update_watermarks(crtc);
5060 serge 13625
 
6084 serge 13626
	/* Perform vblank evasion around commit operation */
13627
	if (crtc->state->active)
13628
		intel_pipe_update_start(intel_crtc);
5354 serge 13629
 
6084 serge 13630
	if (modeset)
13631
		return;
5354 serge 13632
 
6084 serge 13633
	if (to_intel_crtc_state(crtc->state)->update_pipe)
13634
		intel_update_pipe_config(intel_crtc, old_intel_state);
13635
	else if (INTEL_INFO(dev)->gen >= 9)
13636
		skl_detach_scalers(intel_crtc);
5354 serge 13637
}
5060 serge 13638
 
6084 serge 13639
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13640
				     struct drm_crtc_state *old_crtc_state)
5354 serge 13641
{
13642
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 13643
 
6084 serge 13644
	if (crtc->state->active)
13645
		intel_pipe_update_end(intel_crtc);
5060 serge 13646
}
13647
 
6084 serge 13648
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
13661
 
6084 serge 13662
const struct drm_plane_funcs intel_plane_funcs = {
13663
	.update_plane = drm_atomic_helper_update_plane,
13664
	.disable_plane = drm_atomic_helper_disable_plane,
5060 serge 13665
	.destroy = intel_plane_destroy,
6084 serge 13666
	.set_property = drm_atomic_helper_plane_set_property,
13667
	.atomic_get_property = intel_plane_atomic_get_property,
13668
	.atomic_set_property = intel_plane_atomic_set_property,
13669
	.atomic_duplicate_state = intel_plane_duplicate_state,
13670
	.atomic_destroy_state = intel_plane_destroy_state,
13671
 
5060 serge 13672
};
13673
 
13674
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
13675
						    int pipe)
13676
{
13677
	struct intel_plane *primary;
6084 serge 13678
	struct intel_plane_state *state;
5060 serge 13679
	const uint32_t *intel_primary_formats;
6084 serge 13680
	unsigned int num_formats;
5060 serge 13681
 
13682
	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
13683
	if (primary == NULL)
13684
		return NULL;
13685
 
6084 serge 13686
	state = intel_create_plane_state(&primary->base);
13687
	if (!state) {
13688
		kfree(primary);
13689
		return NULL;
13690
	}
13691
	primary->base.state = &state->base;
13692
 
5060 serge 13693
	primary->can_scale = false;
13694
	primary->max_downscale = 1;
6084 serge 13695
	if (INTEL_INFO(dev)->gen >= 9) {
13696
		primary->can_scale = true;
13697
		state->scaler_id = -1;
13698
	}
5060 serge 13699
	primary->pipe = pipe;
13700
	primary->plane = pipe;
6084 serge 13701
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
13702
	primary->check_plane = intel_check_primary_plane;
13703
	primary->commit_plane = intel_commit_primary_plane;
13704
	primary->disable_plane = intel_disable_primary_plane;
5060 serge 13705
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
13706
		primary->plane = !pipe;
13707
 
6084 serge 13708
	if (INTEL_INFO(dev)->gen >= 9) {
13709
		intel_primary_formats = skl_primary_formats;
13710
		num_formats = ARRAY_SIZE(skl_primary_formats);
13711
	} else if (INTEL_INFO(dev)->gen >= 4) {
13712
		intel_primary_formats = i965_primary_formats;
13713
		num_formats = ARRAY_SIZE(i965_primary_formats);
5060 serge 13714
	} else {
6084 serge 13715
		intel_primary_formats = i8xx_primary_formats;
13716
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
5060 serge 13717
	}
13718
 
13719
	drm_universal_plane_init(dev, &primary->base, 0,
6084 serge 13720
				 &intel_plane_funcs,
5060 serge 13721
				 intel_primary_formats, num_formats,
13722
				 DRM_PLANE_TYPE_PRIMARY);
5354 serge 13723
 
6084 serge 13724
	if (INTEL_INFO(dev)->gen >= 4)
13725
		intel_create_rotation_property(dev, primary);
5354 serge 13726
 
6084 serge 13727
	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
13728
 
5060 serge 13729
	return &primary->base;
13730
}
13731
 
6084 serge 13732
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
5060 serge 13733
{
6084 serge 13734
	if (!dev->mode_config.rotation_property) {
13735
		unsigned long flags = BIT(DRM_ROTATE_0) |
13736
			BIT(DRM_ROTATE_180);
5060 serge 13737
 
6084 serge 13738
		if (INTEL_INFO(dev)->gen >= 9)
13739
			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
5060 serge 13740
 
6084 serge 13741
		dev->mode_config.rotation_property =
13742
			drm_mode_create_rotation_property(dev, flags);
13743
	}
13744
	if (dev->mode_config.rotation_property)
13745
		drm_object_attach_property(&plane->base.base,
13746
				dev->mode_config.rotation_property,
13747
				plane->base.state->rotation);
5060 serge 13748
}
13749
 
13750
static int
5354 serge 13751
intel_check_cursor_plane(struct drm_plane *plane,
6084 serge 13752
			 struct intel_crtc_state *crtc_state,
5354 serge 13753
			 struct intel_plane_state *state)
5060 serge 13754
{
6084 serge 13755
	struct drm_crtc *crtc = crtc_state->base.crtc;
13756
	struct drm_framebuffer *fb = state->base.fb;
5354 serge 13757
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
6084 serge 13758
	enum pipe pipe = to_intel_plane(plane)->pipe;
5354 serge 13759
	unsigned stride;
5060 serge 13760
	int ret;
13761
 
6084 serge 13762
	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13763
					    &state->dst, &state->clip,
5060 serge 13764
					    DRM_PLANE_HELPER_NO_SCALING,
13765
					    DRM_PLANE_HELPER_NO_SCALING,
5354 serge 13766
					    true, true, &state->visible);
5060 serge 13767
	if (ret)
13768
		return ret;
13769
 
5354 serge 13770
	/* if we want to turn off the cursor ignore width and height */
13771
	if (!obj)
13772
		return 0;
13773
 
13774
	/* Check for which cursor types we support */
6084 serge 13775
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
13776
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
13777
			  state->base.crtc_w, state->base.crtc_h);
5354 serge 13778
		return -EINVAL;
13779
	}
13780
 
6084 serge 13781
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
13782
	if (obj->base.size < stride * state->base.crtc_h) {
5354 serge 13783
		DRM_DEBUG_KMS("buffer is too small\n");
13784
		return -ENOMEM;
13785
	}
13786
 
6084 serge 13787
	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
5354 serge 13788
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
6084 serge 13789
		return -EINVAL;
5354 serge 13790
	}
13791
 
6084 serge 13792
	/*
13793
	 * There's something wrong with the cursor on CHV pipe C.
13794
	 * If it straddles the left edge of the screen then
13795
	 * moving it away from the edge or disabling it often
13796
	 * results in a pipe underrun, and often that can lead to
13797
	 * dead pipe (constant underrun reported, and it scans
13798
	 * out just a solid color). To recover from that, the
13799
	 * display power well must be turned off and on again.
13800
	 * Refuse the put the cursor into that compromised position.
13801
	 */
13802
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
13803
	    state->visible && state->base.crtc_x < 0) {
13804
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
13805
		return -EINVAL;
13806
	}
13807
 
13808
	return 0;
5354 serge 13809
}
13810
 
6084 serge 13811
/*
 * Plane-disable hook for the cursor plane: hide the cursor on the given
 * CRTC (visible = false) without touching any other cursor state.
 */
static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc)
{
	intel_crtc_update_cursor(crtc, false);
}
5354 serge 13817
 
6084 serge 13818
/*
 * Commit hook for the cursor plane.
 *
 * Resolves the display address of the cursor BO — GGTT offset normally,
 * or the physical bus address on platforms where cursor_needs_physical —
 * caches it in the CRTC, and reprograms the cursor registers if the CRTC
 * is active.  A NULL fb object means "cursor off" and uses address 0.
 */
static void
intel_commit_cursor_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc;
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	/* Fall back to the plane's current CRTC when the state carries none. */
	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
		addr = i915_gem_obj_ggtt_offset(obj);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;

	/* Only touch the hardware while the pipe is running. */
	if (crtc->state->active)
		intel_crtc_update_cursor(crtc, state->visible);
}
13843
 
5060 serge 13844
/*
 * Allocate and register the cursor plane for @pipe.
 *
 * Sets up the intel_plane wrapper (no scaling for cursors), registers it
 * with the DRM core as a DRM_PLANE_TYPE_CURSOR universal plane, and on
 * gen4+ attaches the 0°/180° rotation property.  Returns the new
 * drm_plane, or NULL on allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;
	struct intel_plane_state *state;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		kfree(cursor);
		return NULL;
	}
	cursor->base.state = &state->base;

	/* Cursor planes can never be scaled. */
	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->commit_plane = intel_commit_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	/* NOTE(review): init return value is ignored here — matches the
	 * driver's original behavior. */
	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);

	/* gen4+ cursors support 180 degree rotation; share one property
	 * object across all planes of the device. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				state->base.rotation);
	}

	/* gen9+: no pipe scaler assigned to the cursor by default. */
	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;
}
13895
 
6084 serge 13896
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
13897
	struct intel_crtc_state *crtc_state)
13898
{
13899
	int i;
13900
	struct intel_scaler *intel_scaler;
13901
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
13902
 
13903
	for (i = 0; i < intel_crtc->num_scalers; i++) {
13904
		intel_scaler = &scaler_state->scalers[i];
13905
		intel_scaler->in_use = 0;
13906
		intel_scaler->mode = PS_SCALER_MODE_DYN;
13907
	}
13908
 
13909
	scaler_state->scaler_id = -1;
13910
}
13911
 
2330 Serge 13912
/*
 * Allocate and register one CRTC (pipe) together with its primary and
 * cursor planes, gamma LUT, gen9+ scaler state, and the plane/pipe to
 * CRTC lookup tables.  Allocation failures unwind via the fail label;
 * the function reports nothing to the caller on failure.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* Pipe C has only one scaler on SKL; A and B have the full set. */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs);
	if (ret)
		goto fail;

	/* Identity gamma ramp as the default LUT. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* Force the first cursor update to program all cursor registers. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
2327 Serge 13997
 
4560 Serge 13998
/*
 * Return the pipe currently driving @connector, or INVALID_PIPE if the
 * connector has no encoder or the encoder has no CRTC.  Caller must hold
 * the connection_mutex (asserted below).
 */
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}
14010
 
3031 serge 14011
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14012
				struct drm_file *file)
14013
{
14014
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
5060 serge 14015
	struct drm_crtc *drmmode_crtc;
3031 serge 14016
	struct intel_crtc *crtc;
2327 Serge 14017
 
5060 serge 14018
	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
2327 Serge 14019
 
5060 serge 14020
	if (!drmmode_crtc) {
3031 serge 14021
		DRM_ERROR("no such CRTC id\n");
4560 Serge 14022
		return -ENOENT;
3031 serge 14023
	}
2327 Serge 14024
 
5060 serge 14025
	crtc = to_intel_crtc(drmmode_crtc);
3031 serge 14026
	pipe_from_crtc_id->pipe = crtc->pipe;
2327 Serge 14027
 
3031 serge 14028
	return 0;
14029
}
2327 Serge 14030
 
3031 serge 14031
static int intel_encoder_clones(struct intel_encoder *encoder)
2330 Serge 14032
{
3031 serge 14033
	struct drm_device *dev = encoder->base.dev;
14034
	struct intel_encoder *source_encoder;
2330 Serge 14035
	int index_mask = 0;
14036
	int entry = 0;
2327 Serge 14037
 
5354 serge 14038
	for_each_intel_encoder(dev, source_encoder) {
5060 serge 14039
		if (encoders_cloneable(encoder, source_encoder))
2330 Serge 14040
			index_mask |= (1 << entry);
3031 serge 14041
 
2330 Serge 14042
		entry++;
14043
	}
2327 Serge 14044
 
2330 Serge 14045
	return index_mask;
14046
}
2327 Serge 14047
 
2330 Serge 14048
static bool has_edp_a(struct drm_device *dev)
14049
{
14050
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 14051
 
2330 Serge 14052
	if (!IS_MOBILE(dev))
14053
		return false;
2327 Serge 14054
 
2330 Serge 14055
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14056
		return false;
2327 Serge 14057
 
5060 serge 14058
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
2330 Serge 14059
		return false;
2327 Serge 14060
 
2330 Serge 14061
	return true;
14062
}
2327 Serge 14063
 
5060 serge 14064
static bool intel_crt_present(struct drm_device *dev)
14065
{
14066
	struct drm_i915_private *dev_priv = dev->dev_private;
14067
 
5354 serge 14068
	if (INTEL_INFO(dev)->gen >= 9)
5060 serge 14069
		return false;
14070
 
5354 serge 14071
	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14072
		return false;
14073
 
5060 serge 14074
	if (IS_CHERRYVIEW(dev))
14075
		return false;
14076
 
14077
	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
14078
		return false;
14079
 
14080
	return true;
14081
}
14082
 
2330 Serge 14083
/*
 * Probe and register every display output (encoder/connector pair) for
 * this platform: LVDS, CRT, then the platform-specific digital ports
 * (DDI on BXT/HSW+, PCH ports on ILK..IVB, VLV/CHV ports, SDVO/HDMI/DP
 * on gen3/4, DVO on gen2).  Detection mixes hardware straps, register
 * presence bits and VBT data, depending on generation.  Finishes by
 * filling in possible_crtcs/possible_clones for every encoder.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_SKYLAKE(dev) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D carries eDP when dpd_is_edp; skip the HDMI probe then. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	intel_psr_init(dev);

	/* Now that all encoders exist, compute the clone/CRTC masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14257
 
6084 serge 14258
/*
 * Framebuffer destroy hook: unregister the fb from the core, drop the
 * framebuffer reference count on the backing GEM object (under
 * struct_mutex) and release it, then free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}
2330 Serge 14270
 
6084 serge 14271
/*
 * Framebuffer create_handle hook: hand userspace a GEM handle for the
 * fb's backing object.  userptr-backed objects are refused, since their
 * pages belong to another process's address space.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}
14285
 
14286
/*
 * Framebuffer dirty hook: userspace signalled that fb contents changed.
 * Flush frontbuffer tracking for the backing object so features like FBC
 * and PSR notice the update.  The clip rectangles are ignored; the whole
 * object is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
14302
 
2335 Serge 14303
/* drm_framebuffer vtable for i915 user-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
2327 Serge 14308
 
6084 serge 14309
static
14310
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14311
			 uint32_t pixel_format)
14312
{
14313
	u32 gen = INTEL_INFO(dev)->gen;
14314
 
14315
	if (gen >= 9) {
14316
		/* "The stride in bytes must not exceed the of the size of 8K
14317
		 *  pixels and 32K bytes."
14318
		 */
14319
		 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
14320
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
14321
		return 32*1024;
14322
	} else if (gen >= 4) {
14323
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14324
			return 16*1024;
14325
		else
14326
			return 32*1024;
14327
	} else if (gen >= 3) {
14328
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14329
			return 8*1024;
14330
		else
14331
			return 16*1024;
14332
	} else {
14333
		/* XXX DSPC is limited to 4k tiled */
14334
		return 8*1024;
14335
	}
14336
}
14337
 
5060 serge 14338
/*
 * Validate a userspace framebuffer description against the hardware and
 * the backing GEM object, then register the framebuffer.
 *
 * Checks performed, in order: fb modifier vs object tiling consistency,
 * modifier supported on this gen, stride alignment, pitch limit, tiled
 * pitch vs object stride, pixel format supported on this gen, zero plane
 * offset, and object large enough for the aligned height.  Returns 0 on
 * success or a negative error code.  Caller must hold struct_mutex.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object tiling. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through - Y/Yf tiling is valid on gen9+ */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout must use the fence stride recorded on the object. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	intel_fb->obj->framebuffer_references++;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}
	/* KolibriOS-specific hook for the framebuffer wrapper. */
	kolibri_framebuffer_init(intel_fb);
	return 0;
}
2327 Serge 14485
 
6084 serge 14486
static struct drm_framebuffer *
14487
intel_user_framebuffer_create(struct drm_device *dev,
14488
			      struct drm_file *filp,
14489
			      struct drm_mode_fb_cmd2 *user_mode_cmd)
14490
{
14491
	struct drm_i915_gem_object *obj;
14492
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14493
 
14494
	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
14495
						mode_cmd.handles[0]));
14496
	if (&obj->base == NULL)
14497
		return ERR_PTR(-ENOENT);
14498
 
14499
	return intel_framebuffer_create(dev, &mode_cmd, obj);
14500
}
14501
 
14502
#ifndef CONFIG_DRM_FBDEV_EMULATION
/* No fbdev emulation configured: output-poll notification is a no-op. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
2327 Serge 14507
 
2360 Serge 14508
/* Mode-config vtable: user fb creation, fbdev poll, and the atomic
 * modeset entry points. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
2327 Serge 14516
 
3031 serge 14517
/* Set up chip specific display functions */
14518
static void intel_init_display(struct drm_device *dev)
14519
{
14520
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 14521
 
4104 Serge 14522
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
14523
		dev_priv->display.find_dpll = g4x_find_best_dpll;
5060 serge 14524
	else if (IS_CHERRYVIEW(dev))
14525
		dev_priv->display.find_dpll = chv_find_best_dpll;
4104 Serge 14526
	else if (IS_VALLEYVIEW(dev))
14527
		dev_priv->display.find_dpll = vlv_find_best_dpll;
14528
	else if (IS_PINEVIEW(dev))
14529
		dev_priv->display.find_dpll = pnv_find_best_dpll;
14530
	else
14531
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
14532
 
6084 serge 14533
	if (INTEL_INFO(dev)->gen >= 9) {
3746 Serge 14534
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
6084 serge 14535
		dev_priv->display.get_initial_plane_config =
14536
			skylake_get_initial_plane_config;
5354 serge 14537
		dev_priv->display.crtc_compute_clock =
14538
			haswell_crtc_compute_clock;
3243 Serge 14539
		dev_priv->display.crtc_enable = haswell_crtc_enable;
14540
		dev_priv->display.crtc_disable = haswell_crtc_disable;
5060 serge 14541
		dev_priv->display.update_primary_plane =
6084 serge 14542
			skylake_update_primary_plane;
14543
	} else if (HAS_DDI(dev)) {
14544
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14545
		dev_priv->display.get_initial_plane_config =
14546
			ironlake_get_initial_plane_config;
14547
		dev_priv->display.crtc_compute_clock =
14548
			haswell_crtc_compute_clock;
14549
		dev_priv->display.crtc_enable = haswell_crtc_enable;
14550
		dev_priv->display.crtc_disable = haswell_crtc_disable;
14551
		dev_priv->display.update_primary_plane =
5060 serge 14552
			ironlake_update_primary_plane;
3243 Serge 14553
	} else if (HAS_PCH_SPLIT(dev)) {
3746 Serge 14554
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
6084 serge 14555
		dev_priv->display.get_initial_plane_config =
14556
			ironlake_get_initial_plane_config;
5354 serge 14557
		dev_priv->display.crtc_compute_clock =
14558
			ironlake_crtc_compute_clock;
3031 serge 14559
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
14560
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
5060 serge 14561
		dev_priv->display.update_primary_plane =
14562
			ironlake_update_primary_plane;
4104 Serge 14563
	} else if (IS_VALLEYVIEW(dev)) {
14564
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
6084 serge 14565
		dev_priv->display.get_initial_plane_config =
14566
			i9xx_get_initial_plane_config;
5354 serge 14567
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
4104 Serge 14568
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14569
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
5060 serge 14570
		dev_priv->display.update_primary_plane =
14571
			i9xx_update_primary_plane;
3031 serge 14572
	} else {
3746 Serge 14573
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
6084 serge 14574
		dev_priv->display.get_initial_plane_config =
14575
			i9xx_get_initial_plane_config;
5354 serge 14576
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
3031 serge 14577
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
14578
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
5060 serge 14579
		dev_priv->display.update_primary_plane =
14580
			i9xx_update_primary_plane;
3031 serge 14581
	}
2327 Serge 14582
 
3031 serge 14583
	/* Returns the core display clock speed */
6084 serge 14584
	if (IS_SKYLAKE(dev))
3031 serge 14585
		dev_priv->display.get_display_clock_speed =
6084 serge 14586
			skylake_get_display_clock_speed;
14587
	else if (IS_BROXTON(dev))
14588
		dev_priv->display.get_display_clock_speed =
14589
			broxton_get_display_clock_speed;
14590
	else if (IS_BROADWELL(dev))
14591
		dev_priv->display.get_display_clock_speed =
14592
			broadwell_get_display_clock_speed;
14593
	else if (IS_HASWELL(dev))
14594
		dev_priv->display.get_display_clock_speed =
14595
			haswell_get_display_clock_speed;
14596
	else if (IS_VALLEYVIEW(dev))
14597
		dev_priv->display.get_display_clock_speed =
3031 serge 14598
			valleyview_get_display_clock_speed;
6084 serge 14599
	else if (IS_GEN5(dev))
3031 serge 14600
		dev_priv->display.get_display_clock_speed =
6084 serge 14601
			ilk_get_display_clock_speed;
14602
	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
14603
		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
14604
		dev_priv->display.get_display_clock_speed =
3031 serge 14605
			i945_get_display_clock_speed;
6084 serge 14606
	else if (IS_GM45(dev))
14607
		dev_priv->display.get_display_clock_speed =
14608
			gm45_get_display_clock_speed;
14609
	else if (IS_CRESTLINE(dev))
14610
		dev_priv->display.get_display_clock_speed =
14611
			i965gm_get_display_clock_speed;
14612
	else if (IS_PINEVIEW(dev))
14613
		dev_priv->display.get_display_clock_speed =
14614
			pnv_get_display_clock_speed;
14615
	else if (IS_G33(dev) || IS_G4X(dev))
14616
		dev_priv->display.get_display_clock_speed =
14617
			g33_get_display_clock_speed;
3031 serge 14618
	else if (IS_I915G(dev))
14619
		dev_priv->display.get_display_clock_speed =
14620
			i915_get_display_clock_speed;
4104 Serge 14621
	else if (IS_I945GM(dev) || IS_845G(dev))
3031 serge 14622
		dev_priv->display.get_display_clock_speed =
14623
			i9xx_misc_get_display_clock_speed;
4104 Serge 14624
	else if (IS_PINEVIEW(dev))
14625
		dev_priv->display.get_display_clock_speed =
14626
			pnv_get_display_clock_speed;
3031 serge 14627
	else if (IS_I915GM(dev))
14628
		dev_priv->display.get_display_clock_speed =
14629
			i915gm_get_display_clock_speed;
14630
	else if (IS_I865G(dev))
14631
		dev_priv->display.get_display_clock_speed =
14632
			i865_get_display_clock_speed;
14633
	else if (IS_I85X(dev))
14634
		dev_priv->display.get_display_clock_speed =
6084 serge 14635
			i85x_get_display_clock_speed;
14636
	else { /* 830 */
14637
		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
3031 serge 14638
		dev_priv->display.get_display_clock_speed =
14639
			i830_get_display_clock_speed;
6084 serge 14640
	}
2327 Serge 14641
 
6084 serge 14642
	if (IS_GEN5(dev)) {
14643
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14644
	} else if (IS_GEN6(dev)) {
14645
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14646
	} else if (IS_IVYBRIDGE(dev)) {
14647
		/* FIXME: detect B0+ stepping and use auto training */
14648
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
5354 serge 14649
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6084 serge 14650
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14651
		if (IS_BROADWELL(dev)) {
14652
			dev_priv->display.modeset_commit_cdclk =
14653
				broadwell_modeset_commit_cdclk;
14654
			dev_priv->display.modeset_calc_cdclk =
14655
				broadwell_modeset_calc_cdclk;
14656
		}
4560 Serge 14657
	} else if (IS_VALLEYVIEW(dev)) {
6084 serge 14658
		dev_priv->display.modeset_commit_cdclk =
14659
			valleyview_modeset_commit_cdclk;
14660
		dev_priv->display.modeset_calc_cdclk =
14661
			valleyview_modeset_calc_cdclk;
14662
	} else if (IS_BROXTON(dev)) {
14663
		dev_priv->display.modeset_commit_cdclk =
14664
			broxton_modeset_commit_cdclk;
14665
		dev_priv->display.modeset_calc_cdclk =
14666
			broxton_modeset_calc_cdclk;
3031 serge 14667
	}
2327 Serge 14668
 
6320 serge 14669
	switch (INTEL_INFO(dev)->gen) {
14670
	case 2:
14671
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
14672
		break;
2327 Serge 14673
 
6320 serge 14674
	case 3:
14675
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
14676
		break;
2327 Serge 14677
 
6320 serge 14678
	case 4:
14679
	case 5:
14680
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
14681
		break;
2327 Serge 14682
 
6320 serge 14683
	case 6:
14684
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
14685
		break;
14686
	case 7:
14687
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
14688
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
14689
		break;
14690
	case 9:
14691
		/* Drop through - unsupported since execlist only. */
14692
	default:
14693
		/* Default just returns -ENODEV to indicate unsupported */
14694
		dev_priv->display.queue_flip = intel_default_queue_flip;
14695
	}
2327 Serge 14696
 
5354 serge 14697
	mutex_init(&dev_priv->pps_mutex);
3031 serge 14698
}
14699
 
14700
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}
2327 Serge 14712
 
5354 serge 14713
static void quirk_pipeb_force(struct drm_device *dev)
14714
{
14715
	struct drm_i915_private *dev_priv = dev->dev_private;
14716
 
14717
	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
14718
	DRM_INFO("applying pipe b force quirk\n");
14719
}
14720
 
3031 serge 14721
/*
14722
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14723
 */
14724
static void quirk_ssc_force_disable(struct drm_device *dev)
14725
{
14726
	struct drm_i915_private *dev_priv = dev->dev_private;
14727
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14728
	DRM_INFO("applying lvds SSC disable quirk\n");
2330 Serge 14729
}
2327 Serge 14730
 
3031 serge 14731
/*
14732
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14733
 * brightness value
14734
 */
14735
static void quirk_invert_brightness(struct drm_device *dev)
2330 Serge 14736
{
14737
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 14738
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14739
	DRM_INFO("applying inverted panel brightness quirk\n");
14740
}
2327 Serge 14741
 
5060 serge 14742
/* Some VBT's incorrectly indicate no backlight is present */
14743
static void quirk_backlight_present(struct drm_device *dev)
14744
{
14745
	struct drm_i915_private *dev_priv = dev->dev_private;
14746
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14747
	DRM_INFO("applying backlight present quirk\n");
14748
}
14749
 
3031 serge 14750
/*
 * One PCI-ID keyed quirk entry: matched against the GPU's PCI device ID
 * plus subsystem vendor/device IDs (PCI_ANY_ID acts as a wildcard in
 * intel_init_quirks()); hook() is invoked on a match.
 */
struct intel_quirk {
	int device;			/* PCI device ID of the GPU */
	int subsystem_vendor;		/* subsystem vendor ID or PCI_ANY_ID */
	int subsystem_device;		/* subsystem device ID or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied when entry matches */
};
2327 Serge 14756
 
3031 serge 14757
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* applied when any DMI entry matches */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated DMI match list */
};
2327 Serge 14762
 
3031 serge 14763
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
14764
{
14765
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
14766
	return 1;
2330 Serge 14767
}
2327 Serge 14768
 
3031 serge 14769
/*
 * DMI-matched quirks: used where the PCI subsystem IDs are not meaningful.
 * Each entry pairs a DMI match table with the quirk hook to run on a match.
 */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
2327 Serge 14784
 
3031 serge 14785
/*
 * PCI-ID keyed quirk table, scanned by intel_init_quirks().  Entries are
 * {device, subsystem_vendor, subsystem_device, hook}; PCI_ANY_ID wildcards
 * a subsystem field.
 */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
2327 Serge 14846
 
3031 serge 14847
static void intel_init_quirks(struct drm_device *dev)
2330 Serge 14848
{
3031 serge 14849
	struct pci_dev *d = dev->pdev;
14850
	int i;
2327 Serge 14851
 
3031 serge 14852
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14853
		struct intel_quirk *q = &intel_quirks[i];
2327 Serge 14854
 
3031 serge 14855
		if (d->device == q->device &&
14856
		    (d->subsystem_vendor == q->subsystem_vendor ||
14857
		     q->subsystem_vendor == PCI_ANY_ID) &&
14858
		    (d->subsystem_device == q->subsystem_device ||
14859
		     q->subsystem_device == PCI_ANY_ID))
14860
			q->hook(dev);
14861
	}
5097 serge 14862
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
14863
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
14864
			intel_dmi_quirks[i].hook(dev);
14865
	}
2330 Serge 14866
}
2327 Serge 14867
 
3031 serge 14868
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);	/* platform-specific VGACNTRL register */

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	/* NOTE(review): legacy-IO arbitration is stubbed out in this port. */
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Set bit 5 of sequencer register SR01 (standard VGA "screen off" bit). */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Let the sequencer settle before touching the display engine. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the write */
}
14886
 
3031 serge 14887
/*
 * (Re)initialize the display hardware bits that must be programmed both at
 * load and on resume: cdclk bookkeeping, DDI setup, clock gating
 * workarounds, and GT power-saving features.  Call order matters.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_update_cdclk(dev);
	intel_prepare_ddi(dev);
	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev);
}
14894
 
3031 serge 14895
/*
 * One-time modeset initialization at driver load: sets up the DRM mode
 * config, applies quirks, creates CRTCs/planes/outputs, reads out the
 * BIOS-programmed hardware state and reserves any BIOS framebuffer.
 * Bails out early on display-less hardware (num_pipes == 0).
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	/* Apply machine-specific quirks before anything consults them. */
	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Display-less chip: nothing more to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	intel_init_display(dev);

	/* Framebuffer size limits scale with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* One CRTC per pipe, plus its sprite planes. */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_fbc_disable(dev_priv);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}
}
15015
 
3031 serge 15016
static void intel_enable_pipe_a(struct drm_device *dev)
2330 Serge 15017
{
3031 serge 15018
	struct intel_connector *connector;
15019
	struct drm_connector *crt = NULL;
15020
	struct intel_load_detect_pipe load_detect_temp;
5060 serge 15021
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
2330 Serge 15022
 
3031 serge 15023
	/* We can't just switch on the pipe A, we need to set things up with a
15024
	 * proper mode and output configuration. As a gross hack, enable pipe A
15025
	 * by enabling the load detect pipe once. */
6084 serge 15026
	for_each_intel_connector(dev, connector) {
3031 serge 15027
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15028
			crt = &connector->base;
15029
			break;
2330 Serge 15030
		}
15031
	}
15032
 
3031 serge 15033
	if (!crt)
15034
		return;
2330 Serge 15035
 
5060 serge 15036
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
6084 serge 15037
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
2327 Serge 15038
}
15039
 
3031 serge 15040
static bool
15041
intel_check_plane_mapping(struct intel_crtc *crtc)
2327 Serge 15042
{
3746 Serge 15043
	struct drm_device *dev = crtc->base.dev;
15044
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 15045
	u32 val;
2327 Serge 15046
 
3746 Serge 15047
	if (INTEL_INFO(dev)->num_pipes == 1)
3031 serge 15048
		return true;
2327 Serge 15049
 
6084 serge 15050
	val = I915_READ(DSPCNTR(!crtc->plane));
2327 Serge 15051
 
3031 serge 15052
	if ((val & DISPLAY_PLANE_ENABLE) &&
15053
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15054
		return false;
2327 Serge 15055
 
3031 serge 15056
	return true;
2327 Serge 15057
}
15058
 
6084 serge 15059
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15060
{
15061
	struct drm_device *dev = crtc->base.dev;
15062
	struct intel_encoder *encoder;
15063
 
15064
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15065
		return true;
15066
 
15067
	return false;
15068
}
15069
 
3031 serge 15070
/*
 * Fix up a single crtc after hardware state readout: clear BIOS debug
 * leftovers, disable stray sprite/cursor planes, repair a crossed
 * plane->pipe mapping (pre-gen4), apply the pipe A force quirk, and make
 * the software state (state->active, base.enabled, encoder links)
 * consistent with what the hardware is actually doing.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config->cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (!intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active != crtc->base.state->active) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because of calls to intel_crtc_disable_noatomic,
		 * or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.state->enable ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 *  actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
			encoder->base.crtc = NULL;
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
15174
 
3031 serge 15175
/*
 * Fix up an encoder after hardware state readout: if connectors claim the
 * encoder is in use but it has no active crtc behind it (typical fallout
 * of resume register restore), disable the encoder and clamp its
 * connectors to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;
	bool active = false;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	/* Does any connector believe it is driven by this encoder? */
	for_each_intel_connector(dev, connector) {
		if (connector->base.encoder != &encoder->base)
			continue;

		active = true;
		break;
	}

	if (active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15227
 
5060 serge 15228
void i915_redisable_vga_power_on(struct drm_device *dev)
3746 Serge 15229
{
15230
	struct drm_i915_private *dev_priv = dev->dev_private;
15231
	u32 vga_reg = i915_vgacntrl_reg(dev);
15232
 
5060 serge 15233
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15234
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15235
		i915_disable_vga(dev);
15236
	}
15237
}
15238
 
15239
/*
 * Power-well-aware wrapper around i915_redisable_vga_power_on(): skips the
 * check entirely while the VGA power domain is off, since the register is
 * then inaccessible and the plane cannot be scanning out anyway.
 */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);
}
15255
 
6084 serge 15256
static bool primary_get_hw_state(struct intel_plane *plane)
5060 serge 15257
{
6084 serge 15258
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
5060 serge 15259
 
6084 serge 15260
	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15261
}
5060 serge 15262
 
6084 serge 15263
/* FIXME read out full plane state for all planes */
15264
static void readout_plane_state(struct intel_crtc *crtc)
15265
{
15266
	struct drm_plane *primary = crtc->base.primary;
15267
	struct intel_plane_state *plane_state =
15268
		to_intel_plane_state(primary->state);
15269
 
15270
	plane_state->visible =
15271
		primary_get_hw_state(to_intel_plane(primary));
15272
 
15273
	if (plane_state->visible)
15274
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
5060 serge 15275
}
15276
 
4104 Serge 15277
/*
 * Read the current display hardware state into the software tracking
 * structures: per-crtc pipe config and plane state, shared DPLL usage,
 * encoder->pipe links, connector->encoder links, and finally the crtc
 * modes needed to keep the atomic core consistent.  No hardware is
 * modified here; sanitizing happens afterwards.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	/* Pipe/crtc state: rebuild crtc->config from the hardware. */
	for_each_intel_crtc(dev, crtc) {
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
		memset(crtc->config, 0, sizeof(*crtc->config));
		crtc->config->base.crtc = &crtc->base;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 crtc->config);

		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	/* Shared DPLLs: recount which active crtcs reference each pll. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		pll->active = 0;
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	/* Encoders: link each active encoder to the pipe it drives. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	/* Connectors: dpms and encoder link follow the hardware state. */
	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	/* Rebuild the crtc modes from the pipe configs read out above. */
	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}
	}
}
2332 Serge 15390
 
6084 serge 15391
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	/* Encoders first, then crtcs — crtc sanitizing relies on it. */
	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Switch off shared DPLLs that are on but unused by any crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out the platform-appropriate watermark state. */
	if (IS_VALLEYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);
}
3746 Serge 15447
 
6084 serge 15448
/*
 * Restore the display configuration on resume: snapshot the complete
 * pre-suspend atomic state (crtcs, planes, connectors, shared dplls),
 * re-read and sanitize the hardware, then commit the saved state back
 * with mode_changed forced so a full modeset is performed.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
	struct intel_connector *conn;
	struct intel_plane *plane;
	struct drm_crtc *crtc;
	int ret;

	/* Allocation failure: silently skip restore (nothing to commit). */
	if (!state)
		return;

	state->acquire_ctx = dev->mode_config.acquire_ctx;

	/* preserve complete old state, including dpll */
	intel_atomic_get_shared_dpll_state(state);

	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}

	for_each_intel_plane(dev, plane) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
		if (ret)
			goto err;
	}

	for_each_intel_connector(dev, conn) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
		if (ret)
			goto err;
	}

	intel_modeset_setup_hw_state(dev);

	i915_redisable_vga(dev);
	ret = drm_atomic_commit(state);
	if (!ret)
		return;

err:
	/* On any failure the state object is ours to free. */
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_free(state);
}
15499
 
3031 serge 15500
/*
 * GEM-side modeset init, run once GEM is up: enable GT power saving,
 * program the display hardware, then pin & fence any framebuffers that
 * were taken over from the BIOS (dropping them on failure), and finally
 * register the backlight device.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_init_hw(dev);

	/* Overlay support is disabled in this (KolibriOS) port. */
//   intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced.  When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary,
						 c->primary->fb,
						 c->primary->state,
						 NULL, NULL);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Could not pin the BIOS fb: detach it from the crtc
			 * so we fall back to a normal fb allocation later. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}
15543
 
5060 serge 15544
void intel_connector_unregister(struct intel_connector *intel_connector)
15545
{
15546
	struct drm_connector *connector = &intel_connector->base;
15547
 
15548
	intel_panel_destroy_backlight(connector);
15549
	drm_connector_unregister(connector);
15550
}
15551
 
3031 serge 15552
/*
 * Full modeset teardown.  The entire body is compiled out (#if 0) in
 * this port, so the function is currently a no-op; the disabled code
 * below documents the upstream teardown order.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct intel_connector *intel_connector;

		intel_connector = to_intel_connector(connector);
		intel_connector->unregister(intel_connector);
	}

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
#endif
}
15599
 
15600
/*
3031 serge 15601
 * Return which encoder is currently attached for connector.
2327 Serge 15602
 */
3031 serge 15603
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
2327 Serge 15604
{
3031 serge 15605
	return &intel_attached_encoder(connector)->base;
15606
}
2327 Serge 15607
 
3031 serge 15608
void intel_connector_attach_encoder(struct intel_connector *connector,
15609
				    struct intel_encoder *encoder)
15610
{
15611
	connector->encoder = encoder;
15612
	drm_mode_connector_attach_encoder(&connector->base,
15613
					  &encoder->base);
2327 Serge 15614
}
15615
 
15616
/*
3031 serge 15617
 * set vga decode state - true == enable VGA decode
2327 Serge 15618
 */
3031 serge 15619
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
2327 Serge 15620
{
2330 Serge 15621
	struct drm_i915_private *dev_priv = dev->dev_private;
4539 Serge 15622
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
3031 serge 15623
	u16 gmch_ctrl;
2327 Serge 15624
 
5060 serge 15625
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15626
		DRM_ERROR("failed to read control word\n");
15627
		return -EIO;
15628
	}
15629
 
15630
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15631
		return 0;
15632
 
3031 serge 15633
	if (state)
15634
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
2330 Serge 15635
	else
3031 serge 15636
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
5060 serge 15637
 
15638
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
15639
		DRM_ERROR("failed to write control word\n");
15640
		return -EIO;
15641
	}
15642
 
3031 serge 15643
	return 0;
2330 Serge 15644
}
15645
 
3031 serge 15646
#ifdef CONFIG_DEBUG_FS
2327 Serge 15647
 
3031 serge 15648
/*
 * Snapshot of display registers taken at error-capture time; filled by
 * intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state().  Comments on per-gen validity below
 * mirror the conditions in the capture code.
 */
struct intel_display_error_state {

	u32 power_well_driver;		/* HSW_PWR_WELL_DRIVER; HSW/BDW only */

	int num_transcoders;		/* valid entries in transcoder[] */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;		/* not filled by the capture code in this file */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers below valid only if true */
		u32 source;
		u32 stat;		/* PIPESTAT; GMCH display platforms only */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;		/* gen <= 3 only */
		u32 pos;		/* gen <= 3 only */
		u32 addr;		/* gen <= 7 and not HSW */
		u32 surface;		/* gen >= 4 only */
		u32 tile_offset;	/* gen >= 4 only */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;	/* timing regs valid only if true */
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];		/* A, B, C plus one eDP slot */
};
2327 Serge 15691
 
3031 serge 15692
/*
 * Capture cursor, primary-plane, pipe and transcoder register state into
 * a freshly allocated intel_display_error_state for later dumping.
 *
 * Returns NULL when the device has no pipes or when allocation fails.
 * Pipes and transcoders whose power domain is disabled are skipped and
 * left with power_domain_on == false.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	/* Index order matches the eDP accounting below: slot 3 is eDP. */
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	/* GFP_ATOMIC — presumably reachable from atomic context; verify. */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
2327 Serge 15771
 
4104 Serge 15772
/* Shorthand for formatted output into the error-state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
15773
 
3031 serge 15774
/*
 * Pretty-print a display error state previously captured by
 * intel_display_capture_error_state() into the error-state buffer @m.
 * A NULL @error is tolerated and ignored.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		/* Per-gen fields mirror the conditions used at capture time. */
		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
3031 serge 15830
#endif
5354 serge 15831
 
15832
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
15833
{
15834
	struct intel_crtc *crtc;
15835
 
15836
	for_each_intel_crtc(dev, crtc) {
15837
		struct intel_unpin_work *work;
15838
 
15839
		spin_lock_irq(&dev->event_lock);
15840
 
15841
		work = crtc->unpin_work;
15842
 
15843
		if (work && work->event &&
15844
		    work->event->base.file_priv == file) {
15845
			kfree(work->event);
15846
			work->event = NULL;
15847
		}
15848
 
15849
		spin_unlock_irq(&dev->event_lock);
15850
	}
15851
}