Subversion Repositories Kolibri OS

/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *  Eric Anholt
 */

#include 
#include 
//#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include "intel_drv.h"
#include 
#include "i915_drv.h"
#include "i915_trace.h"
#include 
#include 
#include 
#include 
#include 

/* Primary plane formats supported by all gen */
#define COMMON_PRIMARY_FORMATS \
	DRM_FORMAT_C8, \
	DRM_FORMAT_RGB565, \
	DRM_FORMAT_XRGB8888, \
	DRM_FORMAT_ARGB8888

/* Primary plane formats for gen <= 3 */
static const uint32_t intel_primary_formats_gen2[] = {
	COMMON_PRIMARY_FORMATS,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t intel_primary_formats_gen4[] = {
	COMMON_PRIMARY_FORMATS, \
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config);

static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *old_fb);
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_config *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_config *pipe_config);

static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
	if (!connector->mst_port)
		return connector->encoder;
	else
		return &connector->mst_port->mst_encoders[pipe]->base;
}

typedef struct {
    int min, max;
} intel_range_t;

typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
};

int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}
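
/*
 * Editorial note (not part of the original file): intel_fdi_link_freq()
 * reports the FDI link clock in units of 100 MHz, so the fixed return value
 * of 27 above corresponds to a 2.7 GHz link; on gen5 the value is instead
 * derived from the FDI_PLL_BIOS_0 feedback-divider field, plus 2.
 */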

static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
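
/*
 * Editorial note (not part of the original file): per the Ironlake /
 * Sandybridge comment above, the N/M1/M2 ranges in these tables are raw
 * register values, i.e. two less than the actual divisors.  For example,
 * intel_limits_ironlake_dac's .m1 = { 12, 22 } describes actual M1 divisors
 * of 14..24, matching the (register_value + 2) arithmetic used by
 * i9xx_clock() below.
 */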

static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4860000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 * encoder->crtc.
 */
static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_intel_encoder(dev, encoder)
		if (encoder->new_crtc == crtc && encoder->type == type)
			return true;

	return false;
}

static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
						int refclk)
{
	struct drm_device *dev = crtc->base.dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->base.dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_CHERRYVIEW(dev)) {
		limit = &intel_limits_chv;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
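
/*
 * Worked example (illustrative numbers, not from the original file): with a
 * 96000 kHz reference clock and register values m1 = 12, m2 = 6, n = 2,
 * p1 = 2, p2 = 10, i9xx_clock() computes
 *
 *	m   = 5 * (12 + 2) + (6 + 2)   = 78
 *	vco = 96000 * 78 / (2 + 2)     = 1872000 kHz
 *	dot = 1872000 / (2 * 10)       = 93600 kHz
 *
 * which falls inside the intel_limits_i9xx_sdvo ranges above, and since the
 * resulting dot clock is below the 200000 kHz dot_limit the slow p2 value of
 * 10 is the one the search code would have picked.
 */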

static void chv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
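
/*
 * Editorial note (inferred from the code above, not part of the original
 * file): on Cherryview m2 is a fixed-point value with 22 fractional bits,
 * which is why intel_limits_chv stores .m2 as 24 << 22 .. 175 << 22 and why
 * chv_clock() divides by (n << 22) to cancel that scaling when computing
 * the VCO frequency.
 */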

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
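
/*
 * Usage sketch (illustrative only, not a call site in this file): the search
 * helpers above and below are normally reached via the limit tables, e.g.
 *
 *	const intel_limit_t *limit = intel_limit(crtc, refclk);
 *	intel_clock_t clock;
 *
 *	if (!i9xx_find_best_dpll(limit, crtc, 148500, refclk, NULL, &clock))
 *		return false;	// no divisor combination hit the target
 *
 * The 148500 kHz target is just an example (a common 1080p dot clock); the
 * function returns true if any valid divisor set reduced the error below the
 * initial value of "target" itself.
 */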

static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm, diff;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_clock(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					diff = abs(clock.dot - target);
					ppm = div_u64(1000000ULL * diff, target);

					if (ppm < 100 && clock.p > best_clock->p) {
						bestppm = 0;
						*best_clock = clock;
						found = true;
					}

					if (bestppm >= 10 && ppm < bestppm - 10) {
						bestppm = ppm;
						*best_clock = clock;
						found = true;
					}
				}
			}
		}
	}

	return found;
}
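
/*
 * Editorial note (summarising the loop above, not part of the original
 * file): each candidate's error is expressed in ppm of the target; a
 * candidate within 100 ppm that also has a larger post divider P replaces
 * the current best outright, otherwise a candidate is only accepted when it
 * improves the best ppm seen so far by more than 10 ppm.
 */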

static bool
chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	uint64_t m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 is always
	 * set to 2.  If we need to support a 200 MHz refclk we will have to
	 * revisit this because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_clock(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			/* based on hardware requirement, prefer bigger p
			 */
			if (clock.p > best_clock->p) {
				*best_clock = clock;
				found = true;
			}
		}
	}

	return found;
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 */
	return intel_crtc->active && crtc->primary->fb &&
		intel_crtc->config.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config.cpu_transcoder;
}

static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	mdelay(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}
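
/*
 * Editorial note (describing the helper above, not part of the original
 * file): PIPEDSL reports the current display scanline, so sampling it twice
 * 5 ms apart and seeing the same value is taken as evidence that the pipe
 * has stopped scanning out.
 */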

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/*
 * ibx_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Returns true if @port is connected, false otherwise.
 */
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port)
{
	u32 bit;

	if (HAS_PCH_IBX(dev_priv->dev)) {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG;
			break;
		default:
			return true;
		}
	} else {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG_CPT;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG_CPT;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG_CPT;
			break;
		default:
			return true;
		}
	}

	return I915_READ(SDEISR) & bit;
}
static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)

struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (crtc->config.shared_dpll < 0)
		return NULL;

	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
}

/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll,
			   bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN (!pll,
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     pll->name, state_string(state), state_string(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	if (!intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
		reg = PIPECONF(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, sprite;
	u32 val;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(pipe, sprite) {
			val = I915_READ(PLANE_CTL(pipe, sprite));
			WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev)) {
		for_each_sprite(pipe, sprite) {
			reg = SPCNTR(pipe, sprite);
			val = I915_READ(reg);
			WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		reg = SPRCTL(pipe);
		val = I915_READ(reg);
		WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		reg = DVSCNTR(pipe);
		val = I915_READ(reg);
		WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void intel_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));

	mutex_lock(&dev_priv->dpio_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);

	/* not sure when this should be written */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));

	mutex_unlock(&dev_priv->dpio_lock);
}

static int intel_num_dvo_pipes(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(dev, crtc)
		count += crtc->active &&
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);

	return count;
}
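
/*
 * Editorial note (derived from intel_num_dvo_pipes() above and the PLL
 * enable/disable paths below, not part of the original file): on I830 the
 * DVO 2x clock has to be enabled on both PLLs while any DVO output is
 * active; i9xx_enable_pll() sets DPLL_DVO_2X_MODE on the other pipe's DPLL
 * as well, and i9xx_disable_pll() only clears it on both PLLs once the last
 * active DVO pipe is being shut down.
 */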
1596
 
4104 Serge 1597
static void i9xx_enable_pll(struct intel_crtc *crtc)
1598
{
1599
	struct drm_device *dev = crtc->base.dev;
1600
	struct drm_i915_private *dev_priv = dev->dev_private;
1601
	int reg = DPLL(crtc->pipe);
1602
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1603
 
1604
	assert_pipe_disabled(dev_priv, crtc->pipe);
1605
 
1606
	/* No really, not for ILK+ */
5060 serge 1607
	BUG_ON(INTEL_INFO(dev)->gen >= 5);
4104 Serge 1608
 
1609
	/* PLL is protected by panel, make sure we can write it */
1610
	if (IS_MOBILE(dev) && !IS_I830(dev))
1611
		assert_panel_unlocked(dev_priv, crtc->pipe);
1612
 
5354 serge 1613
	/* Enable DVO 2x clock on both PLLs if necessary */
1614
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1615
		/*
1616
		 * It appears to be important that we don't enable this
1617
		 * for the current pipe before otherwise configuring the
1618
		 * PLL. No idea how this should be handled if multiple
1619
		 * DVO outputs are enabled simultaneously.
1620
		 */
1621
		dpll |= DPLL_DVO_2X_MODE;
1622
		I915_WRITE(DPLL(!crtc->pipe),
1623
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1624
	}
4104 Serge 1625
 
1626
	/* Wait for the clocks to stabilize. */
1627
	POSTING_READ(reg);
1628
	udelay(150);
1629
 
1630
	if (INTEL_INFO(dev)->gen >= 4) {
1631
		I915_WRITE(DPLL_MD(crtc->pipe),
1632
			   crtc->config.dpll_hw_state.dpll_md);
1633
	} else {
1634
		/* The pixel multiplier can only be updated once the
1635
		 * DPLL is enabled and the clocks are stable.
1636
		 *
1637
		 * So write it again.
1638
		 */
1639
		I915_WRITE(reg, dpll);
1640
	}
1641
 
2327 Serge 1642
    /* We do this three times for luck */
4104 Serge 1643
	I915_WRITE(reg, dpll);
2327 Serge 1644
    POSTING_READ(reg);
1645
    udelay(150); /* wait for warmup */
4104 Serge 1646
	I915_WRITE(reg, dpll);
2327 Serge 1647
    POSTING_READ(reg);
1648
    udelay(150); /* wait for warmup */
4104 Serge 1649
	I915_WRITE(reg, dpll);
2327 Serge 1650
    POSTING_READ(reg);
1651
    udelay(150); /* wait for warmup */
1652
}
1653
 
1654
/**
4104 Serge 1655
 * i9xx_disable_pll - disable a PLL
2327 Serge 1656
 * @crtc: crtc whose pipe PLL should be disabled
1658
 *
1659
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
1660
 *
1661
 * Note!  This is for pre-ILK only.
1662
 */
5354 serge 1663
static void i9xx_disable_pll(struct intel_crtc *crtc)
2327 Serge 1664
{
5354 serge 1665
	struct drm_device *dev = crtc->base.dev;
1666
	struct drm_i915_private *dev_priv = dev->dev_private;
1667
	enum pipe pipe = crtc->pipe;
1668
 
1669
	/* Disable DVO 2x clock on both PLLs if necessary */
1670
	if (IS_I830(dev) &&
1671
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1672
	    intel_num_dvo_pipes(dev) == 1) {
1673
		I915_WRITE(DPLL(PIPE_B),
1674
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1675
		I915_WRITE(DPLL(PIPE_A),
1676
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1677
	}
1678
 
1679
	/* Don't disable pipe or pipe PLLs if needed */
1680
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1681
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2327 Serge 1682
		return;
1683
 
1684
	/* Make sure the pipe isn't still relying on us */
1685
	assert_pipe_disabled(dev_priv, pipe);
1686
 
4104 Serge 1687
	I915_WRITE(DPLL(pipe), 0);
1688
	POSTING_READ(DPLL(pipe));
2327 Serge 1689
}
1690
 
4539 Serge 1691
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1692
{
1693
	u32 val = 0;
1694
 
1695
	/* Make sure the pipe isn't still relying on us */
1696
	assert_pipe_disabled(dev_priv, pipe);
1697
 
4560 Serge 1698
	/*
1699
	 * Leave integrated clock source and reference clock enabled for pipe B.
1700
	 * The latter is needed for VGA hotplug / manual detection.
1701
	 */
4539 Serge 1702
	if (pipe == PIPE_B)
4560 Serge 1703
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
4539 Serge 1704
	I915_WRITE(DPLL(pipe), val);
1705
	POSTING_READ(DPLL(pipe));
5060 serge 1706
 
4539 Serge 1707
}
1708
 
5060 serge 1709
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1710
{
1711
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1712
	u32 val;
1713
 
1714
	/* Make sure the pipe isn't still relying on us */
1715
	assert_pipe_disabled(dev_priv, pipe);
1716
 
1717
	/* Set PLL en = 0 */
5354 serge 1718
	val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
5060 serge 1719
	if (pipe != PIPE_A)
1720
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1721
	I915_WRITE(DPLL(pipe), val);
1722
	POSTING_READ(DPLL(pipe));
1723
 
1724
	mutex_lock(&dev_priv->dpio_lock);
1725
 
1726
	/* Disable 10bit clock to display controller */
1727
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1728
	val &= ~DPIO_DCLKP_EN;
1729
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1730
 
1731
	/* disable left/right clock distribution */
1732
	if (pipe != PIPE_B) {
1733
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1734
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1735
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1736
	} else {
1737
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1738
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1739
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1740
	}
1741
 
1742
	mutex_unlock(&dev_priv->dpio_lock);
1743
}
1744
 
4560 Serge 1745
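/*
 * Spin until the PHY reports the lanes for @dport as ready.  Note that for
 * port D the status is read from DPIO_PHY_STATUS rather than DPLL(0).
 */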
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1746
		struct intel_digital_port *dport)
3031 serge 1747
{
4104 Serge 1748
	u32 port_mask;
5060 serge 1749
	int dpll_reg;
3031 serge 1750
 
4560 Serge 1751
	switch (dport->port) {
1752
	case PORT_B:
4104 Serge 1753
		port_mask = DPLL_PORTB_READY_MASK;
5060 serge 1754
		dpll_reg = DPLL(0);
4560 Serge 1755
		break;
1756
	case PORT_C:
4104 Serge 1757
		port_mask = DPLL_PORTC_READY_MASK;
5060 serge 1758
		dpll_reg = DPLL(0);
4560 Serge 1759
		break;
5060 serge 1760
	case PORT_D:
1761
		port_mask = DPLL_PORTD_READY_MASK;
1762
		dpll_reg = DPIO_PHY_STATUS;
1763
		break;
4560 Serge 1764
	default:
1765
		BUG();
1766
	}
3243 Serge 1767
 
5060 serge 1768
	if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
4104 Serge 1769
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
5060 serge 1770
		     port_name(dport->port), I915_READ(dpll_reg));
3031 serge 1771
}
1772
 
5060 serge 1773
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1774
{
1775
	struct drm_device *dev = crtc->base.dev;
1776
	struct drm_i915_private *dev_priv = dev->dev_private;
1777
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1778
 
1779
	if (WARN_ON(pll == NULL))
1780
		return;
1781
 
5354 serge 1782
	WARN_ON(!pll->config.crtc_mask);
5060 serge 1783
	if (pll->active == 0) {
1784
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1785
		WARN_ON(pll->on);
1786
		assert_shared_dpll_disabled(dev_priv, pll);
1787
 
1788
		pll->mode_set(dev_priv, pll);
1789
	}
1790
}
1791
 
2327 Serge 1792
/**
5060 serge 1793
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
2327 Serge 1794
 * @crtc: CRTC whose shared DPLL should be enabled
1796
 *
1797
 * The shared DPLL needs to be enabled before the PCH transcoder, since it
1798
 * drives the transcoder clock.
1799
 */
5060 serge 1800
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1801
{
5060 serge 1802
	struct drm_device *dev = crtc->base.dev;
1803
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 1804
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1805
 
4104 Serge 1806
	if (WARN_ON(pll == NULL))
2342 Serge 1807
		return;
1808
 
5354 serge 1809
	if (WARN_ON(pll->config.crtc_mask == 0))
3031 serge 1810
		return;
2327 Serge 1811
 
5354 serge 1812
	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
4104 Serge 1813
		      pll->name, pll->active, pll->on,
1814
		      crtc->base.base.id);
3031 serge 1815
 
4104 Serge 1816
	if (pll->active++) {
1817
		WARN_ON(!pll->on);
1818
		assert_shared_dpll_enabled(dev_priv, pll);
3031 serge 1819
		return;
1820
	}
4104 Serge 1821
	WARN_ON(pll->on);
3031 serge 1822
 
5060 serge 1823
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1824
 
4104 Serge 1825
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1826
	pll->enable(dev_priv, pll);
3031 serge 1827
	pll->on = true;
2327 Serge 1828
}
1829
 
5354 serge 1830
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1831
{
5060 serge 1832
	struct drm_device *dev = crtc->base.dev;
1833
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 1834
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1835
 
1836
	/* PCH only available on ILK+ */
5060 serge 1837
	BUG_ON(INTEL_INFO(dev)->gen < 5);
4104 Serge 1838
	if (WARN_ON(pll == NULL))
3031 serge 1839
		return;
2327 Serge 1840
 
5354 serge 1841
	if (WARN_ON(pll->config.crtc_mask == 0))
3031 serge 1842
		return;
2327 Serge 1843
 
4104 Serge 1844
	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1845
		      pll->name, pll->active, pll->on,
1846
		      crtc->base.base.id);
2342 Serge 1847
 
3031 serge 1848
	if (WARN_ON(pll->active == 0)) {
4104 Serge 1849
		assert_shared_dpll_disabled(dev_priv, pll);
3031 serge 1850
		return;
1851
	}
2342 Serge 1852
 
4104 Serge 1853
	assert_shared_dpll_enabled(dev_priv, pll);
1854
	WARN_ON(!pll->on);
1855
	if (--pll->active)
2342 Serge 1856
		return;
1857
 
4104 Serge 1858
	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1859
	pll->disable(dev_priv, pll);
3031 serge 1860
	pll->on = false;
5060 serge 1861
 
1862
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2327 Serge 1863
}
1864
 
3243 Serge 1865
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1866
				    enum pipe pipe)
1867
{
3243 Serge 1868
	struct drm_device *dev = dev_priv->dev;
3031 serge 1869
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4104 Serge 1870
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3243 Serge 1871
	uint32_t reg, val, pipeconf_val;
2327 Serge 1872
 
1873
	/* PCH only available on ILK+ */
5354 serge 1874
	BUG_ON(!HAS_PCH_SPLIT(dev));
2327 Serge 1875
 
1876
	/* Make sure PCH DPLL is enabled */
4104 Serge 1877
	assert_shared_dpll_enabled(dev_priv,
1878
				   intel_crtc_to_shared_dpll(intel_crtc));
2327 Serge 1879
 
1880
	/* FDI must be feeding us bits for PCH ports */
1881
	assert_fdi_tx_enabled(dev_priv, pipe);
1882
	assert_fdi_rx_enabled(dev_priv, pipe);
1883
 
3243 Serge 1884
	if (HAS_PCH_CPT(dev)) {
1885
		/* Workaround: Set the timing override bit before enabling the
1886
		 * pch transcoder. */
1887
		reg = TRANS_CHICKEN2(pipe);
1888
		val = I915_READ(reg);
1889
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1890
		I915_WRITE(reg, val);
3031 serge 1891
	}
3243 Serge 1892
 
4104 Serge 1893
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1894
	val = I915_READ(reg);
3031 serge 1895
	pipeconf_val = I915_READ(PIPECONF(pipe));
2327 Serge 1896
 
1897
	if (HAS_PCH_IBX(dev_priv->dev)) {
1898
		/*
1899
		 * make the BPC in transcoder be consistent with
1900
		 * that in pipeconf reg.
1901
		 */
3480 Serge 1902
		val &= ~PIPECONF_BPC_MASK;
1903
		val |= pipeconf_val & PIPECONF_BPC_MASK;
2327 Serge 1904
	}
3031 serge 1905
 
1906
	val &= ~TRANS_INTERLACE_MASK;
1907
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1908
		if (HAS_PCH_IBX(dev_priv->dev) &&
5354 serge 1909
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
3031 serge 1910
			val |= TRANS_LEGACY_INTERLACED_ILK;
1911
		else
1912
			val |= TRANS_INTERLACED;
1913
	else
1914
		val |= TRANS_PROGRESSIVE;
1915
 
2327 Serge 1916
	I915_WRITE(reg, val | TRANS_ENABLE);
1917
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
4104 Serge 1918
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2327 Serge 1919
}
1920
 
3243 Serge 1921
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1922
				      enum transcoder cpu_transcoder)
1923
{
1924
	u32 val, pipeconf_val;
1925
 
1926
	/* PCH only available on ILK+ */
5354 serge 1927
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
3243 Serge 1928
 
1929
	/* FDI must be feeding us bits for PCH ports */
3480 Serge 1930
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
3243 Serge 1931
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1932
 
1933
	/* Workaround: set timing override bit. */
1934
	val = I915_READ(_TRANSA_CHICKEN2);
1935
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1936
	I915_WRITE(_TRANSA_CHICKEN2, val);
1937
 
1938
	val = TRANS_ENABLE;
1939
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1940
 
1941
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1942
	    PIPECONF_INTERLACED_ILK)
1943
		val |= TRANS_INTERLACED;
1944
	else
1945
		val |= TRANS_PROGRESSIVE;
1946
 
4104 Serge 1947
	I915_WRITE(LPT_TRANSCONF, val);
1948
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
3243 Serge 1949
		DRM_ERROR("Failed to enable PCH transcoder\n");
1950
}
1951
 
1952
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1953
				     enum pipe pipe)
1954
{
3243 Serge 1955
	struct drm_device *dev = dev_priv->dev;
1956
	uint32_t reg, val;
2327 Serge 1957
 
1958
	/* FDI relies on the transcoder */
1959
	assert_fdi_tx_disabled(dev_priv, pipe);
1960
	assert_fdi_rx_disabled(dev_priv, pipe);
1961
 
1962
	/* Ports must be off as well */
1963
	assert_pch_ports_disabled(dev_priv, pipe);
1964
 
4104 Serge 1965
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1966
	val = I915_READ(reg);
1967
	val &= ~TRANS_ENABLE;
1968
	I915_WRITE(reg, val);
1969
	/* wait for PCH transcoder off, transcoder state */
1970
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4104 Serge 1971
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
3243 Serge 1972
 
1973
	if (!HAS_PCH_IBX(dev)) {
1974
		/* Workaround: Clear the timing override chicken bit again. */
1975
		reg = TRANS_CHICKEN2(pipe);
1976
		val = I915_READ(reg);
1977
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1978
		I915_WRITE(reg, val);
1979
	}
2327 Serge 1980
}
1981
 
3243 Serge 1982
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1983
{
1984
	u32 val;
1985
 
4104 Serge 1986
	val = I915_READ(LPT_TRANSCONF);
3243 Serge 1987
	val &= ~TRANS_ENABLE;
4104 Serge 1988
	I915_WRITE(LPT_TRANSCONF, val);
3243 Serge 1989
	/* wait for PCH transcoder off, transcoder state */
4104 Serge 1990
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
3243 Serge 1991
		DRM_ERROR("Failed to disable PCH transcoder\n");
1992
 
1993
	/* Workaround: clear timing override bit. */
1994
	val = I915_READ(_TRANSA_CHICKEN2);
1995
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1996
	I915_WRITE(_TRANSA_CHICKEN2, val);
1997
}
1998
 
2327 Serge 1999
/**
2000
 * intel_enable_pipe - enable a pipe, asserting requirements
5060 serge 2001
 * @crtc: crtc responsible for the pipe
2327 Serge 2002
 *
5060 serge 2003
 * Enable @crtc's pipe, making sure that various hardware specific requirements
2327 Serge 2004
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2005
 */
5060 serge 2006
static void intel_enable_pipe(struct intel_crtc *crtc)
2327 Serge 2007
{
5060 serge 2008
	struct drm_device *dev = crtc->base.dev;
2009
	struct drm_i915_private *dev_priv = dev->dev_private;
2010
	enum pipe pipe = crtc->pipe;
3243 Serge 2011
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2012
								      pipe);
3480 Serge 2013
	enum pipe pch_transcoder;
2327 Serge 2014
	int reg;
2015
	u32 val;
2016
 
4104 Serge 2017
	assert_planes_disabled(dev_priv, pipe);
4560 Serge 2018
	assert_cursor_disabled(dev_priv, pipe);
4104 Serge 2019
	assert_sprites_disabled(dev_priv, pipe);
2020
 
3480 Serge 2021
	if (HAS_PCH_LPT(dev_priv->dev))
3243 Serge 2022
		pch_transcoder = TRANSCODER_A;
2023
	else
2024
		pch_transcoder = pipe;
2025
 
2327 Serge 2026
	/*
2027
	 * A pipe without a PLL won't actually be able to drive bits from
2028
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2029
	 * need the check.
2030
	 */
2031
	if (!HAS_PCH_SPLIT(dev_priv->dev))
5354 serge 2032
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
4560 Serge 2033
			assert_dsi_pll_enabled(dev_priv);
2034
		else
2327 Serge 2035
			assert_pll_enabled(dev_priv, pipe);
2036
	else {
5060 serge 2037
		if (crtc->config.has_pch_encoder) {
2327 Serge 2038
			/* if driving the PCH, we need FDI enabled */
3243 Serge 2039
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
3480 Serge 2040
			assert_fdi_tx_pll_enabled(dev_priv,
2041
						  (enum pipe) cpu_transcoder);
2327 Serge 2042
		}
2043
		/* FIXME: assert CPU port conditions for SNB+ */
2044
	}
2045
 
3243 Serge 2046
	reg = PIPECONF(cpu_transcoder);
2327 Serge 2047
	val = I915_READ(reg);
5060 serge 2048
	if (val & PIPECONF_ENABLE) {
5354 serge 2049
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2050
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2327 Serge 2051
		return;
5060 serge 2052
	}
2327 Serge 2053
 
2054
	I915_WRITE(reg, val | PIPECONF_ENABLE);
5060 serge 2055
	POSTING_READ(reg);
2327 Serge 2056
}
2057
 
2058
/**
2059
 * intel_disable_pipe - disable a pipe, asserting requirements
5354 serge 2060
 * @crtc: crtc whose pipe is to be disabled
2327 Serge 2061
 *
5354 serge 2062
 * Disable the pipe of @crtc, making sure that various hardware
2063
 * specific requirements are met, if applicable, e.g. plane
2064
 * disabled, panel fitter off, etc.
2327 Serge 2065
 *
2066
 * Will wait until the pipe has shut down before returning.
2067
 */
5354 serge 2068
static void intel_disable_pipe(struct intel_crtc *crtc)
2327 Serge 2069
{
5354 serge 2070
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2071
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
2072
	enum pipe pipe = crtc->pipe;
2327 Serge 2073
	int reg;
2074
	u32 val;
2075
 
3031 serge 2076
    /*
2327 Serge 2077
	 * Make sure planes won't keep trying to pump pixels to us,
2078
	 * or we might hang the display.
2079
	 */
2080
	assert_planes_disabled(dev_priv, pipe);
4560 Serge 2081
	assert_cursor_disabled(dev_priv, pipe);
3746 Serge 2082
	assert_sprites_disabled(dev_priv, pipe);
2327 Serge 2083
 
3243 Serge 2084
	reg = PIPECONF(cpu_transcoder);
2327 Serge 2085
	val = I915_READ(reg);
2086
	if ((val & PIPECONF_ENABLE) == 0)
2087
		return;
2088
 
5354 serge 2089
	/*
2090
	 * Double wide has implications for planes
2091
	 * so best keep it disabled when not needed.
2092
	 */
2093
	if (crtc->config.double_wide)
2094
		val &= ~PIPECONF_DOUBLE_WIDE;
2095
 
2096
	/* Don't disable pipe or pipe PLLs if needed */
2097
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2098
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2099
		val &= ~PIPECONF_ENABLE;
2100
 
2101
	I915_WRITE(reg, val);
2102
	if ((val & PIPECONF_ENABLE) == 0)
2103
		intel_wait_for_pipe_off(crtc);
2327 Serge 2104
}
2105
 
2106
/*
2107
 * Plane regs are double buffered, going from enabled->disabled needs a
2108
 * trigger in order to latch.  The display address reg provides this.
2109
 */
4560 Serge 2110
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2327 Serge 2111
				      enum plane plane)
2112
{
5060 serge 2113
	struct drm_device *dev = dev_priv->dev;
2114
	u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
4560 Serge 2115
 
2116
	I915_WRITE(reg, I915_READ(reg));
2117
	POSTING_READ(reg);
2327 Serge 2118
}
2119
 
2120
/**
5060 serge 2121
 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
5354 serge 2122
 * @plane:  plane to be enabled
2123
 * @crtc: crtc for the plane
2327 Serge 2124
 *
5354 serge 2125
 * Enable @plane on @crtc, making sure that the pipe is running first.
2327 Serge 2126
 */
5354 serge 2127
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
2128
					  struct drm_crtc *crtc)
2327 Serge 2129
{
5354 serge 2130
	struct drm_device *dev = plane->dev;
2131
	struct drm_i915_private *dev_priv = dev->dev_private;
2132
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 2133
 
2134
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
5354 serge 2135
	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2327 Serge 2136
 
5060 serge 2137
	if (intel_crtc->primary_enabled)
2138
		return;
4560 Serge 2139
 
2140
	intel_crtc->primary_enabled = true;
2141
 
5354 serge 2142
	dev_priv->display.update_primary_plane(crtc, plane->fb,
2143
					       crtc->x, crtc->y);
2327 Serge 2144
 
5354 serge 2145
	/*
2146
	 * BDW signals flip done immediately if the plane
2147
	 * is disabled, even if the plane enable is already
2148
	 * armed to occur at the next vblank :(
2149
	 */
2150
	if (IS_BROADWELL(dev))
2151
		intel_wait_for_vblank(dev, intel_crtc->pipe);
2327 Serge 2152
}
2153
 
2154
/**
5060 serge 2155
 * intel_disable_primary_hw_plane - disable the primary hardware plane
5354 serge 2156
 * @plane: plane to be disabled
2157
 * @crtc: crtc for the plane
2327 Serge 2158
 *
5354 serge 2159
 * Disable @plane on @crtc, making sure that the pipe is running first.
2327 Serge 2160
 */
5354 serge 2161
static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2162
					   struct drm_crtc *crtc)
2327 Serge 2163
{
5354 serge 2164
	struct drm_device *dev = plane->dev;
2165
	struct drm_i915_private *dev_priv = dev->dev_private;
2166
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 2167
 
5354 serge 2168
	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2169
 
5060 serge 2170
	if (!intel_crtc->primary_enabled)
2171
		return;
4560 Serge 2172
 
2173
	intel_crtc->primary_enabled = false;
2174
 
5354 serge 2175
	dev_priv->display.update_primary_plane(crtc, plane->fb,
2176
					       crtc->x, crtc->y);
2327 Serge 2177
}
2178
 
3746 Serge 2179
static bool need_vtd_wa(struct drm_device *dev)
2180
{
2181
#ifdef CONFIG_INTEL_IOMMU
2182
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2183
		return true;
2184
#endif
2185
	return false;
2186
}
2187
 
5060 serge 2188
static int intel_align_height(struct drm_device *dev, int height, bool tiled)
2189
{
2190
	int tile_height;
2191
 
2192
	tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
2193
	return ALIGN(height, tile_height);
2194
}
2195
 
2335 Serge 2196
int
5354 serge 2197
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2198
			   struct drm_framebuffer *fb,
5060 serge 2199
			   struct intel_engine_cs *pipelined)
2335 Serge 2200
{
5354 serge 2201
	struct drm_device *dev = fb->dev;
2335 Serge 2202
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 2203
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2335 Serge 2204
	u32 alignment;
2205
	int ret;
2327 Serge 2206
 
5060 serge 2207
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2208
 
2335 Serge 2209
	switch (obj->tiling_mode) {
2210
	case I915_TILING_NONE:
5354 serge 2211
		if (INTEL_INFO(dev)->gen >= 9)
2212
			alignment = 256 * 1024;
2213
		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2335 Serge 2214
			alignment = 128 * 1024;
2215
		else if (INTEL_INFO(dev)->gen >= 4)
2216
			alignment = 4 * 1024;
2217
		else
2218
			alignment = 64 * 1024;
2219
		break;
2220
	case I915_TILING_X:
5354 serge 2221
		if (INTEL_INFO(dev)->gen >= 9)
2222
			alignment = 256 * 1024;
2223
		else {
2335 Serge 2224
			/* pin() will align the object as required by fence */
2225
			alignment = 0;
5354 serge 2226
		}
2335 Serge 2227
		break;
2228
	case I915_TILING_Y:
4560 Serge 2229
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
2335 Serge 2230
		return -EINVAL;
2231
	default:
2232
		BUG();
2233
	}
2327 Serge 2234
 
3746 Serge 2235
	/* Note that the w/a also requires 64 PTE of padding following the
2236
	 * bo. We currently fill all unused PTE with the shadow page and so
2237
	 * we should always have valid PTE following the scanout preventing
2238
	 * the VT-d warning.
2239
	 */
2240
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2241
		alignment = 256 * 1024;
2242
 
5097 serge 2243
	/*
2244
	 * Global gtt pte registers are special registers which actually forward
2245
	 * writes to a chunk of system memory. Which means that there is no risk
2246
	 * that the register values disappear as soon as we call
2247
	 * intel_runtime_pm_put(), so it is correct to wrap only the
2248
	 * pin/unpin/fence and not more.
2249
	 */
2250
	intel_runtime_pm_get(dev_priv);
2251
 
2335 Serge 2252
	dev_priv->mm.interruptible = false;
2253
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2254
	if (ret)
2255
		goto err_interruptible;
2327 Serge 2256
 
2335 Serge 2257
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2258
	 * fence, whereas 965+ only requires a fence if using
2259
	 * framebuffer compression.  For simplicity, we always install
2260
	 * a fence as the cost is not that onerous.
2261
	 */
3480 Serge 2262
	ret = i915_gem_object_get_fence(obj);
2263
	if (ret)
2264
		goto err_unpin;
2327 Serge 2265
 
3480 Serge 2266
	i915_gem_object_pin_fence(obj);
2267
 
2335 Serge 2268
	dev_priv->mm.interruptible = true;
5097 serge 2269
	intel_runtime_pm_put(dev_priv);
2335 Serge 2270
	return 0;
2327 Serge 2271
 
2335 Serge 2272
err_unpin:
4104 Serge 2273
	i915_gem_object_unpin_from_display_plane(obj);
2335 Serge 2274
err_interruptible:
2275
	dev_priv->mm.interruptible = true;
5097 serge 2276
	intel_runtime_pm_put(dev_priv);
2335 Serge 2277
	return ret;
2278
}
2327 Serge 2279
 
3031 serge 2280
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2281
{
5060 serge 2282
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2283
 
2284
	i915_gem_object_unpin_fence(obj);
2285
//	i915_gem_object_unpin_from_display_plane(obj);
3031 serge 2286
}
2287
 
2288
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2289
 * is assumed to be a power-of-two. */
3480 Serge 2290
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2291
					     unsigned int tiling_mode,
2292
					     unsigned int cpp,
3031 serge 2293
							unsigned int pitch)
2294
{
3480 Serge 2295
	if (tiling_mode != I915_TILING_NONE) {
2296
		unsigned int tile_rows, tiles;
3031 serge 2297
 
2298
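		/*
		 * An X-major tile is 512 bytes wide and 8 scanlines tall
		 * (4096 bytes), which is where the /8, /(512/cpp) and the
		 * tile_rows * pitch * 8 + tiles * 4096 sum below come from.
		 */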
		tile_rows = *y / 8;
2299
		*y %= 8;
2300
 
3480 Serge 2301
		tiles = *x / (512/cpp);
2302
		*x %= 512/cpp;
2303
 
3031 serge 2304
		return tile_rows * pitch * 8 + tiles * 4096;
3480 Serge 2305
	} else {
2306
		unsigned int offset;
2307
 
2308
		offset = *y * pitch + *x * cpp;
2309
		*y = 0;
2310
		*x = (offset & 4095) / cpp;
2311
		return offset & -4096;
2312
	}
3031 serge 2313
}
2314
 
5060 serge 2315
int intel_format_to_fourcc(int format)
2327 Serge 2316
{
5060 serge 2317
	switch (format) {
2318
	case DISPPLANE_8BPP:
2319
		return DRM_FORMAT_C8;
2320
	case DISPPLANE_BGRX555:
2321
		return DRM_FORMAT_XRGB1555;
2322
	case DISPPLANE_BGRX565:
2323
		return DRM_FORMAT_RGB565;
2324
	default:
2325
	case DISPPLANE_BGRX888:
2326
		return DRM_FORMAT_XRGB8888;
2327
	case DISPPLANE_RGBX888:
2328
		return DRM_FORMAT_XBGR8888;
2329
	case DISPPLANE_BGRX101010:
2330
		return DRM_FORMAT_XRGB2101010;
2331
	case DISPPLANE_RGBX101010:
2332
		return DRM_FORMAT_XBGR2101010;
2333
	}
2334
}
2335
 
2336
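/*
 * Try to wrap the scanout buffer that the firmware left behind (described by
 * @plane_config) in a stolen-memory GEM object, so the boot framebuffer can
 * keep being scanned out until the first real modeset.
 */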
static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
2337
				  struct intel_plane_config *plane_config)
2338
{
2339
	struct drm_device *dev = crtc->base.dev;
2340
	struct drm_i915_gem_object *obj = NULL;
2341
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2342
	u32 base = plane_config->base;
2343
 
2344
	if (plane_config->size == 0)
2345
		return false;
2346
 
2347
	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2348
							     plane_config->size);
2349
	if (!obj)
2350
		return false;
2351
 
5367 serge 2352
    obj->map_and_fenceable=true;
5060 serge 2353
    main_fb_obj = obj;
2354
 
2355
	if (plane_config->tiled) {
2356
		obj->tiling_mode = I915_TILING_X;
2357
		obj->stride = crtc->base.primary->fb->pitches[0];
2358
	}
2359
 
2360
	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2361
	mode_cmd.width = crtc->base.primary->fb->width;
2362
	mode_cmd.height = crtc->base.primary->fb->height;
2363
	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2364
 
2365
	mutex_lock(&dev->struct_mutex);
2366
 
2367
	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2368
				   &mode_cmd, obj)) {
2369
		DRM_DEBUG_KMS("intel fb init failed\n");
2370
		goto out_unref_obj;
2371
	}
2372
 
2373
	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
2374
	mutex_unlock(&dev->struct_mutex);
2375
 
2376
	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2377
	return true;
2378
 
2379
out_unref_obj:
2380
	drm_gem_object_unreference(&obj->base);
2381
	mutex_unlock(&dev->struct_mutex);
2382
	return false;
2383
}
2384
 
2385
static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2386
				 struct intel_plane_config *plane_config)
2387
{
2388
	struct drm_device *dev = intel_crtc->base.dev;
5354 serge 2389
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2390
	struct drm_crtc *c;
2391
	struct intel_crtc *i;
2392
	struct drm_i915_gem_object *obj;
2393
 
2394
	if (!intel_crtc->base.primary->fb)
2395
		return;
2396
 
2397
	if (intel_alloc_plane_obj(intel_crtc, plane_config))
2398
		return;
2399
 
2400
	kfree(intel_crtc->base.primary->fb);
2401
	intel_crtc->base.primary->fb = NULL;
2402
 
2403
	/*
2404
	 * Failed to alloc the obj, check to see if we should share
2405
	 * an fb with another CRTC instead
2406
	 */
2407
	for_each_crtc(dev, c) {
2408
		i = to_intel_crtc(c);
2409
 
2410
		if (c == &intel_crtc->base)
2411
			continue;
2412
 
2413
		if (!i->active)
2414
			continue;
2415
 
2416
		obj = intel_fb_obj(c->primary->fb);
2417
		if (obj == NULL)
2418
			continue;
2419
 
2420
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
5354 serge 2421
			if (obj->tiling_mode != I915_TILING_NONE)
2422
				dev_priv->preserve_bios_swizzle = true;
2423
 
5060 serge 2424
			drm_framebuffer_reference(c->primary->fb);
2425
			intel_crtc->base.primary->fb = c->primary->fb;
2426
			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2427
			break;
2428
		}
2429
	}
2430
}
2431
 
2432
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2433
				     struct drm_framebuffer *fb,
2434
				     int x, int y)
2435
{
2327 Serge 2436
    struct drm_device *dev = crtc->dev;
2437
    struct drm_i915_private *dev_priv = dev->dev_private;
2438
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5354 serge 2439
	struct drm_i915_gem_object *obj;
2327 Serge 2440
    int plane = intel_crtc->plane;
3031 serge 2441
	unsigned long linear_offset;
2327 Serge 2442
    u32 dspcntr;
5354 serge 2443
	u32 reg = DSPCNTR(plane);
2444
	int pixel_size;
2327 Serge 2445
 
5354 serge 2446
	if (!intel_crtc->primary_enabled) {
2447
		I915_WRITE(reg, 0);
2448
		if (INTEL_INFO(dev)->gen >= 4)
2449
			I915_WRITE(DSPSURF(plane), 0);
2450
		else
2451
			I915_WRITE(DSPADDR(plane), 0);
2452
		POSTING_READ(reg);
2453
		return;
2454
	}
2455
 
2456
	obj = intel_fb_obj(fb);
2457
	if (WARN_ON(obj == NULL))
2458
		return;
2459
 
2460
	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2461
 
2462
	dspcntr = DISPPLANE_GAMMA_ENABLE;
2463
 
2464
	dspcntr |= DISPLAY_PLANE_ENABLE;
2465
 
2466
	if (INTEL_INFO(dev)->gen < 4) {
2467
		if (intel_crtc->pipe == PIPE_B)
2468
			dspcntr |= DISPPLANE_SEL_PIPE_B;
2469
 
2470
		/* pipesrc and dspsize control the size that is scaled from,
2471
		 * which should always be the user's requested size.
2472
		 */
2473
		I915_WRITE(DSPSIZE(plane),
2474
			   ((intel_crtc->config.pipe_src_h - 1) << 16) |
2475
			   (intel_crtc->config.pipe_src_w - 1));
2476
		I915_WRITE(DSPPOS(plane), 0);
2477
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2478
		I915_WRITE(PRIMSIZE(plane),
2479
			   ((intel_crtc->config.pipe_src_h - 1) << 16) |
2480
			   (intel_crtc->config.pipe_src_w - 1));
2481
		I915_WRITE(PRIMPOS(plane), 0);
2482
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
2483
	}
2484
 
3243 Serge 2485
	switch (fb->pixel_format) {
2486
	case DRM_FORMAT_C8:
2327 Serge 2487
        dspcntr |= DISPPLANE_8BPP;
2488
        break;
3243 Serge 2489
	case DRM_FORMAT_XRGB1555:
2490
	case DRM_FORMAT_ARGB1555:
2491
		dspcntr |= DISPPLANE_BGRX555;
2492
		break;
2493
	case DRM_FORMAT_RGB565:
2494
		dspcntr |= DISPPLANE_BGRX565;
2495
		break;
2496
	case DRM_FORMAT_XRGB8888:
2497
	case DRM_FORMAT_ARGB8888:
2498
		dspcntr |= DISPPLANE_BGRX888;
2499
		break;
2500
	case DRM_FORMAT_XBGR8888:
2501
	case DRM_FORMAT_ABGR8888:
2502
		dspcntr |= DISPPLANE_RGBX888;
2503
		break;
2504
	case DRM_FORMAT_XRGB2101010:
2505
	case DRM_FORMAT_ARGB2101010:
2506
		dspcntr |= DISPPLANE_BGRX101010;
2327 Serge 2507
        break;
3243 Serge 2508
	case DRM_FORMAT_XBGR2101010:
2509
	case DRM_FORMAT_ABGR2101010:
2510
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2511
        break;
2512
    default:
3746 Serge 2513
		BUG();
2327 Serge 2514
    }
3243 Serge 2515
 
5354 serge 2516
	if (INTEL_INFO(dev)->gen >= 4 &&
2517
	    obj->tiling_mode != I915_TILING_NONE)
2327 Serge 2518
            dspcntr |= DISPPLANE_TILED;
2519
 
4104 Serge 2520
	if (IS_G4X(dev))
2521
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2522
 
5354 serge 2523
	linear_offset = y * fb->pitches[0] + x * pixel_size;
2327 Serge 2524
 
3031 serge 2525
	if (INTEL_INFO(dev)->gen >= 4) {
2526
		intel_crtc->dspaddr_offset =
3480 Serge 2527
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
5354 serge 2528
						       pixel_size,
3031 serge 2529
							   fb->pitches[0]);
2530
		linear_offset -= intel_crtc->dspaddr_offset;
2531
	} else {
2532
		intel_crtc->dspaddr_offset = linear_offset;
2533
	}
2534
 
5354 serge 2535
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2536
		dspcntr |= DISPPLANE_ROTATE_180;
2537
 
2538
		x += (intel_crtc->config.pipe_src_w - 1);
2539
		y += (intel_crtc->config.pipe_src_h - 1);
2540
 
2541
		/* Find the last pixel of the last line of the display
2542
		 * data and add it to linear_offset. */
2543
		linear_offset +=
2544
			(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
2545
			(intel_crtc->config.pipe_src_w - 1) * pixel_size;
2546
	}
2547
 
2548
	I915_WRITE(reg, dspcntr);
2549
 
4104 Serge 2550
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2551
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2552
		      fb->pitches[0]);
2342 Serge 2553
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2327 Serge 2554
    if (INTEL_INFO(dev)->gen >= 4) {
4560 Serge 2555
		I915_WRITE(DSPSURF(plane),
4104 Serge 2556
				     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2327 Serge 2557
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2558
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2327 Serge 2559
    } else
4104 Serge 2560
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2327 Serge 2561
    POSTING_READ(reg);
2562
}
2563
 
5060 serge 2564
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2565
					 struct drm_framebuffer *fb,
2566
					 int x, int y)
2327 Serge 2567
{
2568
    struct drm_device *dev = crtc->dev;
2569
    struct drm_i915_private *dev_priv = dev->dev_private;
2570
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5354 serge 2571
	struct drm_i915_gem_object *obj;
2327 Serge 2572
    int plane = intel_crtc->plane;
3031 serge 2573
	unsigned long linear_offset;
2327 Serge 2574
    u32 dspcntr;
5354 serge 2575
	u32 reg = DSPCNTR(plane);
2576
	int pixel_size;
2327 Serge 2577
 
5354 serge 2578
	if (!intel_crtc->primary_enabled) {
2579
		I915_WRITE(reg, 0);
2580
		I915_WRITE(DSPSURF(plane), 0);
2581
		POSTING_READ(reg);
2582
		return;
2583
	}
2584
 
2585
	obj = intel_fb_obj(fb);
2586
	if (WARN_ON(obj == NULL))
2587
		return;
2588
 
2589
	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2590
 
2591
	dspcntr = DISPPLANE_GAMMA_ENABLE;
2592
 
2593
	dspcntr |= DISPLAY_PLANE_ENABLE;
2594
 
2595
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2596
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2597
 
3243 Serge 2598
	switch (fb->pixel_format) {
2599
	case DRM_FORMAT_C8:
2327 Serge 2600
        dspcntr |= DISPPLANE_8BPP;
2601
        break;
3243 Serge 2602
	case DRM_FORMAT_RGB565:
2603
		dspcntr |= DISPPLANE_BGRX565;
2327 Serge 2604
        break;
3243 Serge 2605
	case DRM_FORMAT_XRGB8888:
2606
	case DRM_FORMAT_ARGB8888:
2607
		dspcntr |= DISPPLANE_BGRX888;
2608
		break;
2609
	case DRM_FORMAT_XBGR8888:
2610
	case DRM_FORMAT_ABGR8888:
2611
		dspcntr |= DISPPLANE_RGBX888;
2612
		break;
2613
	case DRM_FORMAT_XRGB2101010:
2614
	case DRM_FORMAT_ARGB2101010:
2615
		dspcntr |= DISPPLANE_BGRX101010;
2616
		break;
2617
	case DRM_FORMAT_XBGR2101010:
2618
	case DRM_FORMAT_ABGR2101010:
2619
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2620
        break;
2621
    default:
3746 Serge 2622
		BUG();
2327 Serge 2623
    }
2624
 
3480 Serge 2625
	if (obj->tiling_mode != I915_TILING_NONE)
2626
		dspcntr |= DISPPLANE_TILED;
2327 Serge 2627
 
5354 serge 2628
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2327 Serge 2629
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2630
 
5354 serge 2631
	linear_offset = y * fb->pitches[0] + x * pixel_size;
3031 serge 2632
	intel_crtc->dspaddr_offset =
3480 Serge 2633
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
5354 serge 2634
					       pixel_size,
3031 serge 2635
						   fb->pitches[0]);
2636
	linear_offset -= intel_crtc->dspaddr_offset;
5354 serge 2637
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2638
		dspcntr |= DISPPLANE_ROTATE_180;
2327 Serge 2639
 
5354 serge 2640
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2641
			x += (intel_crtc->config.pipe_src_w - 1);
2642
			y += (intel_crtc->config.pipe_src_h - 1);
2643
 
2644
			/* Find the last pixel of the last line of the display
2645
			 * data and add it to linear_offset. */
2646
			linear_offset +=
2647
				(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
2648
				(intel_crtc->config.pipe_src_w - 1) * pixel_size;
2649
		}
2650
	}
2651
 
2652
	I915_WRITE(reg, dspcntr);
2653
 
4104 Serge 2654
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2655
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2656
		      fb->pitches[0]);
2342 Serge 2657
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
4560 Serge 2658
	I915_WRITE(DSPSURF(plane),
4104 Serge 2659
			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
4560 Serge 2660
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3243 Serge 2661
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2662
	} else {
2330 Serge 2663
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2664
		I915_WRITE(DSPLINOFF(plane), linear_offset);
3243 Serge 2665
	}
2330 Serge 2666
	POSTING_READ(reg);
2327 Serge 2667
}
2668
 
5354 serge 2669
static void skylake_update_primary_plane(struct drm_crtc *crtc,
2670
					 struct drm_framebuffer *fb,
2671
					 int x, int y)
2672
{
2673
	struct drm_device *dev = crtc->dev;
2674
	struct drm_i915_private *dev_priv = dev->dev_private;
2675
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2676
	struct intel_framebuffer *intel_fb;
2677
	struct drm_i915_gem_object *obj;
2678
	int pipe = intel_crtc->pipe;
2679
	u32 plane_ctl, stride;
2680
 
2681
	if (!intel_crtc->primary_enabled) {
2682
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
2683
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
2684
		POSTING_READ(PLANE_CTL(pipe, 0));
2685
		return;
2686
	}
2687
 
2688
	plane_ctl = PLANE_CTL_ENABLE |
2689
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
2690
		    PLANE_CTL_PIPE_CSC_ENABLE;
2691
 
2692
	switch (fb->pixel_format) {
2693
	case DRM_FORMAT_RGB565:
2694
		plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
2695
		break;
2696
	case DRM_FORMAT_XRGB8888:
2697
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2698
		break;
2699
	case DRM_FORMAT_XBGR8888:
2700
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
2701
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2702
		break;
2703
	case DRM_FORMAT_XRGB2101010:
2704
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2705
		break;
2706
	case DRM_FORMAT_XBGR2101010:
2707
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
2708
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2709
		break;
2710
	default:
2711
		BUG();
2712
	}
2713
 
2714
	intel_fb = to_intel_framebuffer(fb);
2715
	obj = intel_fb->obj;
2716
 
2717
	/*
2718
	 * The stride is either expressed as a multiple of 64-byte chunks for
2719
	 * linear buffers or in number of tiles for tiled buffers.
2720
	 */
2721
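	/* i.e. 64-byte units for linear (>> 6), 512-byte X-tile rows (>> 9). */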
	switch (obj->tiling_mode) {
2722
	case I915_TILING_NONE:
2723
		stride = fb->pitches[0] >> 6;
2724
		break;
2725
	case I915_TILING_X:
2726
		plane_ctl |= PLANE_CTL_TILED_X;
2727
		stride = fb->pitches[0] >> 9;
2728
		break;
2729
	default:
2730
		BUG();
2731
	}
2732
 
2733
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2734
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
2735
		plane_ctl |= PLANE_CTL_ROTATE_180;
2736
 
2737
	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2738
 
2739
	DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
2740
		      i915_gem_obj_ggtt_offset(obj),
2741
		      x, y, fb->width, fb->height,
2742
		      fb->pitches[0]);
2743
 
2744
	I915_WRITE(PLANE_POS(pipe, 0), 0);
2745
	I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
2746
	I915_WRITE(PLANE_SIZE(pipe, 0),
2747
		   (intel_crtc->config.pipe_src_h - 1) << 16 |
2748
		   (intel_crtc->config.pipe_src_w - 1));
2749
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
2750
	I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
2751
 
2752
	POSTING_READ(PLANE_SURF(pipe, 0));
2753
}
2754
 
2327 Serge 2755
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2756
static int
2757
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2758
			   int x, int y, enum mode_set_atomic state)
2759
{
2760
	struct drm_device *dev = crtc->dev;
2761
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2762
 
2763
	if (dev_priv->display.disable_fbc)
2764
		dev_priv->display.disable_fbc(dev);
2765
 
5060 serge 2766
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2767
 
2768
	return 0;
3031 serge 2769
}
2770
 
2771
#if 0
5354 serge 2772
static void intel_complete_page_flips(struct drm_device *dev)
4104 Serge 2773
{
2774
	struct drm_crtc *crtc;
2775
 
5060 serge 2776
	for_each_crtc(dev, crtc) {
4104 Serge 2777
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2778
		enum plane plane = intel_crtc->plane;
2779
 
2780
		intel_prepare_page_flip(dev, plane);
2781
		intel_finish_page_flip_plane(dev, plane);
2782
	}
5354 serge 2783
}
4104 Serge 2784
 
5354 serge 2785
static void intel_update_primary_planes(struct drm_device *dev)
2786
{
2787
	struct drm_i915_private *dev_priv = dev->dev_private;
2788
	struct drm_crtc *crtc;
2789
 
5060 serge 2790
	for_each_crtc(dev, crtc) {
4104 Serge 2791
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2792
 
5060 serge 2793
		drm_modeset_lock(&crtc->mutex, NULL);
4560 Serge 2794
		/*
2795
		 * FIXME: Once we have proper support for primary planes (and
2796
		 * disabling them without disabling the entire crtc) allow again
5060 serge 2797
		 * a NULL crtc->primary->fb.
4560 Serge 2798
		 */
5060 serge 2799
		if (intel_crtc->active && crtc->primary->fb)
2800
			dev_priv->display.update_primary_plane(crtc,
2801
							       crtc->primary->fb,
2802
							       crtc->x,
2803
							       crtc->y);
2804
		drm_modeset_unlock(&crtc->mutex);
4104 Serge 2805
	}
2806
}
2807
 
5354 serge 2808
void intel_prepare_reset(struct drm_device *dev)
2809
{
2810
	struct drm_i915_private *dev_priv = to_i915(dev);
2811
	struct intel_crtc *crtc;
2812
 
2813
	/* no reset support for gen2 */
2814
	if (IS_GEN2(dev))
2815
		return;
2816
 
2817
	/* reset doesn't touch the display */
2818
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
2819
		return;
2820
 
2821
	drm_modeset_lock_all(dev);
2822
 
2823
	/*
2824
	 * Disabling the crtcs gracefully seems nicer. Also the
2825
	 * g33 docs say we should at least disable all the planes.
2826
	 */
2827
	for_each_intel_crtc(dev, crtc) {
2828
		if (crtc->active)
2829
			dev_priv->display.crtc_disable(&crtc->base);
2830
	}
2831
}
2832
 
2833
void intel_finish_reset(struct drm_device *dev)
2834
{
2835
	struct drm_i915_private *dev_priv = to_i915(dev);
2836
 
2837
	/*
2838
	 * Flips in the rings will be nuked by the reset,
2839
	 * so complete all pending flips so that user space
2840
	 * will get its events and not get stuck.
2841
	 */
2842
	intel_complete_page_flips(dev);
2843
 
2844
	/* no reset support for gen2 */
2845
	if (IS_GEN2(dev))
2846
		return;
2847
 
2848
	/* reset doesn't touch the display */
2849
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
2850
		/*
2851
		 * Flips in the rings have been nuked by the reset,
2852
		 * so update the base address of all primary
2853
		 * planes to the the last fb to make sure we're
2854
		 * showing the correct fb after a reset.
2855
		 */
2856
		intel_update_primary_planes(dev);
2857
		return;
2858
	}
2859
 
2860
	/*
2861
	 * The display has been reset as well,
2862
	 * so need a full re-initialization.
2863
	 */
2864
	intel_runtime_pm_disable_interrupts(dev_priv);
2865
	intel_runtime_pm_enable_interrupts(dev_priv);
2866
 
2867
	intel_modeset_init_hw(dev);
2868
 
2869
	spin_lock_irq(&dev_priv->irq_lock);
2870
	if (dev_priv->display.hpd_irq_setup)
2871
		dev_priv->display.hpd_irq_setup(dev);
2872
	spin_unlock_irq(&dev_priv->irq_lock);
2873
 
2874
	intel_modeset_setup_hw_state(dev, true);
2875
 
2876
	intel_hpd_init(dev_priv);
2877
 
2878
	drm_modeset_unlock_all(dev);
2879
}
2880
 
3031 serge 2881
static int
2882
intel_finish_fb(struct drm_framebuffer *old_fb)
2883
{
5060 serge 2884
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
3031 serge 2885
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2886
	bool was_interruptible = dev_priv->mm.interruptible;
2327 Serge 2887
	int ret;
2888
 
3031 serge 2889
	/* Big Hammer, we also need to ensure that any pending
2890
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2891
	 * current scanout is retired before unpinning the old
2892
	 * framebuffer.
2893
	 *
2894
	 * This should only fail upon a hung GPU, in which case we
2895
	 * can safely continue.
2896
	 */
2897
	dev_priv->mm.interruptible = false;
2898
	ret = i915_gem_object_finish_gpu(obj);
2899
	dev_priv->mm.interruptible = was_interruptible;
2327 Serge 2900
 
3031 serge 2901
	return ret;
2327 Serge 2902
}
4104 Serge 2903
 
5060 serge 2904
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
4104 Serge 2905
{
2906
	struct drm_device *dev = crtc->dev;
5060 serge 2907
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 2908
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 2909
	bool pending;
4104 Serge 2910
 
5060 serge 2911
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2912
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2913
		return false;
4104 Serge 2914
 
5354 serge 2915
	spin_lock_irq(&dev->event_lock);
5060 serge 2916
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
5354 serge 2917
	spin_unlock_irq(&dev->event_lock);
4104 Serge 2918
 
5060 serge 2919
	return pending;
4104 Serge 2920
}
3031 serge 2921
#endif
2327 Serge 2922
 
5354 serge 2923
static void intel_update_pipe_size(struct intel_crtc *crtc)
2924
{
2925
	struct drm_device *dev = crtc->base.dev;
2926
	struct drm_i915_private *dev_priv = dev->dev_private;
2927
	const struct drm_display_mode *adjusted_mode;
2928
 
2929
	if (!i915.fastboot)
2930
		return;
2931
 
2932
	/*
2933
	 * Update pipe size and adjust fitter if needed: the reason for this is
2934
	 * that in compute_mode_changes we check the native mode (not the pfit
2935
	 * mode) to see if we can flip rather than do a full mode set. In the
2936
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
2937
	 * pfit state, we'll end up with a big fb scanned out into the wrong
2938
	 * sized surface.
2939
	 *
2940
	 * To fix this properly, we need to hoist the checks up into
2941
	 * compute_mode_changes (or above), check the actual pfit state and
2942
	 * whether the platform allows pfit disable with pipe active, and only
2943
	 * then update the pipesrc and pfit state, even on the flip path.
2944
	 */
2945
 
2946
	adjusted_mode = &crtc->config.adjusted_mode;
2947
 
2948
	I915_WRITE(PIPESRC(crtc->pipe),
2949
		   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2950
		   (adjusted_mode->crtc_vdisplay - 1));
2951
	if (!crtc->config.pch_pfit.enabled &&
2952
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2953
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2954
		I915_WRITE(PF_CTL(crtc->pipe), 0);
2955
		I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
2956
		I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
2957
	}
2958
	crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2959
	crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2960
}
2961
 
2327 Serge 2962
static int
2963
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
3031 serge 2964
		    struct drm_framebuffer *fb)
2327 Serge 2965
{
2966
	struct drm_device *dev = crtc->dev;
3031 serge 2967
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 2968
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 2969
	enum pipe pipe = intel_crtc->pipe;
2970
	struct drm_framebuffer *old_fb = crtc->primary->fb;
2971
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
2342 Serge 2972
	int ret;
2327 Serge 2973
 
5060 serge 2974
 
2327 Serge 2975
	/* no fb bound */
3031 serge 2976
	if (!fb) {
2327 Serge 2977
		DRM_ERROR("No FB bound\n");
2978
		return 0;
2979
	}
2980
 
3746 Serge 2981
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
4104 Serge 2982
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2983
			  plane_name(intel_crtc->plane),
3746 Serge 2984
				INTEL_INFO(dev)->num_pipes);
2327 Serge 2985
		return -EINVAL;
2986
	}
2987
 
2988
	mutex_lock(&dev->struct_mutex);
5354 serge 2989
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
5060 serge 2990
	if (ret == 0)
5354 serge 2991
		i915_gem_track_fb(old_obj, intel_fb_obj(fb),
5060 serge 2992
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
2993
	mutex_unlock(&dev->struct_mutex);
4280 Serge 2994
    if (ret != 0) {
2995
       DRM_ERROR("pin & fence failed\n");
2996
       return ret;
2997
    }
2327 Serge 2998
 
5060 serge 2999
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2327 Serge 3000
 
5060 serge 3001
	if (intel_crtc->active)
3002
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
3003
 
3004
	crtc->primary->fb = fb;
3031 serge 3005
	crtc->x = x;
3006
	crtc->y = y;
3007
 
3008
	if (old_fb) {
4104 Serge 3009
		if (intel_crtc->active && old_fb != fb)
3031 serge 3010
			intel_wait_for_vblank(dev, intel_crtc->pipe);
5060 serge 3011
		mutex_lock(&dev->struct_mutex);
3012
		intel_unpin_fb_obj(old_obj);
3013
		mutex_unlock(&dev->struct_mutex);
3031 serge 3014
	}
3015
 
5060 serge 3016
	mutex_lock(&dev->struct_mutex);
3031 serge 3017
	intel_update_fbc(dev);
2336 Serge 3018
	mutex_unlock(&dev->struct_mutex);
2327 Serge 3019
 
2336 Serge 3020
    return 0;
2327 Serge 3021
}
3022
 
3023
static void intel_fdi_normal_train(struct drm_crtc *crtc)
3024
{
3025
	struct drm_device *dev = crtc->dev;
3026
	struct drm_i915_private *dev_priv = dev->dev_private;
3027
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3028
	int pipe = intel_crtc->pipe;
3029
	u32 reg, temp;
3030
 
3031
	/* enable normal train */
3032
	reg = FDI_TX_CTL(pipe);
3033
	temp = I915_READ(reg);
3034
	if (IS_IVYBRIDGE(dev)) {
3035
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3036
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3037
	} else {
3038
		temp &= ~FDI_LINK_TRAIN_NONE;
3039
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3040
	}
3041
	I915_WRITE(reg, temp);
3042
 
3043
	reg = FDI_RX_CTL(pipe);
3044
	temp = I915_READ(reg);
3045
	if (HAS_PCH_CPT(dev)) {
3046
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3047
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3048
	} else {
3049
		temp &= ~FDI_LINK_TRAIN_NONE;
3050
		temp |= FDI_LINK_TRAIN_NONE;
3051
	}
3052
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3053
 
3054
	/* wait one idle pattern time */
3055
	POSTING_READ(reg);
3056
	udelay(1000);
3057
 
3058
	/* IVB wants error correction enabled */
3059
	if (IS_IVYBRIDGE(dev))
3060
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3061
			   FDI_FE_ERRC_ENABLE);
3062
}
3063
 
4280 Serge 3064
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
4104 Serge 3065
{
4280 Serge 3066
	return crtc->base.enabled && crtc->active &&
3067
		crtc->config.has_pch_encoder;
4104 Serge 3068
}
3069
 
3243 Serge 3070
static void ivb_modeset_global_resources(struct drm_device *dev)
2327 Serge 3071
{
3072
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 3073
	struct intel_crtc *pipe_B_crtc =
3074
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
3075
	struct intel_crtc *pipe_C_crtc =
3076
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
3077
	uint32_t temp;
2327 Serge 3078
 
4104 Serge 3079
	/*
3080
	 * When everything is off disable fdi C so that we could enable fdi B
3081
	 * with all lanes. Note that we don't care about enabled pipes without
3082
	 * an enabled pch encoder.
3083
	 */
3084
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
3085
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
3243 Serge 3086
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3087
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3088
 
3089
		temp = I915_READ(SOUTH_CHICKEN1);
3090
		temp &= ~FDI_BC_BIFURCATION_SELECT;
3091
		DRM_DEBUG_KMS("disabling fdi C rx\n");
3092
		I915_WRITE(SOUTH_CHICKEN1, temp);
3093
	}
2327 Serge 3094
}
3095
 
3096
/* The FDI link training functions for ILK/Ibexpeak. */
3097
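/*
 * Training runs in two phases: pattern 1 is transmitted until the receiver
 * sets FDI_RX_BIT_LOCK, then pattern 2 until it sets FDI_RX_SYMBOL_LOCK.
 */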
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3098
{
3099
    struct drm_device *dev = crtc->dev;
3100
    struct drm_i915_private *dev_priv = dev->dev_private;
3101
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3102
    int pipe = intel_crtc->pipe;
3103
    u32 reg, temp, tries;
3104
 
5060 serge 3105
	/* FDI needs bits from pipe first */
2327 Serge 3106
    assert_pipe_enabled(dev_priv, pipe);
3107
 
3108
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3109
       for train result */
3110
    reg = FDI_RX_IMR(pipe);
3111
    temp = I915_READ(reg);
3112
    temp &= ~FDI_RX_SYMBOL_LOCK;
3113
    temp &= ~FDI_RX_BIT_LOCK;
3114
    I915_WRITE(reg, temp);
3115
    I915_READ(reg);
3116
    udelay(150);
3117
 
3118
    /* enable CPU FDI TX and PCH FDI RX */
3119
    reg = FDI_TX_CTL(pipe);
3120
    temp = I915_READ(reg);
4104 Serge 3121
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3122
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2327 Serge 3123
    temp &= ~FDI_LINK_TRAIN_NONE;
3124
    temp |= FDI_LINK_TRAIN_PATTERN_1;
3125
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3126
 
3127
    reg = FDI_RX_CTL(pipe);
3128
    temp = I915_READ(reg);
3129
    temp &= ~FDI_LINK_TRAIN_NONE;
3130
    temp |= FDI_LINK_TRAIN_PATTERN_1;
3131
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3132
 
3133
    POSTING_READ(reg);
3134
    udelay(150);
3135
 
3136
    /* Ironlake workaround, enable clock pointer after FDI enable */
3137
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3138
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3139
               FDI_RX_PHASE_SYNC_POINTER_EN);
3140
 
3141
    reg = FDI_RX_IIR(pipe);
3142
    for (tries = 0; tries < 5; tries++) {
3143
        temp = I915_READ(reg);
3144
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3145
 
3146
        if ((temp & FDI_RX_BIT_LOCK)) {
3147
            DRM_DEBUG_KMS("FDI train 1 done.\n");
3148
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3149
            break;
3150
        }
3151
    }
3152
    if (tries == 5)
3153
        DRM_ERROR("FDI train 1 fail!\n");
3154
 
3155
    /* Train 2 */
3156
    reg = FDI_TX_CTL(pipe);
3157
    temp = I915_READ(reg);
3158
    temp &= ~FDI_LINK_TRAIN_NONE;
3159
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3160
    I915_WRITE(reg, temp);
3161
 
3162
    reg = FDI_RX_CTL(pipe);
3163
    temp = I915_READ(reg);
3164
    temp &= ~FDI_LINK_TRAIN_NONE;
3165
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3166
    I915_WRITE(reg, temp);
3167
 
3168
    POSTING_READ(reg);
3169
    udelay(150);
3170
 
3171
    reg = FDI_RX_IIR(pipe);
3172
    for (tries = 0; tries < 5; tries++) {
3173
        temp = I915_READ(reg);
3174
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3175
 
3176
        if (temp & FDI_RX_SYMBOL_LOCK) {
3177
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3178
            DRM_DEBUG_KMS("FDI train 2 done.\n");
3179
            break;
3180
        }
3181
    }
3182
    if (tries == 5)
3183
        DRM_ERROR("FDI train 2 fail!\n");
3184
 
3185
    DRM_DEBUG_KMS("FDI train done\n");
3186
 
3187
}

static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
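
/*
 * Note added for clarity (not in the original source): one of the three
 * training routines above is installed as dev_priv->display.fdi_link_train
 * for the relevant platform (selection happens outside this section) and is
 * invoked from ironlake_pch_enable() and haswell_crtc_enable() below.
 */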
3446
 
3031 serge 3447
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2327 Serge 3448
{
3031 serge 3449
	struct drm_device *dev = intel_crtc->base.dev;
2327 Serge 3450
	struct drm_i915_private *dev_priv = dev->dev_private;
3451
	int pipe = intel_crtc->pipe;
3452
	u32 reg, temp;
3453
 
3454
 
3455
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3456
	reg = FDI_RX_CTL(pipe);
3457
	temp = I915_READ(reg);
4104 Serge 3458
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3459
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3480 Serge 3460
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3461
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3462
 
3463
	POSTING_READ(reg);
3464
	udelay(200);
3465
 
3466
	/* Switch from Rawclk to PCDclk */
3467
	temp = I915_READ(reg);
3468
	I915_WRITE(reg, temp | FDI_PCDCLK);
3469
 
3470
	POSTING_READ(reg);
3471
	udelay(200);
3472
 
3473
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3474
	reg = FDI_TX_CTL(pipe);
3475
	temp = I915_READ(reg);
3476
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3477
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3478
 
3479
		POSTING_READ(reg);
3480
		udelay(100);
3481
	}
3482
}
3483
 
3031 serge 3484
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3485
{
3486
	struct drm_device *dev = intel_crtc->base.dev;
3487
	struct drm_i915_private *dev_priv = dev->dev_private;
3488
	int pipe = intel_crtc->pipe;
3489
	u32 reg, temp;
3490
 
3491
	/* Switch from PCDclk to Rawclk */
3492
	reg = FDI_RX_CTL(pipe);
3493
	temp = I915_READ(reg);
3494
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3495
 
3496
	/* Disable CPU FDI TX PLL */
3497
	reg = FDI_TX_CTL(pipe);
3498
	temp = I915_READ(reg);
3499
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3500
 
3501
	POSTING_READ(reg);
3502
	udelay(100);
3503
 
3504
	reg = FDI_RX_CTL(pipe);
3505
	temp = I915_READ(reg);
3506
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3507
 
3508
	/* Wait for the clocks to turn off. */
3509
	POSTING_READ(reg);
3510
	udelay(100);
3511
}
3512
 
2327 Serge 3513
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3514
{
3515
	struct drm_device *dev = crtc->dev;
3516
	struct drm_i915_private *dev_priv = dev->dev_private;
3517
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3518
	int pipe = intel_crtc->pipe;
3519
	u32 reg, temp;
3520
 
3521
	/* disable CPU FDI tx and PCH FDI rx */
3522
	reg = FDI_TX_CTL(pipe);
3523
	temp = I915_READ(reg);
3524
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3525
	POSTING_READ(reg);
3526
 
3527
	reg = FDI_RX_CTL(pipe);
3528
	temp = I915_READ(reg);
3529
	temp &= ~(0x7 << 16);
3480 Serge 3530
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3531
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3532
 
3533
	POSTING_READ(reg);
3534
	udelay(100);
3535
 
3536
	/* Ironlake workaround, disable clock pointer after downing FDI */
5060 serge 3537
	if (HAS_PCH_IBX(dev))
2327 Serge 3538
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3539
 
3540
	/* still set train pattern 1 */
3541
	reg = FDI_TX_CTL(pipe);
3542
	temp = I915_READ(reg);
3543
	temp &= ~FDI_LINK_TRAIN_NONE;
3544
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3545
	I915_WRITE(reg, temp);
3546
 
3547
	reg = FDI_RX_CTL(pipe);
3548
	temp = I915_READ(reg);
3549
	if (HAS_PCH_CPT(dev)) {
3550
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3551
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3552
	} else {
3553
		temp &= ~FDI_LINK_TRAIN_NONE;
3554
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3555
	}
3556
	/* BPC in FDI rx is consistent with that in PIPECONF */
3557
	temp &= ~(0x07 << 16);
3480 Serge 3558
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3559
	I915_WRITE(reg, temp);
3560
 
3561
	POSTING_READ(reg);
3562
	udelay(100);
3563
}
3564
 
5060 serge 3565
bool intel_has_pending_fb_unpin(struct drm_device *dev)
2327 Serge 3566
{
5060 serge 3567
	struct intel_crtc *crtc;
2327 Serge 3568
 
5060 serge 3569
	/* Note that we don't need to be called with mode_config.lock here
3570
	 * as our list of CRTC objects is static for the lifetime of the
3571
	 * device and so cannot disappear as we iterate. Similarly, we can
3572
	 * happily treat the predicates as racy, atomic checks as userspace
3573
	 * cannot claim and pin a new fb without at least acquiring the
3574
	 * struct_mutex and so serialising with us.
3575
	 */
3576
	for_each_intel_crtc(dev, crtc) {
3577
		if (atomic_read(&crtc->unpin_work_count) == 0)
3578
			continue;
2327 Serge 3579
 
5060 serge 3580
		if (crtc->unpin_work)
3581
			intel_wait_for_vblank(dev, crtc->pipe);
3031 serge 3582
 
5060 serge 3583
		return true;
3584
	}
3585
 
3586
	return false;
2327 Serge 3587
}
3588
 
3031 serge 3589
#if 0
5060 serge 3590
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2327 Serge 3591
{
3031 serge 3592
	struct drm_device *dev = crtc->dev;
3593
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 3594
 
3480 Serge 3595
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
5354 serge 3596
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3597
				       !intel_crtc_has_pending_flip(crtc),
3598
				       60*HZ) == 0)) {
3599
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3480 Serge 3600
 
5354 serge 3601
		spin_lock_irq(&dev->event_lock);
3602
		if (intel_crtc->unpin_work) {
3603
			WARN_ONCE(1, "Removing stuck page flip\n");
3604
			page_flip_completed(intel_crtc);
3605
		}
3606
		spin_unlock_irq(&dev->event_lock);
3607
	}
3031 serge 3608
 
5354 serge 3609
	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_finish_fb(crtc->primary->fb);
		mutex_unlock(&dev->struct_mutex);
	}
2327 Serge 3614
}
3031 serge 3615
#endif
2327 Serge 3616
 
3031 serge 3617
/* Program iCLKIP clock to the desired frequency */
3618
static void lpt_program_iclkip(struct drm_crtc *crtc)
3619
{
3620
	struct drm_device *dev = crtc->dev;
3621
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3622
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3031 serge 3623
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3624
	u32 temp;
3625
 
3480 Serge 3626
	mutex_lock(&dev_priv->dpio_lock);
3627
 
3031 serge 3628
	/* It is necessary to ungate the pixclk gate prior to programming
3629
	 * the divisors, and gate it back when it is done.
3630
	 */
3631
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3632
 
3633
	/* Disable SSCCTL */
3634
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3243 Serge 3635
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3636
				SBI_SSCCTL_DISABLE,
3637
			SBI_ICLK);
3031 serge 3638
 
3639
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
4560 Serge 3640
	if (clock == 20000) {
3031 serge 3641
		auxdiv = 1;
3642
		divsel = 0x41;
3643
		phaseinc = 0x20;
3644
	} else {
3645
		/* The iCLK virtual clock root frequency is in MHz,
4560 Serge 3646
		 * but the adjusted_mode->crtc_clock is in kHz. To get the
3647
		 * divisors, it is necessary to divide one by another, so we
3031 serge 3648
		 * convert the virtual clock precision to KHz here for higher
3649
		 * precision.
3650
		 */
3651
		u32 iclk_virtual_root_freq = 172800 * 1000;
3652
		u32 iclk_pi_range = 64;
3653
		u32 desired_divisor, msb_divisor_value, pi_value;
3654
 
4560 Serge 3655
		desired_divisor = (iclk_virtual_root_freq / clock);
3031 serge 3656
		msb_divisor_value = desired_divisor / iclk_pi_range;
3657
		pi_value = desired_divisor % iclk_pi_range;
3658
 
3659
		auxdiv = 0;
3660
		divsel = msb_divisor_value - 2;
3661
		phaseinc = pi_value;
3662
	}
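
	/*
	 * Illustrative example (added, not in the original source): for a
	 * 108000 kHz crtc_clock, desired_divisor = 172800000 / 108000 = 1600,
	 * so msb_divisor_value = 25 and pi_value = 0, giving divsel = 23,
	 * phaseinc = 0 and auxdiv = 0.
	 */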
3663
 
3664
	/* This should not happen with any sane values */
3665
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3666
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3667
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3668
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3669
 
3670
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4560 Serge 3671
			clock,
3031 serge 3672
			auxdiv,
3673
			divsel,
3674
			phasedir,
3675
			phaseinc);
3676
 
3677
	/* Program SSCDIVINTPHASE6 */
3243 Serge 3678
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3031 serge 3679
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3680
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3681
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3682
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3683
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3684
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3243 Serge 3685
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3031 serge 3686
 
3687
	/* Program SSCAUXDIV */
3243 Serge 3688
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3031 serge 3689
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3690
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3243 Serge 3691
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3031 serge 3692
 
3693
	/* Enable modulator and associated divider */
3243 Serge 3694
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3031 serge 3695
	temp &= ~SBI_SSCCTL_DISABLE;
3243 Serge 3696
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3031 serge 3697
 
3698
	/* Wait for initialization time */
3699
	udelay(24);
3700
 
3701
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3480 Serge 3702
 
3703
	mutex_unlock(&dev_priv->dpio_lock);
3031 serge 3704
}
3705
 
4104 Serge 3706
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3707
						enum pipe pch_transcoder)
3708
{
3709
	struct drm_device *dev = crtc->base.dev;
3710
	struct drm_i915_private *dev_priv = dev->dev_private;
3711
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3712
 
3713
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3714
		   I915_READ(HTOTAL(cpu_transcoder)));
3715
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3716
		   I915_READ(HBLANK(cpu_transcoder)));
3717
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3718
		   I915_READ(HSYNC(cpu_transcoder)));
3719
 
3720
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3721
		   I915_READ(VTOTAL(cpu_transcoder)));
3722
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3723
		   I915_READ(VBLANK(cpu_transcoder)));
3724
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3725
		   I915_READ(VSYNC(cpu_transcoder)));
3726
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3727
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
3728
}
3729
 
4280 Serge 3730
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3731
{
3732
	struct drm_i915_private *dev_priv = dev->dev_private;
3733
	uint32_t temp;
3734
 
3735
	temp = I915_READ(SOUTH_CHICKEN1);
3736
	if (temp & FDI_BC_BIFURCATION_SELECT)
3737
		return;
3738
 
3739
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3740
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3741
 
3742
	temp |= FDI_BC_BIFURCATION_SELECT;
3743
	DRM_DEBUG_KMS("enabling fdi C rx\n");
3744
	I915_WRITE(SOUTH_CHICKEN1, temp);
3745
	POSTING_READ(SOUTH_CHICKEN1);
3746
}
3747
 
3748
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3749
{
3750
	struct drm_device *dev = intel_crtc->base.dev;
3751
	struct drm_i915_private *dev_priv = dev->dev_private;
3752
 
3753
	switch (intel_crtc->pipe) {
3754
	case PIPE_A:
3755
		break;
3756
	case PIPE_B:
3757
		if (intel_crtc->config.fdi_lanes > 2)
3758
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3759
		else
3760
			cpt_enable_fdi_bc_bifurcation(dev);
3761
 
3762
		break;
3763
	case PIPE_C:
3764
		cpt_enable_fdi_bc_bifurcation(dev);
3765
 
3766
		break;
3767
	default:
3768
		BUG();
3769
	}
3770
}
3771
 
2327 Serge 3772
/*
3773
 * Enable PCH resources required for PCH ports:
3774
 *   - PCH PLLs
3775
 *   - FDI training & RX/TX
3776
 *   - update transcoder timings
3777
 *   - DP transcoding bits
3778
 *   - transcoder
3779
 */
3780
static void ironlake_pch_enable(struct drm_crtc *crtc)
3781
{
3782
	struct drm_device *dev = crtc->dev;
3783
	struct drm_i915_private *dev_priv = dev->dev_private;
3784
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3785
	int pipe = intel_crtc->pipe;
3031 serge 3786
	u32 reg, temp;
2327 Serge 3787
 
4104 Serge 3788
	assert_pch_transcoder_disabled(dev_priv, pipe);
3031 serge 3789
 
4280 Serge 3790
	if (IS_IVYBRIDGE(dev))
3791
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3792
 
3243 Serge 3793
	/* Write the TU size bits before fdi link training, so that error
3794
	 * detection works. */
3795
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
3796
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3797
 
2327 Serge 3798
	/* For PCH output, training FDI link */
3799
	dev_priv->display.fdi_link_train(crtc);
3800
 
4104 Serge 3801
	/* We need to program the right clock selection before writing the pixel
3802
	 * multiplier into the DPLL. */
3243 Serge 3803
	if (HAS_PCH_CPT(dev)) {
3031 serge 3804
		u32 sel;
2342 Serge 3805
 
2327 Serge 3806
		temp = I915_READ(PCH_DPLL_SEL);
4104 Serge 3807
		temp |= TRANS_DPLL_ENABLE(pipe);
3808
		sel = TRANS_DPLLB_SEL(pipe);
3809
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3031 serge 3810
			temp |= sel;
3811
		else
3812
			temp &= ~sel;
2327 Serge 3813
		I915_WRITE(PCH_DPLL_SEL, temp);
3814
	}
3815
 
4104 Serge 3816
	/* XXX: pch pll's can be enabled any time before we enable the PCH
3817
	 * transcoder, and we actually should do this to not upset any PCH
3818
	 * transcoder that already use the clock when we share it.
3819
	 *
3820
	 * Note that enable_shared_dpll tries to do the right thing, but
3821
	 * get_shared_dpll unconditionally resets the pll - we need that to have
3822
	 * the right LVDS enable sequence. */
5060 serge 3823
	intel_enable_shared_dpll(intel_crtc);
4104 Serge 3824
 
2327 Serge 3825
	/* set transcoder timing, panel must allow it */
3826
	assert_panel_unlocked(dev_priv, pipe);
4104 Serge 3827
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
2327 Serge 3828
 
3829
	intel_fdi_normal_train(crtc);
3830
 
3831
	/* For PCH DP, enable TRANS_DP_CTL */
5354 serge 3832
	if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
3480 Serge 3833
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2327 Serge 3834
		reg = TRANS_DP_CTL(pipe);
3835
		temp = I915_READ(reg);
3836
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3837
			  TRANS_DP_SYNC_MASK |
3838
			  TRANS_DP_BPC_MASK);
3839
		temp |= (TRANS_DP_OUTPUT_ENABLE |
3840
			 TRANS_DP_ENH_FRAMING);
3841
		temp |= bpc << 9; /* same format but at 11:9 */
3842
 
3843
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3844
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3845
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3846
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3847
 
3848
		switch (intel_trans_dp_port_sel(crtc)) {
3849
		case PCH_DP_B:
3850
			temp |= TRANS_DP_PORT_SEL_B;
3851
			break;
3852
		case PCH_DP_C:
3853
			temp |= TRANS_DP_PORT_SEL_C;
3854
			break;
3855
		case PCH_DP_D:
3856
			temp |= TRANS_DP_PORT_SEL_D;
3857
			break;
3858
		default:
3243 Serge 3859
			BUG();
2327 Serge 3860
		}
3861
 
3862
		I915_WRITE(reg, temp);
3863
	}
3864
 
3243 Serge 3865
	ironlake_enable_pch_transcoder(dev_priv, pipe);
2327 Serge 3866
}
3867
 
3243 Serge 3868
static void lpt_pch_enable(struct drm_crtc *crtc)
3869
{
3870
	struct drm_device *dev = crtc->dev;
3871
	struct drm_i915_private *dev_priv = dev->dev_private;
3872
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746 Serge 3873
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 3874
 
4104 Serge 3875
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3243 Serge 3876
 
3877
	lpt_program_iclkip(crtc);
3878
 
3879
	/* Set transcoder timing. */
4104 Serge 3880
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3243 Serge 3881
 
3882
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3883
}
3884
 
5060 serge 3885
void intel_put_shared_dpll(struct intel_crtc *crtc)
3031 serge 3886
{
4104 Serge 3887
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3031 serge 3888
 
3889
	if (pll == NULL)
3890
		return;
3891
 
5354 serge 3892
	if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
3893
		WARN(1, "bad %s crtc mask\n", pll->name);
3031 serge 3894
		return;
3895
	}
3896
 
5354 serge 3897
	pll->config.crtc_mask &= ~(1 << crtc->pipe);
3898
	if (pll->config.crtc_mask == 0) {
4104 Serge 3899
		WARN_ON(pll->on);
3900
		WARN_ON(pll->active);
3901
	}
3902
 
3903
	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3031 serge 3904
}
3905
 
5060 serge 3906
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3031 serge 3907
{
4104 Serge 3908
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
5354 serge 3909
	struct intel_shared_dpll *pll;
4104 Serge 3910
	enum intel_dpll_id i;
3031 serge 3911
 
3912
	if (HAS_PCH_IBX(dev_priv->dev)) {
3913
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4104 Serge 3914
		i = (enum intel_dpll_id) crtc->pipe;
3915
		pll = &dev_priv->shared_dplls[i];
3031 serge 3916
 
4104 Serge 3917
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3918
			      crtc->base.base.id, pll->name);
3031 serge 3919
 
5354 serge 3920
		WARN_ON(pll->new_config->crtc_mask);
5060 serge 3921
 
3031 serge 3922
		goto found;
3923
	}
3924
 
4104 Serge 3925
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3926
		pll = &dev_priv->shared_dplls[i];
3031 serge 3927
 
3928
		/* Only want to check enabled timings first */
5354 serge 3929
		if (pll->new_config->crtc_mask == 0)
3031 serge 3930
			continue;
3931
 
5354 serge 3932
		if (memcmp(&crtc->new_config->dpll_hw_state,
3933
			   &pll->new_config->hw_state,
3934
			   sizeof(pll->new_config->hw_state)) == 0) {
3935
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
3936
				      crtc->base.base.id, pll->name,
3937
				      pll->new_config->crtc_mask,
3938
				      pll->active);
3031 serge 3939
			goto found;
3940
		}
3941
	}
3942
 
3943
	/* Ok no matching timings, maybe there's a free one? */
4104 Serge 3944
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3945
		pll = &dev_priv->shared_dplls[i];
5354 serge 3946
		if (pll->new_config->crtc_mask == 0) {
4104 Serge 3947
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3948
				      crtc->base.base.id, pll->name);
3031 serge 3949
			goto found;
3950
		}
3951
	}
3952
 
3953
	return NULL;
3954
 
3955
found:
5354 serge 3956
	if (pll->new_config->crtc_mask == 0)
3957
		pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
5060 serge 3958
 
5354 serge 3959
	crtc->new_config->shared_dpll = i;
4104 Serge 3960
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3961
			 pipe_name(crtc->pipe));
3962
 
5354 serge 3963
	pll->new_config->crtc_mask |= 1 << crtc->pipe;
3031 serge 3964
 
3965
	return pll;
3966
}
3967
 
5354 serge 3968
/**
3969
 * intel_shared_dpll_start_config - start a new PLL staged config
3970
 * @dev_priv: DRM device
3971
 * @clear_pipes: mask of pipes that will have their PLLs freed
3972
 *
3973
 * Starts a new PLL staged config, copying the current config but
3974
 * releasing the references of pipes specified in clear_pipes.
3975
 */
3976
static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
3977
					  unsigned clear_pipes)
3978
{
3979
	struct intel_shared_dpll *pll;
3980
	enum intel_dpll_id i;
3981
 
3982
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3983
		pll = &dev_priv->shared_dplls[i];
3984
 
3985
		pll->new_config = kmemdup(&pll->config, sizeof pll->config,
3986
					  GFP_KERNEL);
3987
		if (!pll->new_config)
3988
			goto cleanup;
3989
 
3990
		pll->new_config->crtc_mask &= ~clear_pipes;
3991
	}
3992
 
3993
	return 0;
3994
 
3995
cleanup:
3996
	while (--i >= 0) {
3997
		pll = &dev_priv->shared_dplls[i];
3998
		kfree(pll->new_config);
3999
		pll->new_config = NULL;
4000
	}
4001
 
4002
	return -ENOMEM;
4003
}
4004
 
4005
static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
4006
{
4007
	struct intel_shared_dpll *pll;
4008
	enum intel_dpll_id i;
4009
 
4010
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4011
		pll = &dev_priv->shared_dplls[i];
4012
 
4013
		WARN_ON(pll->new_config == &pll->config);
4014
 
4015
		pll->config = *pll->new_config;
4016
		kfree(pll->new_config);
4017
		pll->new_config = NULL;
4018
	}
4019
}
4020
 
4021
static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
4022
{
4023
	struct intel_shared_dpll *pll;
4024
	enum intel_dpll_id i;
4025
 
4026
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4027
		pll = &dev_priv->shared_dplls[i];
4028
 
4029
		WARN_ON(pll->new_config == &pll->config);
4030
 
4031
		kfree(pll->new_config);
4032
		pll->new_config = NULL;
4033
	}
4034
}
4035
 
4104 Serge 4036
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
2342 Serge 4037
{
4038
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 4039
	int dslreg = PIPEDSL(pipe);
2342 Serge 4040
	u32 temp;
4041
 
4042
	temp = I915_READ(dslreg);
4043
	udelay(500);
4044
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4045
		if (wait_for(I915_READ(dslreg) != temp, 5))
4104 Serge 4046
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
2342 Serge 4047
	}
4048
}
4049
 
5354 serge 4050
static void skylake_pfit_enable(struct intel_crtc *crtc)
4051
{
4052
	struct drm_device *dev = crtc->base.dev;
4053
	struct drm_i915_private *dev_priv = dev->dev_private;
4054
	int pipe = crtc->pipe;
4055
 
4056
	if (crtc->config.pch_pfit.enabled) {
4057
		I915_WRITE(PS_CTL(pipe), PS_ENABLE);
4058
		I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
4059
		I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
4060
	}
4061
}
4062
 
4104 Serge 4063
static void ironlake_pfit_enable(struct intel_crtc *crtc)
4064
{
4065
	struct drm_device *dev = crtc->base.dev;
4066
	struct drm_i915_private *dev_priv = dev->dev_private;
4067
	int pipe = crtc->pipe;
4068
 
4069
	if (crtc->config.pch_pfit.enabled) {
4070
		/* Force use of hard-coded filter coefficients
4071
		 * as some pre-programmed values are broken,
4072
		 * e.g. x201.
4073
		 */
4074
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4075
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4076
						 PF_PIPE_SEL_IVB(pipe));
4077
		else
4078
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4079
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
4080
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
4081
	}
4082
}
4083
 
4084
static void intel_enable_planes(struct drm_crtc *crtc)
4085
{
4086
	struct drm_device *dev = crtc->dev;
4087
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
5060 serge 4088
	struct drm_plane *plane;
4104 Serge 4089
	struct intel_plane *intel_plane;
4090
 
5060 serge 4091
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4092
		intel_plane = to_intel_plane(plane);
4104 Serge 4093
		if (intel_plane->pipe == pipe)
4094
			intel_plane_restore(&intel_plane->base);
5060 serge 4095
	}
4104 Serge 4096
}
4097
 
4098
static void intel_disable_planes(struct drm_crtc *crtc)
4099
{
4100
	struct drm_device *dev = crtc->dev;
4101
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
5060 serge 4102
	struct drm_plane *plane;
4104 Serge 4103
	struct intel_plane *intel_plane;
4104
 
5060 serge 4105
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4106
		intel_plane = to_intel_plane(plane);
4104 Serge 4107
		if (intel_plane->pipe == pipe)
4108
			intel_plane_disable(&intel_plane->base);
5060 serge 4109
	}
4104 Serge 4110
}
4111
 
4560 Serge 4112
void hsw_enable_ips(struct intel_crtc *crtc)
4113
{
5060 serge 4114
	struct drm_device *dev = crtc->base.dev;
4115
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 4116
 
4117
	if (!crtc->config.ips_enabled)
4118
		return;
4119
 
5060 serge 4120
	/* We can only enable IPS after we enable a plane and wait for a vblank */
4121
	intel_wait_for_vblank(dev, crtc->pipe);
4122
 
4560 Serge 4123
	assert_plane_enabled(dev_priv, crtc->plane);
5060 serge 4124
	if (IS_BROADWELL(dev)) {
4560 Serge 4125
		mutex_lock(&dev_priv->rps.hw_lock);
4126
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4127
		mutex_unlock(&dev_priv->rps.hw_lock);
4128
		/* Quoting Art Runyan: "it's not safe to expect any particular
4129
		 * value in IPS_CTL bit 31 after enabling IPS through the
4130
		 * mailbox." Moreover, the mailbox may return a bogus state,
4131
		 * so we need to just enable it and continue on.
4132
		 */
4133
	} else {
4134
		I915_WRITE(IPS_CTL, IPS_ENABLE);
4135
		/* The bit only becomes 1 in the next vblank, so this wait here
4136
		 * is essentially intel_wait_for_vblank. If we don't have this
4137
		 * and don't wait for vblanks until the end of crtc_enable, then
4138
		 * the HW state readout code will complain that the expected
4139
		 * IPS_CTL value is not the one we read. */
4140
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4141
			DRM_ERROR("Timed out waiting for IPS enable\n");
4142
	}
4143
}
4144
 
4145
void hsw_disable_ips(struct intel_crtc *crtc)
4146
{
4147
	struct drm_device *dev = crtc->base.dev;
4148
	struct drm_i915_private *dev_priv = dev->dev_private;
4149
 
4150
	if (!crtc->config.ips_enabled)
4151
		return;
4152
 
4153
	assert_plane_enabled(dev_priv, crtc->plane);
5060 serge 4154
	if (IS_BROADWELL(dev)) {
4560 Serge 4155
		mutex_lock(&dev_priv->rps.hw_lock);
4156
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4157
		mutex_unlock(&dev_priv->rps.hw_lock);
5060 serge 4158
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4159
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4160
			DRM_ERROR("Timed out waiting for IPS disable\n");
4560 Serge 4161
	} else {
4162
		I915_WRITE(IPS_CTL, 0);
4163
		POSTING_READ(IPS_CTL);
4164
	}
4165
 
4166
	/* We need to wait for a vblank before we can disable the plane. */
4167
	intel_wait_for_vblank(dev, crtc->pipe);
4168
}
4169
 
4170
/** Loads the palette/gamma unit for the CRTC with the prepared values */
4171
static void intel_crtc_load_lut(struct drm_crtc *crtc)
4172
{
4173
	struct drm_device *dev = crtc->dev;
4174
	struct drm_i915_private *dev_priv = dev->dev_private;
4175
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4176
	enum pipe pipe = intel_crtc->pipe;
4177
	int palreg = PALETTE(pipe);
4178
	int i;
4179
	bool reenable_ips = false;
4180
 
4181
	/* The clocks have to be on to load the palette. */
4182
	if (!crtc->enabled || !intel_crtc->active)
4183
		return;
4184
 
4185
	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
5354 serge 4186
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
4560 Serge 4187
			assert_dsi_pll_enabled(dev_priv);
4188
		else
4189
			assert_pll_enabled(dev_priv, pipe);
4190
	}
4191
 
4192
	/* use legacy palette for Ironlake */
5060 serge 4193
	if (!HAS_GMCH_DISPLAY(dev))
4560 Serge 4194
		palreg = LGC_PALETTE(pipe);
4195
 
4196
	/* Workaround : Do not read or write the pipe palette/gamma data while
4197
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
4198
	 */
4199
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
4200
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
4201
	     GAMMA_MODE_MODE_SPLIT)) {
4202
		hsw_disable_ips(intel_crtc);
4203
		reenable_ips = true;
4204
	}
4205
 
4206
	for (i = 0; i < 256; i++) {
4207
		I915_WRITE(palreg + 4 * i,
4208
			   (intel_crtc->lut_r[i] << 16) |
4209
			   (intel_crtc->lut_g[i] << 8) |
4210
			   intel_crtc->lut_b[i]);
4211
	}
4212
 
4213
	if (reenable_ips)
4214
		hsw_enable_ips(intel_crtc);
4215
}
4216
 
5060 serge 4217
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
4218
{
4219
	if (!enable && intel_crtc->overlay) {
4220
		struct drm_device *dev = intel_crtc->base.dev;
4221
		struct drm_i915_private *dev_priv = dev->dev_private;
4222
 
4223
		mutex_lock(&dev->struct_mutex);
4224
		dev_priv->mm.interruptible = false;
5354 serge 4225
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
5060 serge 4226
        dev_priv->mm.interruptible = true;
4227
		mutex_unlock(&dev->struct_mutex);
4228
	}
4229
 
4230
	/* Let userspace switch the overlay on again. In most cases userspace
4231
	 * has to recompute where to put it anyway.
4232
	 */
4233
}
4234
 
4235
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
4236
{
4237
	struct drm_device *dev = crtc->dev;
4238
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4239
	int pipe = intel_crtc->pipe;
4240
 
5354 serge 4241
	intel_enable_primary_hw_plane(crtc->primary, crtc);
5060 serge 4242
	intel_enable_planes(crtc);
4243
	intel_crtc_update_cursor(crtc, true);
4244
	intel_crtc_dpms_overlay(intel_crtc, true);
4245
 
4246
	hsw_enable_ips(intel_crtc);
4247
 
4248
	mutex_lock(&dev->struct_mutex);
4249
	intel_update_fbc(dev);
4250
	mutex_unlock(&dev->struct_mutex);
5354 serge 4251
 
4252
	/*
4253
	 * FIXME: Once we grow proper nuclear flip support out of this we need
4254
	 * to compute the mask of flip planes precisely. For the time being
4255
	 * consider this a flip from a NULL plane.
4256
	 */
4257
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
5060 serge 4258
}
4259
 
4260
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4261
{
4262
	struct drm_device *dev = crtc->dev;
4263
	struct drm_i915_private *dev_priv = dev->dev_private;
4264
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4265
	int pipe = intel_crtc->pipe;
4266
	int plane = intel_crtc->plane;
4267
 
4268
 
4269
	if (dev_priv->fbc.plane == plane)
4270
		intel_disable_fbc(dev);
4271
 
4272
	hsw_disable_ips(intel_crtc);
4273
 
4274
	intel_crtc_dpms_overlay(intel_crtc, false);
4275
	intel_crtc_update_cursor(crtc, false);
4276
	intel_disable_planes(crtc);
5354 serge 4277
	intel_disable_primary_hw_plane(crtc->primary, crtc);
4278
 
4279
	/*
4280
	 * FIXME: Once we grow proper nuclear flip support out of this we need
4281
	 * to compute the mask of flip planes precisely. For the time being
4282
	 * consider this a flip to a NULL plane.
4283
	 */
4284
//	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
5060 serge 4285
}
4286
 
2327 Serge 4287
static void ironlake_crtc_enable(struct drm_crtc *crtc)
4288
{
4289
    struct drm_device *dev = crtc->dev;
4290
    struct drm_i915_private *dev_priv = dev->dev_private;
4291
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4292
	struct intel_encoder *encoder;
2327 Serge 4293
    int pipe = intel_crtc->pipe;
4294
 
3031 serge 4295
	WARN_ON(!crtc->enabled);
4296
 
2327 Serge 4297
    if (intel_crtc->active)
4298
        return;
4299
 
5060 serge 4300
	if (intel_crtc->config.has_pch_encoder)
4301
		intel_prepare_shared_dpll(intel_crtc);
4302
 
4303
	if (intel_crtc->config.has_dp_encoder)
4304
		intel_dp_set_m_n(intel_crtc);
4305
 
4306
	intel_set_pipe_timings(intel_crtc);
4307
 
4308
	if (intel_crtc->config.has_pch_encoder) {
4309
		intel_cpu_transcoder_set_m_n(intel_crtc,
5354 serge 4310
				     &intel_crtc->config.fdi_m_n, NULL);
5060 serge 4311
	}
4312
 
4313
	ironlake_set_pipeconf(crtc);
4314
 
2327 Serge 4315
    intel_crtc->active = true;
4104 Serge 4316
 
5354 serge 4317
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4318
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4104 Serge 4319
 
4320
	for_each_encoder_on_crtc(dev, crtc, encoder)
4321
		if (encoder->pre_enable)
4322
			encoder->pre_enable(encoder);
2327 Serge 4323
 
3746 Serge 4324
	if (intel_crtc->config.has_pch_encoder) {
3243 Serge 4325
		/* Note: FDI PLL enabling _must_ be done before we enable the
4326
		 * cpu pipes, hence this is separate from all the other fdi/pch
4327
		 * enabling. */
3031 serge 4328
		ironlake_fdi_pll_enable(intel_crtc);
4329
	} else {
4330
		assert_fdi_tx_disabled(dev_priv, pipe);
4331
		assert_fdi_rx_disabled(dev_priv, pipe);
4332
	}
2327 Serge 4333
 
4104 Serge 4334
	ironlake_pfit_enable(intel_crtc);
3031 serge 4335
 
2327 Serge 4336
    /*
4337
     * On ILK+ LUT must be loaded before the pipe is running but with
4338
     * clocks enabled
4339
     */
4340
    intel_crtc_load_lut(crtc);
4341
 
4560 Serge 4342
	intel_update_watermarks(crtc);
5060 serge 4343
	intel_enable_pipe(intel_crtc);
2327 Serge 4344
 
3746 Serge 4345
	if (intel_crtc->config.has_pch_encoder)
2327 Serge 4346
        ironlake_pch_enable(crtc);
4347
 
3031 serge 4348
	for_each_encoder_on_crtc(dev, crtc, encoder)
4349
		encoder->enable(encoder);
4350
 
4351
	if (HAS_PCH_CPT(dev))
4104 Serge 4352
		cpt_verify_modeset(dev, intel_crtc->pipe);
3031 serge 4353
 
5354 serge 4354
	assert_vblank_disabled(crtc);
4355
	drm_crtc_vblank_on(crtc);
4356
 
5060 serge 4357
	intel_crtc_enable_planes(crtc);
2327 Serge 4358
}
4359
 
4104 Serge 4360
/* IPS only exists on ULT machines and is tied to pipe A. */
4361
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4362
{
4363
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4364
}
4365
 
4560 Serge 4366
/*
4367
 * This implements the workaround described in the "notes" section of the mode
4368
 * set sequence documentation. When going from no pipes or single pipe to
4369
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
4370
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4371
 */
4372
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4373
{
4374
	struct drm_device *dev = crtc->base.dev;
4375
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4376
 
4377
	/* We want to get the other_active_crtc only if there's only 1 other
4378
	 * active crtc. */
5060 serge 4379
	for_each_intel_crtc(dev, crtc_it) {
4560 Serge 4380
		if (!crtc_it->active || crtc_it == crtc)
4381
			continue;
4382
 
4383
		if (other_active_crtc)
4104 Serge 4384
		return;
4385
 
4560 Serge 4386
		other_active_crtc = crtc_it;
4387
	}
4388
	if (!other_active_crtc)
4389
		return;
4104 Serge 4390
 
4560 Serge 4391
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4392
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4104 Serge 4393
}
4394
 
3243 Serge 4395
static void haswell_crtc_enable(struct drm_crtc *crtc)
4396
{
4397
	struct drm_device *dev = crtc->dev;
4398
	struct drm_i915_private *dev_priv = dev->dev_private;
4399
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4400
	struct intel_encoder *encoder;
4401
	int pipe = intel_crtc->pipe;
4402
 
4403
	WARN_ON(!crtc->enabled);
4404
 
4405
	if (intel_crtc->active)
4406
		return;
4407
 
5060 serge 4408
	if (intel_crtc_to_shared_dpll(intel_crtc))
4409
		intel_enable_shared_dpll(intel_crtc);
4410
 
4411
	if (intel_crtc->config.has_dp_encoder)
4412
		intel_dp_set_m_n(intel_crtc);
4413
 
4414
	intel_set_pipe_timings(intel_crtc);
4415
 
5354 serge 4416
	if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
4417
		I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
4418
			   intel_crtc->config.pixel_multiplier - 1);
4419
	}
4420
 
5060 serge 4421
	if (intel_crtc->config.has_pch_encoder) {
4422
		intel_cpu_transcoder_set_m_n(intel_crtc,
5354 serge 4423
				     &intel_crtc->config.fdi_m_n, NULL);
5060 serge 4424
	}
4425
 
4426
	haswell_set_pipeconf(crtc);
4427
 
4428
	intel_set_pipe_csc(crtc);
4429
 
3243 Serge 4430
	intel_crtc->active = true;
4104 Serge 4431
 
5354 serge 4432
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3243 Serge 4433
	for_each_encoder_on_crtc(dev, crtc, encoder)
4434
		if (encoder->pre_enable)
4435
			encoder->pre_enable(encoder);
4436
 
5060 serge 4437
	if (intel_crtc->config.has_pch_encoder) {
5354 serge 4438
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4439
						      true);
5060 serge 4440
		dev_priv->display.fdi_link_train(crtc);
4441
	}
4442
 
3243 Serge 4443
	intel_ddi_enable_pipe_clock(intel_crtc);
4444
 
5354 serge 4445
	if (IS_SKYLAKE(dev))
4446
		skylake_pfit_enable(intel_crtc);
4447
	else
4104 Serge 4448
		ironlake_pfit_enable(intel_crtc);
3243 Serge 4449
 
4450
	/*
4451
	 * On ILK+ LUT must be loaded before the pipe is running but with
4452
	 * clocks enabled
4453
	 */
4454
	intel_crtc_load_lut(crtc);
4455
 
4456
	intel_ddi_set_pipe_settings(crtc);
3746 Serge 4457
	intel_ddi_enable_transcoder_func(crtc);
3243 Serge 4458
 
4560 Serge 4459
	intel_update_watermarks(crtc);
5060 serge 4460
	intel_enable_pipe(intel_crtc);
3243 Serge 4461
 
3746 Serge 4462
	if (intel_crtc->config.has_pch_encoder)
3243 Serge 4463
		lpt_pch_enable(crtc);
4464
 
5060 serge 4465
	if (intel_crtc->config.dp_encoder_is_mst)
4466
		intel_ddi_set_vc_payload_alloc(crtc, true);
4467
 
4560 Serge 4468
	for_each_encoder_on_crtc(dev, crtc, encoder) {
3243 Serge 4469
		encoder->enable(encoder);
4560 Serge 4470
		intel_opregion_notify_encoder(encoder, true);
4471
	}
3243 Serge 4472
 
5354 serge 4473
	assert_vblank_disabled(crtc);
4474
	drm_crtc_vblank_on(crtc);
4475
 
4560 Serge 4476
	/* If we change the relative order between pipe/planes enabling, we need
4477
	 * to change the workaround. */
4478
	haswell_mode_set_planes_workaround(intel_crtc);
5060 serge 4479
	intel_crtc_enable_planes(crtc);
3243 Serge 4480
}
4481
 
5354 serge 4482
static void skylake_pfit_disable(struct intel_crtc *crtc)
4483
{
4484
	struct drm_device *dev = crtc->base.dev;
4485
	struct drm_i915_private *dev_priv = dev->dev_private;
4486
	int pipe = crtc->pipe;
4487
 
4488
	/* To avoid upsetting the power well on haswell only disable the pfit if
4489
	 * it's in use. The hw state code will make sure we get this right. */
4490
	if (crtc->config.pch_pfit.enabled) {
4491
		I915_WRITE(PS_CTL(pipe), 0);
4492
		I915_WRITE(PS_WIN_POS(pipe), 0);
4493
		I915_WRITE(PS_WIN_SZ(pipe), 0);
4494
	}
4495
}
4496
 
4104 Serge 4497
static void ironlake_pfit_disable(struct intel_crtc *crtc)
4498
{
4499
	struct drm_device *dev = crtc->base.dev;
4500
	struct drm_i915_private *dev_priv = dev->dev_private;
4501
	int pipe = crtc->pipe;
4502
 
4503
	/* To avoid upsetting the power well on haswell only disable the pfit if
4504
	 * it's in use. The hw state code will make sure we get this right. */
4505
	if (crtc->config.pch_pfit.enabled) {
4506
		I915_WRITE(PF_CTL(pipe), 0);
4507
		I915_WRITE(PF_WIN_POS(pipe), 0);
4508
		I915_WRITE(PF_WIN_SZ(pipe), 0);
4509
	}
4510
}
4511
 
2327 Serge 4512
static void ironlake_crtc_disable(struct drm_crtc *crtc)
4513
{
4514
    struct drm_device *dev = crtc->dev;
4515
    struct drm_i915_private *dev_priv = dev->dev_private;
4516
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4517
	struct intel_encoder *encoder;
2327 Serge 4518
    int pipe = intel_crtc->pipe;
4519
    u32 reg, temp;
4520
 
4521
    if (!intel_crtc->active)
4522
        return;
4523
 
5060 serge 4524
	intel_crtc_disable_planes(crtc);
4525
 
5354 serge 4526
	drm_crtc_vblank_off(crtc);
4527
	assert_vblank_disabled(crtc);
4528
 
3031 serge 4529
	for_each_encoder_on_crtc(dev, crtc, encoder)
4530
		encoder->disable(encoder);
2336 Serge 4531
 
4104 Serge 4532
	if (intel_crtc->config.has_pch_encoder)
5354 serge 4533
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2327 Serge 4534
 
5354 serge 4535
	intel_disable_pipe(intel_crtc);
4536
 
4104 Serge 4537
	ironlake_pfit_disable(intel_crtc);
2327 Serge 4538
 
3031 serge 4539
	for_each_encoder_on_crtc(dev, crtc, encoder)
4540
		if (encoder->post_disable)
4541
			encoder->post_disable(encoder);
4542
 
4104 Serge 4543
	if (intel_crtc->config.has_pch_encoder) {
		ironlake_fdi_disable(crtc);

		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		/* disable PCH DPLL */
		intel_disable_shared_dpll(intel_crtc);

		ironlake_fdi_pll_disable(intel_crtc);
	}
2327 Serge 4568
 
4569
    intel_crtc->active = false;
4560 Serge 4570
	intel_update_watermarks(crtc);
2327 Serge 4571
 
4572
    mutex_lock(&dev->struct_mutex);
4573
    intel_update_fbc(dev);
4574
    mutex_unlock(&dev->struct_mutex);
4575
}
4576
 
3243 Serge 4577
static void haswell_crtc_disable(struct drm_crtc *crtc)
4578
{
4579
	struct drm_device *dev = crtc->dev;
4580
	struct drm_i915_private *dev_priv = dev->dev_private;
4581
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4582
	struct intel_encoder *encoder;
3746 Serge 4583
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 4584
 
4585
	if (!intel_crtc->active)
4586
		return;
4587
 
5060 serge 4588
	intel_crtc_disable_planes(crtc);
4560 Serge 4589
 
5354 serge 4590
	drm_crtc_vblank_off(crtc);
4591
	assert_vblank_disabled(crtc);
4592
 
4560 Serge 4593
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4594
		intel_opregion_notify_encoder(encoder, false);
3243 Serge 4595
		encoder->disable(encoder);
4560 Serge 4596
	}
3243 Serge 4597
 
4104 Serge 4598
	if (intel_crtc->config.has_pch_encoder)
5354 serge 4599
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4600
						      false);
4601
	intel_disable_pipe(intel_crtc);
3243 Serge 4602
 
5097 serge 4603
	if (intel_crtc->config.dp_encoder_is_mst)
4604
		intel_ddi_set_vc_payload_alloc(crtc, false);
4605
 
3243 Serge 4606
	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4607
 
5354 serge 4608
	if (IS_SKYLAKE(dev))
4609
		skylake_pfit_disable(intel_crtc);
4610
	else
4104 Serge 4611
		ironlake_pfit_disable(intel_crtc);
3243 Serge 4612
 
4613
	intel_ddi_disable_pipe_clock(intel_crtc);
4614
 
3746 Serge 4615
	if (intel_crtc->config.has_pch_encoder) {
3243 Serge 4616
		lpt_disable_pch_transcoder(dev_priv);
4617
		intel_ddi_fdi_disable(crtc);
4618
	}
4619
 
5060 serge 4620
	for_each_encoder_on_crtc(dev, crtc, encoder)
4621
		if (encoder->post_disable)
4622
			encoder->post_disable(encoder);
4623
 
3243 Serge 4624
	intel_crtc->active = false;
4560 Serge 4625
	intel_update_watermarks(crtc);
3243 Serge 4626
 
4627
	mutex_lock(&dev->struct_mutex);
4628
	intel_update_fbc(dev);
4629
	mutex_unlock(&dev->struct_mutex);
5060 serge 4630
 
4631
	if (intel_crtc_to_shared_dpll(intel_crtc))
4632
		intel_disable_shared_dpll(intel_crtc);
3243 Serge 4633
}
4634
 
3031 serge 4635
static void ironlake_crtc_off(struct drm_crtc *crtc)
2327 Serge 4636
{
4637
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4104 Serge 4638
	intel_put_shared_dpll(intel_crtc);
2327 Serge 4639
}
4640
 
3243 Serge 4641
 
4104 Serge 4642
static void i9xx_pfit_enable(struct intel_crtc *crtc)
4643
{
4644
	struct drm_device *dev = crtc->base.dev;
4645
	struct drm_i915_private *dev_priv = dev->dev_private;
4646
	struct intel_crtc_config *pipe_config = &crtc->config;
4647
 
4648
	if (!crtc->config.gmch_pfit.control)
4649
		return;
4650
 
4651
	/*
4652
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
4653
	 * according to register description and PRM.
4654
	 */
4655
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
4656
	assert_pipe_disabled(dev_priv, crtc->pipe);
4657
 
4658
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
4659
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
4660
 
4661
	/* Border color in case we don't scale up to the full screen. Black by
4662
	 * default, change to something else for debugging. */
4663
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
4664
}
4665
 
5060 serge 4666
static enum intel_display_power_domain port_to_power_domain(enum port port)
4560 Serge 4667
{
5060 serge 4668
	switch (port) {
4669
	case PORT_A:
4670
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4671
	case PORT_B:
4672
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4673
	case PORT_C:
4674
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4675
	case PORT_D:
4676
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4677
	default:
4678
		WARN_ON_ONCE(1);
4679
		return POWER_DOMAIN_PORT_OTHER;
4680
	}
4681
}
4682
 
4683
#define for_each_power_domain(domain, mask)				\
4684
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
4685
		if ((1 << (domain)) & (mask))
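
/*
 * Usage sketch (added for clarity; mirrors the callers below in
 * modeset_update_crtc_power_domains()):
 *
 *	for_each_power_domain(domain, pipe_domains[crtc->pipe])
 *		intel_display_power_get(dev_priv, domain);
 */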
4686
 
4687
enum intel_display_power_domain
4688
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4689
{
4690
	struct drm_device *dev = intel_encoder->base.dev;
4691
	struct intel_digital_port *intel_dig_port;
4692
 
4693
	switch (intel_encoder->type) {
4694
	case INTEL_OUTPUT_UNKNOWN:
4695
		/* Only DDI platforms should ever use this output type */
4696
		WARN_ON_ONCE(!HAS_DDI(dev));
4697
	case INTEL_OUTPUT_DISPLAYPORT:
4698
	case INTEL_OUTPUT_HDMI:
4699
	case INTEL_OUTPUT_EDP:
4700
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4701
		return port_to_power_domain(intel_dig_port->port);
4702
	case INTEL_OUTPUT_DP_MST:
4703
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
4704
		return port_to_power_domain(intel_dig_port->port);
4705
	case INTEL_OUTPUT_ANALOG:
4706
		return POWER_DOMAIN_PORT_CRT;
4707
	case INTEL_OUTPUT_DSI:
4708
		return POWER_DOMAIN_PORT_DSI;
4709
	default:
4710
		return POWER_DOMAIN_PORT_OTHER;
4711
	}
4712
}
4713
 
4714
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4715
{
4716
	struct drm_device *dev = crtc->dev;
4717
	struct intel_encoder *intel_encoder;
4718
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4719
	enum pipe pipe = intel_crtc->pipe;
4720
	unsigned long mask;
4721
	enum transcoder transcoder;
4722
 
4723
	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4724
 
4725
	mask = BIT(POWER_DOMAIN_PIPE(pipe));
4726
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4727
	if (intel_crtc->config.pch_pfit.enabled ||
4728
	    intel_crtc->config.pch_pfit.force_thru)
4729
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4730
 
4731
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4732
		mask |= BIT(intel_display_port_power_domain(intel_encoder));
4733
 
4734
	return mask;
4735
}
4736
 
4737
static void modeset_update_crtc_power_domains(struct drm_device *dev)
4738
{
4739
	struct drm_i915_private *dev_priv = dev->dev_private;
4740
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
4741
	struct intel_crtc *crtc;
4742
 
4743
	/*
4744
	 * First get all needed power domains, then put all unneeded, to avoid
4745
	 * any unnecessary toggling of the power wells.
4746
	 */
4747
	for_each_intel_crtc(dev, crtc) {
4748
		enum intel_display_power_domain domain;
4749
 
4750
		if (!crtc->base.enabled)
4751
			continue;
4752
 
4753
		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
4754
 
4755
		for_each_power_domain(domain, pipe_domains[crtc->pipe])
4756
			intel_display_power_get(dev_priv, domain);
4757
	}
4758
 
5354 serge 4759
	if (dev_priv->display.modeset_global_resources)
4760
		dev_priv->display.modeset_global_resources(dev);
4761
 
5060 serge 4762
	for_each_intel_crtc(dev, crtc) {
4763
		enum intel_display_power_domain domain;
4764
 
4765
		for_each_power_domain(domain, crtc->enabled_power_domains)
4766
			intel_display_power_put(dev_priv, domain);
4767
 
4768
		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
4769
	}
4770
 
4771
	intel_display_set_init_power(dev_priv, false);
4772
}
4773
 
4774
/* returns HPLL frequency in kHz */
4775
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
4776
{
4560 Serge 4777
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
4778
 
4779
	/* Obtain SKU information */
4780
	mutex_lock(&dev_priv->dpio_lock);
4781
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
4782
		CCK_FUSE_HPLL_FREQ_MASK;
4783
	mutex_unlock(&dev_priv->dpio_lock);
4784
 
5060 serge 4785
	return vco_freq[hpll_freq] * 1000;
4560 Serge 4786
}
4787
 
5060 serge 4788
static void vlv_update_cdclk(struct drm_device *dev)
4789
{
4790
	struct drm_i915_private *dev_priv = dev->dev_private;
4791
 
4792
	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5354 serge 4793
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5060 serge 4794
			 dev_priv->vlv_cdclk_freq);
4795
 
4796
	/*
4797
	 * Program the gmbus_freq based on the cdclk frequency.
4798
	 * BSpec erroneously claims we should aim for 4MHz, but
4799
	 * in fact 1MHz is the correct frequency.
4800
	 */
5354 serge 4801
	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
5060 serge 4802
}
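
/*
 * Worked example (illustrative): with vlv_cdclk_freq = 266667 kHz the
 * GMBUSFREQ_VLV write above programs DIV_ROUND_UP(266667, 1000) = 267,
 * i.e. the cdclk frequency in MHz, from which the hardware derives the
 * ~1 MHz gmbus clock mentioned above.
 */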
4803
 
4560 Serge 4804
/* Adjust CDclk dividers to allow high res or save power if possible */
4805
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4806
{
4807
	struct drm_i915_private *dev_priv = dev->dev_private;
4808
	u32 val, cmd;
4809
 
5060 serge 4810
	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4811
 
4812
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
4560 Serge 4813
		cmd = 2;
5060 serge 4814
	else if (cdclk == 266667)
4560 Serge 4815
		cmd = 1;
4816
	else
4817
		cmd = 0;
4818
 
4819
	mutex_lock(&dev_priv->rps.hw_lock);
4820
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4821
	val &= ~DSPFREQGUAR_MASK;
4822
	val |= (cmd << DSPFREQGUAR_SHIFT);
4823
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4824
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4825
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
4826
		     50)) {
4827
		DRM_ERROR("timed out waiting for CDclk change\n");
4828
	}
4829
	mutex_unlock(&dev_priv->rps.hw_lock);
4830
 
5060 serge 4831
	if (cdclk == 400000) {
5354 serge 4832
		u32 divider;
4560 Serge 4833
 
5354 serge 4834
		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
4560 Serge 4835
 
4836
		mutex_lock(&dev_priv->dpio_lock);
4837
		/* adjust cdclk divider */
4838
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5060 serge 4839
		val &= ~DISPLAY_FREQUENCY_VALUES;
4560 Serge 4840
		val |= divider;
4841
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5060 serge 4842
 
4843
		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
4844
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
4845
			     50))
4846
			DRM_ERROR("timed out waiting for CDclk change\n");
4560 Serge 4847
		mutex_unlock(&dev_priv->dpio_lock);
4848
	}
4849
 
4850
	mutex_lock(&dev_priv->dpio_lock);
4851
	/* adjust self-refresh exit latency value */
4852
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
4853
	val &= ~0x7f;
4854
 
4855
	/*
4856
	 * For high bandwidth configs, we set a higher latency in the bunit
4857
	 * so that the core display fetch happens in time to avoid underruns.
4858
	 */
5060 serge 4859
	if (cdclk == 400000)
4560 Serge 4860
		val |= 4500 / 250; /* 4.5 usec */
4861
	else
4862
		val |= 3000 / 250; /* 3.0 usec */
4863
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4864
	mutex_unlock(&dev_priv->dpio_lock);
4865
 
5060 serge 4866
	vlv_update_cdclk(dev);
4560 Serge 4867
}
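
/*
 * Worked example (illustrative): the BUNIT_REG_BISOC field written above
 * appears to hold the self-refresh exit latency in 250 ns units, so the
 * 4.5 us high-bandwidth case programs 4500 / 250 = 18 and the 3.0 us
 * case programs 3000 / 250 = 12.
 */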
4868
 
5354 serge 4869
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
4870
{
4871
	struct drm_i915_private *dev_priv = dev->dev_private;
4872
	u32 val, cmd;
4873
 
4874
	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4875
 
4876
	switch (cdclk) {
4877
	case 400000:
4878
		cmd = 3;
4879
		break;
4880
	case 333333:
4881
	case 320000:
4882
		cmd = 2;
4883
		break;
4884
	case 266667:
4885
		cmd = 1;
4886
		break;
4887
	case 200000:
4888
		cmd = 0;
4889
		break;
4890
	default:
4891
		WARN_ON(1);
4892
		return;
4893
	}
4894
 
4895
	mutex_lock(&dev_priv->rps.hw_lock);
4896
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4897
	val &= ~DSPFREQGUAR_MASK_CHV;
4898
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
4899
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4900
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4901
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
4902
		     50)) {
4903
		DRM_ERROR("timed out waiting for CDclk change\n");
4904
	}
4905
	mutex_unlock(&dev_priv->rps.hw_lock);
4906
 
4907
	vlv_update_cdclk(dev);
4908
}
4909
 
4560 Serge 4910
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4911
				 int max_pixclk)
4912
{
5354 serge 4913
	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
4560 Serge 4914
 
5354 serge 4915
	/* FIXME: Punit isn't quite ready yet */
4916
	if (IS_CHERRYVIEW(dev_priv->dev))
4917
		return 400000;
4918
 
4560 Serge 4919
	/*
4920
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
4921
	 *   200MHz
4922
	 *   267MHz
5060 serge 4923
	 *   320/333MHz (depends on HPLL freq)
4560 Serge 4924
	 *   400MHz
4925
	 * So we check to see whether we're above 90% of the lower bin and
4926
	 * adjust if needed.
5060 serge 4927
	 *
4928
	 * We seem to get an unstable or solid color picture at 200MHz.
4929
	 * Not sure what's wrong. For now use 200MHz only when all pipes
4930
	 * are off.
4560 Serge 4931
	 */
5060 serge 4932
	if (max_pixclk > freq_320*9/10)
4933
		return 400000;
4934
	else if (max_pixclk > 266667*9/10)
4935
		return freq_320;
4936
	else if (max_pixclk > 0)
4937
		return 266667;
4938
	else
4939
		return 200000;
4560 Serge 4940
}
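
/*
 * Worked example (illustrative, Valleyview with a 1600 MHz HPLL so that
 * freq_320 is 320000 kHz): the 90% cut-over points are 240000 and
 * 288000 kHz, so a 250000 kHz pixel clock selects the 320000 kHz bin
 * while a 300000 kHz one needs the full 400000 kHz cdclk.
 */
#if 0
	valleyview_calc_cdclk(dev_priv, 250000);	/* -> 320000 */
	valleyview_calc_cdclk(dev_priv, 300000);	/* -> 400000 */
	valleyview_calc_cdclk(dev_priv, 0);		/* all pipes off -> 200000 */
#endif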
4941
 
5060 serge 4942
/* compute the max pixel clock for new configuration */
4943
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4560 Serge 4944
{
4945
	struct drm_device *dev = dev_priv->dev;
4946
	struct intel_crtc *intel_crtc;
4947
	int max_pixclk = 0;
4948
 
5060 serge 4949
	for_each_intel_crtc(dev, intel_crtc) {
4950
		if (intel_crtc->new_enabled)
4560 Serge 4951
			max_pixclk = max(max_pixclk,
5060 serge 4952
					 intel_crtc->new_config->adjusted_mode.crtc_clock);
4560 Serge 4953
	}
4954
 
4955
	return max_pixclk;
4956
}
4957
 
4958
static void valleyview_modeset_global_pipes(struct drm_device *dev,
5060 serge 4959
					    unsigned *prepare_pipes)
4560 Serge 4960
{
4961
	struct drm_i915_private *dev_priv = dev->dev_private;
4962
	struct intel_crtc *intel_crtc;
5060 serge 4963
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
4560 Serge 4964
 
5060 serge 4965
	if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
4966
	    dev_priv->vlv_cdclk_freq)
4560 Serge 4967
		return;
4968
 
5060 serge 4969
	/* disable/enable all currently active pipes while we change cdclk */
4970
	for_each_intel_crtc(dev, intel_crtc)
4560 Serge 4971
		if (intel_crtc->base.enabled)
4972
			*prepare_pipes |= (1 << intel_crtc->pipe);
4973
}
4974
 
4975
static void valleyview_modeset_global_resources(struct drm_device *dev)
4976
{
4977
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 4978
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
4560 Serge 4979
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4980
 
5354 serge 4981
	if (req_cdclk != dev_priv->vlv_cdclk_freq) {
4982
		/*
4983
		 * FIXME: We can end up here with all power domains off, yet
4984
		 * with a CDCLK frequency other than the minimum. To account
4985
		 * for this take the PIPE-A power domain, which covers the HW
4986
		 * blocks needed for the following programming. This can be
4987
		 * removed once it's guaranteed that we get here either with
4988
		 * the minimum CDCLK set, or the required power domains
4989
		 * enabled.
4990
		 */
4991
		intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
4992
 
4993
		if (IS_CHERRYVIEW(dev))
4994
			cherryview_set_cdclk(dev, req_cdclk);
4995
		else
4560 Serge 4996
			valleyview_set_cdclk(dev, req_cdclk);
5354 serge 4997
 
4998
		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
4999
	}
4560 Serge 5000
}
5001
 
4104 Serge 5002
static void valleyview_crtc_enable(struct drm_crtc *crtc)
5003
{
5004
	struct drm_device *dev = crtc->dev;
5354 serge 5005
	struct drm_i915_private *dev_priv = to_i915(dev);
4104 Serge 5006
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5007
	struct intel_encoder *encoder;
5008
	int pipe = intel_crtc->pipe;
4560 Serge 5009
	bool is_dsi;
4104 Serge 5010
 
5011
	WARN_ON(!crtc->enabled);
5012
 
5013
	if (intel_crtc->active)
5014
		return;
5015
 
5354 serge 5016
	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
5060 serge 5017
 
5354 serge 5018
	if (!is_dsi) {
5019
		if (IS_CHERRYVIEW(dev))
5020
			chv_prepare_pll(intel_crtc, &intel_crtc->config);
5021
		else
5022
			vlv_prepare_pll(intel_crtc, &intel_crtc->config);
5023
	}
5060 serge 5024
 
5025
	if (intel_crtc->config.has_dp_encoder)
5026
		intel_dp_set_m_n(intel_crtc);
5027
 
5028
	intel_set_pipe_timings(intel_crtc);
5029
 
5354 serge 5030
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
5031
		struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 5032
 
5354 serge 5033
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
5034
		I915_WRITE(CHV_CANVAS(pipe), 0);
5035
	}
5036
 
5060 serge 5037
	i9xx_set_pipeconf(intel_crtc);
5038
 
4104 Serge 5039
	intel_crtc->active = true;
5040
 
5354 serge 5041
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5060 serge 5042
 
4104 Serge 5043
	for_each_encoder_on_crtc(dev, crtc, encoder)
5044
		if (encoder->pre_pll_enable)
5045
			encoder->pre_pll_enable(encoder);
5046
 
5060 serge 5047
	if (!is_dsi) {
5048
		if (IS_CHERRYVIEW(dev))
5354 serge 5049
			chv_enable_pll(intel_crtc, &intel_crtc->config);
5060 serge 5050
		else
5354 serge 5051
			vlv_enable_pll(intel_crtc, &intel_crtc->config);
5060 serge 5052
	}
4104 Serge 5053
 
5054
	for_each_encoder_on_crtc(dev, crtc, encoder)
5055
		if (encoder->pre_enable)
5056
			encoder->pre_enable(encoder);
5057
 
5058
	i9xx_pfit_enable(intel_crtc);
5059
 
5060
	intel_crtc_load_lut(crtc);
5061
 
4560 Serge 5062
	intel_update_watermarks(crtc);
5060 serge 5063
	intel_enable_pipe(intel_crtc);
4104 Serge 5064
 
5065
	for_each_encoder_on_crtc(dev, crtc, encoder)
5066
		encoder->enable(encoder);
5060 serge 5067
 
5354 serge 5068
	assert_vblank_disabled(crtc);
5069
	drm_crtc_vblank_on(crtc);
5070
 
5060 serge 5071
	intel_crtc_enable_planes(crtc);
5072
 
5073
	/* Underruns don't raise interrupts, so check manually. */
5354 serge 5074
	i9xx_check_fifo_underruns(dev_priv);
4104 Serge 5075
}
5076
 
5060 serge 5077
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
5078
{
5079
	struct drm_device *dev = crtc->base.dev;
5080
	struct drm_i915_private *dev_priv = dev->dev_private;
5081
 
5082
	I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
5083
	I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
5084
}
5085
 
2327 Serge 5086
static void i9xx_crtc_enable(struct drm_crtc *crtc)
5087
{
5088
	struct drm_device *dev = crtc->dev;
5354 serge 5089
	struct drm_i915_private *dev_priv = to_i915(dev);
2327 Serge 5090
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 5091
	struct intel_encoder *encoder;
2327 Serge 5092
	int pipe = intel_crtc->pipe;
5093
 
3031 serge 5094
	WARN_ON(!crtc->enabled);
5095
 
2327 Serge 5096
	if (intel_crtc->active)
5097
		return;
5098
 
5060 serge 5099
	i9xx_set_pll_dividers(intel_crtc);
5100
 
5101
	if (intel_crtc->config.has_dp_encoder)
5102
		intel_dp_set_m_n(intel_crtc);
5103
 
5104
	intel_set_pipe_timings(intel_crtc);
5105
 
5106
	i9xx_set_pipeconf(intel_crtc);
5107
 
2327 Serge 5108
	intel_crtc->active = true;
5109
 
5060 serge 5110
	if (!IS_GEN2(dev))
5354 serge 5111
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5060 serge 5112
 
3480 Serge 5113
	for_each_encoder_on_crtc(dev, crtc, encoder)
5114
		if (encoder->pre_enable)
5115
			encoder->pre_enable(encoder);
5116
 
4104 Serge 5117
	i9xx_enable_pll(intel_crtc);
5118
 
5119
	i9xx_pfit_enable(intel_crtc);
5120
 
5121
	intel_crtc_load_lut(crtc);
5122
 
4560 Serge 5123
	intel_update_watermarks(crtc);
5060 serge 5124
	intel_enable_pipe(intel_crtc);
2327 Serge 5125
 
5060 serge 5126
	for_each_encoder_on_crtc(dev, crtc, encoder)
5127
		encoder->enable(encoder);
3031 serge 5128
 
5354 serge 5129
	assert_vblank_disabled(crtc);
5130
	drm_crtc_vblank_on(crtc);
5131
 
5060 serge 5132
	intel_crtc_enable_planes(crtc);
4104 Serge 5133
 
5060 serge 5134
	/*
5135
	 * Gen2 reports pipe underruns whenever all planes are disabled.
5136
	 * So don't enable underrun reporting before at least some planes
5137
	 * are enabled.
5138
	 * FIXME: Need to fix the logic to work when we turn off all planes
5139
	 * but leave the pipe running.
5140
	 */
5141
	if (IS_GEN2(dev))
5354 serge 5142
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5060 serge 5143
 
5144
	/* Underruns don't raise interrupts, so check manually. */
5354 serge 5145
	i9xx_check_fifo_underruns(dev_priv);
2327 Serge 5146
}
5147
 
3746 Serge 5148
static void i9xx_pfit_disable(struct intel_crtc *crtc)
5149
{
5150
	struct drm_device *dev = crtc->base.dev;
5151
	struct drm_i915_private *dev_priv = dev->dev_private;
5152
 
4104 Serge 5153
	if (!crtc->config.gmch_pfit.control)
5154
		return;
5155
 
3746 Serge 5156
	assert_pipe_disabled(dev_priv, crtc->pipe);
5157
 
4104 Serge 5158
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
5159
			 I915_READ(PFIT_CONTROL));
3746 Serge 5160
	I915_WRITE(PFIT_CONTROL, 0);
5161
}
5162
 
2327 Serge 5163
static void i9xx_crtc_disable(struct drm_crtc *crtc)
5164
{
5165
	struct drm_device *dev = crtc->dev;
5166
	struct drm_i915_private *dev_priv = dev->dev_private;
5167
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 5168
	struct intel_encoder *encoder;
2327 Serge 5169
	int pipe = intel_crtc->pipe;
5170
 
5171
	if (!intel_crtc->active)
5172
		return;
5173
 
5060 serge 5174
	/*
5175
	 * Gen2 reports pipe underruns whenever all planes are disabled.
5176
	 * So disable underrun reporting before all the planes get disabled.
5177
	 * FIXME: Need to fix the logic to work when we turn off all planes
5178
	 * but leave the pipe running.
5179
	 */
5180
	if (IS_GEN2(dev))
5354 serge 5181
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5060 serge 5182
 
5183
	/*
5184
	 * Vblank time updates from the shadow to live plane control register
5185
	 * are blocked if the memory self-refresh mode is active at that
5186
	 * moment. So to make sure the plane gets truly disabled, disable
5187
	 * first the self-refresh mode. The self-refresh enable bit in turn
5188
	 * will be checked/applied by the HW only at the next frame start
5189
	 * event which is after the vblank start event, so we need to have a
5190
	 * wait-for-vblank between disabling the plane and the pipe.
5191
	 */
5192
	intel_set_memory_cxsr(dev_priv, false);
5193
	intel_crtc_disable_planes(crtc);
5194
 
5195
	/*
5196
	 * On gen2 planes are double buffered but the pipe isn't, so we must
5197
	 * wait for planes to fully turn off before disabling the pipe.
5198
	 * We also need to wait on all gmch platforms because of the
5199
	 * self-refresh mode constraint explained above.
5200
	 */
5201
	intel_wait_for_vblank(dev, pipe);
2327 Serge 5202
 
5354 serge 5203
	drm_crtc_vblank_off(crtc);
5204
	assert_vblank_disabled(crtc);
3480 Serge 5205
 
5354 serge 5206
	for_each_encoder_on_crtc(dev, crtc, encoder)
5207
		encoder->disable(encoder);
5208
 
5209
	intel_disable_pipe(intel_crtc);
5210
 
3746 Serge 5211
	i9xx_pfit_disable(intel_crtc);
3480 Serge 5212
 
4104 Serge 5213
	for_each_encoder_on_crtc(dev, crtc, encoder)
5214
		if (encoder->post_disable)
5215
			encoder->post_disable(encoder);
2327 Serge 5216
 
5354 serge 5217
	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
5060 serge 5218
		if (IS_CHERRYVIEW(dev))
5219
			chv_disable_pll(dev_priv, pipe);
5220
		else if (IS_VALLEYVIEW(dev))
4557 Serge 5221
			vlv_disable_pll(dev_priv, pipe);
5060 serge 5222
		else
5354 serge 5223
			i9xx_disable_pll(intel_crtc);
5060 serge 5224
	}
4104 Serge 5225
 
5060 serge 5226
	if (!IS_GEN2(dev))
5354 serge 5227
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5060 serge 5228
 
2327 Serge 5229
	intel_crtc->active = false;
4560 Serge 5230
	intel_update_watermarks(crtc);
5231
 
5060 serge 5232
	mutex_lock(&dev->struct_mutex);
2327 Serge 5233
	intel_update_fbc(dev);
5060 serge 5234
	mutex_unlock(&dev->struct_mutex);
2327 Serge 5235
}
5236
 
3031 serge 5237
static void i9xx_crtc_off(struct drm_crtc *crtc)
2327 Serge 5238
{
5239
}
5240
 
5060 serge 5241
/* Master function to enable/disable CRTC and corresponding power wells */
5242
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
5243
{
5244
	struct drm_device *dev = crtc->dev;
5245
	struct drm_i915_private *dev_priv = dev->dev_private;
5246
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5247
	enum intel_display_power_domain domain;
5248
	unsigned long domains;
5249
 
5250
	if (enable) {
5251
		if (!intel_crtc->active) {
5252
			domains = get_crtc_power_domains(crtc);
5253
			for_each_power_domain(domain, domains)
5254
				intel_display_power_get(dev_priv, domain);
5255
			intel_crtc->enabled_power_domains = domains;
5256
 
5257
			dev_priv->display.crtc_enable(crtc);
5258
		}
5259
	} else {
5260
		if (intel_crtc->active) {
5261
			dev_priv->display.crtc_disable(crtc);
5262
 
5263
			domains = intel_crtc->enabled_power_domains;
5264
			for_each_power_domain(domain, domains)
5265
				intel_display_power_put(dev_priv, domain);
5266
			intel_crtc->enabled_power_domains = 0;
5267
		}
5268
	}
2330 Serge 5269
}
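
/*
 * Illustrative usage (not part of the driver): intel_crtc_update_dpms()
 * below is essentially a wrapper around this helper, enabling the pipe
 * whenever any encoder on the crtc still has active connectors.
 */
#if 0
	intel_crtc_control(crtc, false);	/* disable pipe, release wells */
	intel_crtc_control(crtc, true);		/* re-acquire wells, enable pipe */
#endif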
2327 Serge 5270
 
3031 serge 5271
/**
5272
 * Sets the power management mode of the pipe and plane.
5273
 */
5274
void intel_crtc_update_dpms(struct drm_crtc *crtc)
5275
{
5276
	struct drm_device *dev = crtc->dev;
5277
	struct intel_encoder *intel_encoder;
5278
	bool enable = false;
5279
 
5280
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5281
		enable |= intel_encoder->connectors_active;
5282
 
5060 serge 5283
	intel_crtc_control(crtc, enable);
3031 serge 5284
}
5285
 
2330 Serge 5286
static void intel_crtc_disable(struct drm_crtc *crtc)
5287
{
5288
	struct drm_device *dev = crtc->dev;
3031 serge 5289
	struct drm_connector *connector;
5290
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 5291
	struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
5292
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2327 Serge 5293
 
3031 serge 5294
	/* crtc should still be enabled when we disable it. */
5295
	WARN_ON(!crtc->enabled);
2327 Serge 5296
 
4104 Serge 5297
	dev_priv->display.crtc_disable(crtc);
3031 serge 5298
	dev_priv->display.off(crtc);
5299
 
5060 serge 5300
	if (crtc->primary->fb) {
4280 Serge 5301
		mutex_lock(&dev->struct_mutex);
5060 serge 5302
		intel_unpin_fb_obj(old_obj);
5303
		i915_gem_track_fb(old_obj, NULL,
5304
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
4280 Serge 5305
		mutex_unlock(&dev->struct_mutex);
5060 serge 5306
		crtc->primary->fb = NULL;
4280 Serge 5307
	}
3031 serge 5308
 
5309
	/* Update computed state. */
5310
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
5311
		if (!connector->encoder || !connector->encoder->crtc)
5312
			continue;
5313
 
5314
		if (connector->encoder->crtc != crtc)
5315
			continue;
5316
 
5317
		connector->dpms = DRM_MODE_DPMS_OFF;
5318
		to_intel_encoder(connector->encoder)->connectors_active = false;
2330 Serge 5319
	}
5320
}
2327 Serge 5321
 
3031 serge 5322
void intel_encoder_destroy(struct drm_encoder *encoder)
2330 Serge 5323
{
3031 serge 5324
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5325
 
5326
	drm_encoder_cleanup(encoder);
5327
	kfree(intel_encoder);
2330 Serge 5328
}
2327 Serge 5329
 
4104 Serge 5330
/* Simple dpms helper for encoders with just one connector, no cloning and only
3031 serge 5331
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
5332
 * state of the entire output pipe. */
4104 Serge 5333
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
2330 Serge 5334
{
3031 serge 5335
	if (mode == DRM_MODE_DPMS_ON) {
5336
		encoder->connectors_active = true;
5337
 
5338
		intel_crtc_update_dpms(encoder->base.crtc);
5339
	} else {
5340
		encoder->connectors_active = false;
5341
 
5342
		intel_crtc_update_dpms(encoder->base.crtc);
5343
	}
2330 Serge 5344
}
2327 Serge 5345
 
3031 serge 5346
/* Cross check the actual hw state with our own modeset state tracking (and its
5347
 * internal consistency). */
5348
static void intel_connector_check_state(struct intel_connector *connector)
2330 Serge 5349
{
3031 serge 5350
	if (connector->get_hw_state(connector)) {
5351
		struct intel_encoder *encoder = connector->encoder;
5352
		struct drm_crtc *crtc;
5353
		bool encoder_enabled;
5354
		enum pipe pipe;
5355
 
5356
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5357
			      connector->base.base.id,
5060 serge 5358
			      connector->base.name);
3031 serge 5359
 
5060 serge 5360
		/* there is no real hw state for MST connectors */
5361
		if (connector->mst_port)
5362
			return;
5363
 
3031 serge 5364
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
5365
		     "wrong connector dpms state\n");
5366
		WARN(connector->base.encoder != &encoder->base,
5367
		     "active connector not linked to encoder\n");
5060 serge 5368
 
5369
		if (encoder) {
3031 serge 5370
			WARN(!encoder->connectors_active,
5371
			     "encoder->connectors_active not set\n");
5372
 
5373
			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
5374
			WARN(!encoder_enabled, "encoder not enabled\n");
5375
			if (WARN_ON(!encoder->base.crtc))
5376
				return;
5377
 
5378
			crtc = encoder->base.crtc;
5379
 
5380
			WARN(!crtc->enabled, "crtc not enabled\n");
5381
			WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
5382
			WARN(pipe != to_intel_crtc(crtc)->pipe,
5383
			     "encoder active on the wrong pipe\n");
5384
		}
5060 serge 5385
	}
2330 Serge 5386
}
2327 Serge 5387
 
3031 serge 5388
/* Even simpler default implementation, if there's really no special case to
5389
 * consider. */
5390
void intel_connector_dpms(struct drm_connector *connector, int mode)
2330 Serge 5391
{
3031 serge 5392
	/* All the simple cases only support two dpms states. */
5393
	if (mode != DRM_MODE_DPMS_ON)
5394
		mode = DRM_MODE_DPMS_OFF;
2342 Serge 5395
 
3031 serge 5396
	if (mode == connector->dpms)
5397
		return;
5398
 
5399
	connector->dpms = mode;
5400
 
5401
	/* Only need to change hw state when actually enabled */
4104 Serge 5402
	if (connector->encoder)
5403
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
3031 serge 5404
 
5405
	intel_modeset_check_state(connector->dev);
2330 Serge 5406
}
2327 Serge 5407
 
3031 serge 5408
/* Simple connector->get_hw_state implementation for encoders that support only
5409
 * one connector and no cloning and hence the encoder state determines the state
5410
 * of the connector. */
5411
bool intel_connector_get_hw_state(struct intel_connector *connector)
2330 Serge 5412
{
3031 serge 5413
	enum pipe pipe = 0;
5414
	struct intel_encoder *encoder = connector->encoder;
2330 Serge 5415
 
3031 serge 5416
	return encoder->get_hw_state(encoder, &pipe);
2330 Serge 5417
}
5418
 
4104 Serge 5419
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5420
				     struct intel_crtc_config *pipe_config)
5421
{
5422
	struct drm_i915_private *dev_priv = dev->dev_private;
5423
	struct intel_crtc *pipe_B_crtc =
5424
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5425
 
5426
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
5427
		      pipe_name(pipe), pipe_config->fdi_lanes);
5428
	if (pipe_config->fdi_lanes > 4) {
5429
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5430
			      pipe_name(pipe), pipe_config->fdi_lanes);
5431
		return false;
5432
	}
5433
 
4560 Serge 5434
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4104 Serge 5435
		if (pipe_config->fdi_lanes > 2) {
5436
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5437
				      pipe_config->fdi_lanes);
5438
			return false;
5439
		} else {
5440
			return true;
5441
		}
5442
	}
5443
 
5444
	if (INTEL_INFO(dev)->num_pipes == 2)
5445
		return true;
5446
 
5447
	/* Ivybridge 3 pipe is really complicated */
5448
	switch (pipe) {
5449
	case PIPE_A:
5450
		return true;
5451
	case PIPE_B:
5452
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5453
		    pipe_config->fdi_lanes > 2) {
5454
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5455
				      pipe_name(pipe), pipe_config->fdi_lanes);
5456
			return false;
5457
		}
5458
		return true;
5459
	case PIPE_C:
5460
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
5461
		    pipe_B_crtc->config.fdi_lanes <= 2) {
5462
			if (pipe_config->fdi_lanes > 2) {
5463
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5464
					      pipe_name(pipe), pipe_config->fdi_lanes);
5465
				return false;
5466
			}
5467
		} else {
5468
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5469
			return false;
5470
		}
5471
		return true;
5472
	default:
5473
		BUG();
5474
	}
5475
}
5476
 
5477
#define RETRY 1
5478
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
3746 Serge 5479
				      struct intel_crtc_config *pipe_config)
2330 Serge 5480
{
4104 Serge 5481
	struct drm_device *dev = intel_crtc->base.dev;
3746 Serge 5482
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4104 Serge 5483
	int lane, link_bw, fdi_dotclock;
5484
	bool setup_ok, needs_recompute = false;
2330 Serge 5485
 
4104 Serge 5486
retry:
5487
	/* FDI is a binary signal running at ~2.7GHz, encoding
5488
	 * each output octet as 10 bits. The actual frequency
5489
	 * is stored as a divider into a 100MHz clock, and the
5490
	 * mode pixel clock is stored in units of 1KHz.
5491
	 * Hence the bw of each lane in terms of the mode signal
5492
	 * is:
5493
	 */
5494
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5495
 
4560 Serge 5496
	fdi_dotclock = adjusted_mode->crtc_clock;
4104 Serge 5497
 
5498
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5499
					   pipe_config->pipe_bpp);
5500
 
5501
	pipe_config->fdi_lanes = lane;
5502
 
5503
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5504
			       link_bw, &pipe_config->fdi_m_n);
5505
 
5506
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5507
					    intel_crtc->pipe, pipe_config);
5508
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5509
		pipe_config->pipe_bpp -= 2*3;
5510
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5511
			      pipe_config->pipe_bpp);
5512
		needs_recompute = true;
5513
		pipe_config->bw_constrained = true;
5514
 
5515
		goto retry;
5516
	}
5517
 
5518
	if (needs_recompute)
5519
		return RETRY;
5520
 
5521
	return setup_ok ? 0 : -EINVAL;
5522
}
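
/*
 * Worked example (illustrative): with the FDI PLL reporting 2.7 GHz,
 * link_bw = 27 * 10000 = 270000, i.e. each lane carries 270000 "kHz"
 * worth of byte-sized symbols (8 data bits per 10-bit symbol).  A
 * 148500 kHz mode at 24 bpp needs 148500 * 24 = 3564000 kbit/s while a
 * single lane provides 270000 * 8 = 2160000 kbit/s, so at least two FDI
 * lanes are required (before the spread-spectrum margin is added).
 */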
5523
 
5524
static void hsw_compute_ips_config(struct intel_crtc *crtc,
5525
				   struct intel_crtc_config *pipe_config)
5526
{
5060 serge 5527
	pipe_config->ips_enabled = i915.enable_ips &&
4104 Serge 5528
				   hsw_crtc_supports_ips(crtc) &&
5529
				   pipe_config->pipe_bpp <= 24;
5530
}
5531
 
5532
static int intel_crtc_compute_config(struct intel_crtc *crtc,
5533
				     struct intel_crtc_config *pipe_config)
5534
{
5535
	struct drm_device *dev = crtc->base.dev;
5354 serge 5536
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 5537
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5538
 
4560 Serge 5539
	/* FIXME should check pixel clock limits on all platforms */
5540
	if (INTEL_INFO(dev)->gen < 4) {
5541
		int clock_limit =
5542
			dev_priv->display.get_display_clock_speed(dev);
5543
 
5544
		/*
5545
		 * Enable pixel doubling when the dot clock
5546
		 * is > 90% of the (display) core speed.
5547
		 *
5548
		 * GDG double wide on either pipe,
5549
		 * otherwise pipe A only.
5550
		 */
5551
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5552
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5553
			clock_limit *= 2;
5554
			pipe_config->double_wide = true;
5555
		}
5556
 
5557
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4104 Serge 5558
			return -EINVAL;
2330 Serge 5559
	}
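
	/*
	 * Worked example (illustrative): with a 200000 kHz core display
	 * clock the single-wide limit is 200000 * 9 / 10 = 180000 kHz.
	 * A 266000 kHz mode on pipe A exceeds that, so double wide is
	 * selected and the limit doubles to 400000 kHz (threshold
	 * 360000 kHz), which the mode then satisfies; anything above
	 * 360000 kHz is still rejected.
	 */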
5560
 
4560 Serge 5561
	/*
5562
	 * Pipe horizontal size must be even in:
5563
	 * - DVO ganged mode
5564
	 * - LVDS dual channel mode
5565
	 * - Double wide pipe
5566
	 */
5354 serge 5567
	if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4560 Serge 5568
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5569
		pipe_config->pipe_src_w &= ~1;
5570
 
4104 Serge 5571
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
5572
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
3031 serge 5573
	 */
5574
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5575
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
4104 Serge 5576
		return -EINVAL;
3031 serge 5577
 
3746 Serge 5578
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5579
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5580
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5581
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
5582
		 * for lvds. */
5583
		pipe_config->pipe_bpp = 8*3;
5584
	}
5585
 
4104 Serge 5586
	if (HAS_IPS(dev))
5587
		hsw_compute_ips_config(crtc, pipe_config);
5588
 
5589
	if (pipe_config->has_pch_encoder)
5590
		return ironlake_fdi_compute_config(crtc, pipe_config);
5591
 
5592
	return 0;
2330 Serge 5593
}
5594
 
3031 serge 5595
static int valleyview_get_display_clock_speed(struct drm_device *dev)
5596
{
5060 serge 5597
	struct drm_i915_private *dev_priv = dev->dev_private;
5598
	u32 val;
5599
	int divider;
5600
 
5354 serge 5601
	/* FIXME: Punit isn't quite ready yet */
5602
	if (IS_CHERRYVIEW(dev))
5603
		return 400000;
5604
 
5605
	if (dev_priv->hpll_freq == 0)
5606
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
5607
 
5060 serge 5608
	mutex_lock(&dev_priv->dpio_lock);
5609
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5610
	mutex_unlock(&dev_priv->dpio_lock);
5611
 
5612
	divider = val & DISPLAY_FREQUENCY_VALUES;
5613
 
5614
	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
5615
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5616
	     "cdclk change in progress\n");
5617
 
5354 serge 5618
	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
3031 serge 5619
}
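
/*
 * Worked example (illustrative, assuming a 1600 MHz HPLL): a CCK divider
 * field of 11 yields DIV_ROUND_CLOSEST(3200000, 12) = 266667 kHz and a
 * field of 9 yields 320000 kHz, matching the cdclk bins selected by
 * valleyview_calc_cdclk() earlier in this file.
 */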
5620
 
2327 Serge 5621
static int i945_get_display_clock_speed(struct drm_device *dev)
5622
{
5623
	return 400000;
5624
}
5625
 
5626
static int i915_get_display_clock_speed(struct drm_device *dev)
5627
{
5628
	return 333000;
5629
}
5630
 
5631
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5632
{
5633
	return 200000;
5634
}
5635
 
4104 Serge 5636
static int pnv_get_display_clock_speed(struct drm_device *dev)
5637
{
5638
	u16 gcfgc = 0;
5639
 
5640
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5641
 
5642
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5643
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5644
		return 267000;
5645
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5646
		return 333000;
5647
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5648
		return 444000;
5649
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5650
		return 200000;
5651
	default:
5652
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
5653
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5654
		return 133000;
5655
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5656
		return 167000;
5657
	}
5658
}
5659
 
2327 Serge 5660
static int i915gm_get_display_clock_speed(struct drm_device *dev)
5661
{
5662
	u16 gcfgc = 0;
5663
 
5664
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5665
 
5666
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5667
		return 133000;
5668
	else {
5669
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5670
		case GC_DISPLAY_CLOCK_333_MHZ:
5671
			return 333000;
5672
		default:
5673
		case GC_DISPLAY_CLOCK_190_200_MHZ:
5674
			return 190000;
5675
		}
5676
	}
5677
}
5678
 
5679
static int i865_get_display_clock_speed(struct drm_device *dev)
5680
{
5681
	return 266000;
5682
}
5683
 
5684
static int i855_get_display_clock_speed(struct drm_device *dev)
5685
{
5686
	u16 hpllcc = 0;
5687
	/* Assume that the hardware is in the high speed state.  This
5688
	 * should be the default.
5689
	 */
5690
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5691
	case GC_CLOCK_133_200:
5692
	case GC_CLOCK_100_200:
5693
		return 200000;
5694
	case GC_CLOCK_166_250:
5695
		return 250000;
5696
	case GC_CLOCK_100_133:
5697
		return 133000;
5698
	}
5699
 
5700
	/* Shouldn't happen */
5701
	return 0;
5702
}
5703
 
5704
static int i830_get_display_clock_speed(struct drm_device *dev)
5705
{
5706
	return 133000;
5707
}
5708
 
5709
static void
3746 Serge 5710
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2327 Serge 5711
{
3746 Serge 5712
	while (*num > DATA_LINK_M_N_MASK ||
5713
	       *den > DATA_LINK_M_N_MASK) {
2327 Serge 5714
		*num >>= 1;
5715
		*den >>= 1;
5716
	}
5717
}
5718
 
3746 Serge 5719
static void compute_m_n(unsigned int m, unsigned int n,
5720
			uint32_t *ret_m, uint32_t *ret_n)
5721
{
5722
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5723
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
5724
	intel_reduce_m_n_ratio(ret_m, ret_n);
5725
}
5726
 
3480 Serge 5727
void
5728
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5729
		       int pixel_clock, int link_clock,
5730
		       struct intel_link_m_n *m_n)
2327 Serge 5731
{
3480 Serge 5732
	m_n->tu = 64;
3746 Serge 5733
 
5734
	compute_m_n(bits_per_pixel * pixel_clock,
5735
		    link_clock * nlanes * 8,
5736
		    &m_n->gmch_m, &m_n->gmch_n);
5737
 
5738
	compute_m_n(pixel_clock, link_clock,
5739
		    &m_n->link_m, &m_n->link_n);
2327 Serge 5740
}
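
/*
 * Worked example (illustrative): a 148500 kHz mode at 24 bpp over a
 * 270000 kHz, 4-lane DP link gives a data M/N ratio of
 * 24 * 148500 / (270000 * 4 * 8) = 0.4125 and a link M/N ratio of
 * 148500 / 270000 = 0.55; compute_m_n() scales each ratio to an integer
 * pair that fits within the DATA_LINK_M_N_MASK-sized register fields.
 */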
5741
 
5742
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5743
{
5060 serge 5744
	if (i915.panel_use_ssc >= 0)
5745
		return i915.panel_use_ssc != 0;
4104 Serge 5746
	return dev_priv->vbt.lvds_use_ssc
2327 Serge 5747
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5748
}
5749
 
5354 serge 5750
static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
3031 serge 5751
{
5354 serge 5752
	struct drm_device *dev = crtc->base.dev;
3031 serge 5753
	struct drm_i915_private *dev_priv = dev->dev_private;
5754
	int refclk;
2327 Serge 5755
 
3031 serge 5756
	if (IS_VALLEYVIEW(dev)) {
4560 Serge 5757
		refclk = 100000;
5354 serge 5758
	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
3031 serge 5759
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 5760
		refclk = dev_priv->vbt.lvds_ssc_freq;
5761
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
3031 serge 5762
	} else if (!IS_GEN2(dev)) {
5763
		refclk = 96000;
5764
	} else {
5765
		refclk = 48000;
5766
	}
2327 Serge 5767
 
3031 serge 5768
	return refclk;
5769
}
2327 Serge 5770
 
4104 Serge 5771
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
3031 serge 5772
{
4104 Serge 5773
	return (1 << dpll->n) << 16 | dpll->m2;
5774
}
3746 Serge 5775
 
4104 Serge 5776
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5777
{
5778
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
3031 serge 5779
}
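
/*
 * Worked example (illustrative): for n = 3, m1 = 16, m2 = 8 the i9xx
 * encoding above yields 0x00031008, while the Pineview variant packs
 * (1 << n) << 16 | m2 = 0x00080008 since m1 is not programmed there.
 */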
2327 Serge 5780
 
3746 Serge 5781
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
3031 serge 5782
				     intel_clock_t *reduced_clock)
5783
{
3746 Serge 5784
	struct drm_device *dev = crtc->base.dev;
3031 serge 5785
	u32 fp, fp2 = 0;
2327 Serge 5786
 
3031 serge 5787
	if (IS_PINEVIEW(dev)) {
5354 serge 5788
		fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
3031 serge 5789
		if (reduced_clock)
4104 Serge 5790
			fp2 = pnv_dpll_compute_fp(reduced_clock);
3031 serge 5791
	} else {
5354 serge 5792
		fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
3031 serge 5793
		if (reduced_clock)
4104 Serge 5794
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
3031 serge 5795
	}
2327 Serge 5796
 
5354 serge 5797
	crtc->new_config->dpll_hw_state.fp0 = fp;
2327 Serge 5798
 
3746 Serge 5799
	crtc->lowfreq_avail = false;
5354 serge 5800
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5060 serge 5801
	    reduced_clock && i915.powersave) {
5354 serge 5802
		crtc->new_config->dpll_hw_state.fp1 = fp2;
3746 Serge 5803
		crtc->lowfreq_avail = true;
3031 serge 5804
	} else {
5354 serge 5805
		crtc->new_config->dpll_hw_state.fp1 = fp;
3031 serge 5806
	}
5807
}
2327 Serge 5808
 
4560 Serge 5809
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
5810
		pipe)
4104 Serge 5811
{
5812
	u32 reg_val;
5813
 
5814
	/*
5815
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
5816
	 * and set it to a reasonable value instead.
5817
	 */
4560 Serge 5818
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4104 Serge 5819
	reg_val &= 0xffffff00;
5820
	reg_val |= 0x00000030;
4560 Serge 5821
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4104 Serge 5822
 
4560 Serge 5823
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4104 Serge 5824
	reg_val &= 0x8cffffff;
5825
	reg_val = 0x8c000000;
4560 Serge 5826
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4104 Serge 5827
 
4560 Serge 5828
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4104 Serge 5829
	reg_val &= 0xffffff00;
4560 Serge 5830
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4104 Serge 5831
 
4560 Serge 5832
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4104 Serge 5833
	reg_val &= 0x00ffffff;
5834
	reg_val |= 0xb0000000;
4560 Serge 5835
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4104 Serge 5836
}
5837
 
5838
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5839
					 struct intel_link_m_n *m_n)
5840
{
5841
	struct drm_device *dev = crtc->base.dev;
5842
	struct drm_i915_private *dev_priv = dev->dev_private;
5843
	int pipe = crtc->pipe;
5844
 
5845
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5846
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5847
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5848
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5849
}
5850
 
5851
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5354 serge 5852
					 struct intel_link_m_n *m_n,
5853
					 struct intel_link_m_n *m2_n2)
4104 Serge 5854
{
5855
	struct drm_device *dev = crtc->base.dev;
5856
	struct drm_i915_private *dev_priv = dev->dev_private;
5857
	int pipe = crtc->pipe;
5858
	enum transcoder transcoder = crtc->config.cpu_transcoder;
5859
 
5860
	if (INTEL_INFO(dev)->gen >= 5) {
5861
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5862
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5863
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5864
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5354 serge 5865
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
5866
		 * for gen < 8) and if DRRS is supported (to make sure the
5867
		 * registers are not unnecessarily accessed).
5868
		 */
5869
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
5870
			crtc->config.has_drrs) {
5871
			I915_WRITE(PIPE_DATA_M2(transcoder),
5872
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
5873
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
5874
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
5875
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
5876
		}
4104 Serge 5877
	} else {
5878
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5879
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5880
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
5881
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
5882
	}
5883
}
5884
 
5354 serge 5885
void intel_dp_set_m_n(struct intel_crtc *crtc)
3031 serge 5886
{
3746 Serge 5887
	if (crtc->config.has_pch_encoder)
5888
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5889
	else
5354 serge 5890
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
5891
						   &crtc->config.dp_m2_n2);
3746 Serge 5892
}
5893
 
5354 serge 5894
static void vlv_update_pll(struct intel_crtc *crtc,
5895
			   struct intel_crtc_config *pipe_config)
3746 Serge 5896
{
5060 serge 5897
	u32 dpll, dpll_md;
5898
 
5899
	/*
5900
	 * Enable DPIO clock input. We should never disable the reference
5901
	 * clock for pipe B, since VGA hotplug / manual detection depends
5902
	 * on it.
5903
	 */
5904
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5905
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5906
	/* We should never disable this, set it here for state tracking */
5907
	if (crtc->pipe == PIPE_B)
5908
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5909
	dpll |= DPLL_VCO_ENABLE;
5354 serge 5910
	pipe_config->dpll_hw_state.dpll = dpll;
5060 serge 5911
 
5354 serge 5912
	dpll_md = (pipe_config->pixel_multiplier - 1)
5060 serge 5913
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5354 serge 5914
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
5060 serge 5915
}
5916
 
5354 serge 5917
static void vlv_prepare_pll(struct intel_crtc *crtc,
5918
			    const struct intel_crtc_config *pipe_config)
5060 serge 5919
{
3746 Serge 5920
	struct drm_device *dev = crtc->base.dev;
3031 serge 5921
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 5922
	int pipe = crtc->pipe;
5060 serge 5923
	u32 mdiv;
3031 serge 5924
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
5060 serge 5925
	u32 coreclk, reg_val;
2327 Serge 5926
 
3480 Serge 5927
	mutex_lock(&dev_priv->dpio_lock);
5928
 
5354 serge 5929
	bestn = pipe_config->dpll.n;
5930
	bestm1 = pipe_config->dpll.m1;
5931
	bestm2 = pipe_config->dpll.m2;
5932
	bestp1 = pipe_config->dpll.p1;
5933
	bestp2 = pipe_config->dpll.p2;
3031 serge 5934
 
4104 Serge 5935
	/* See eDP HDMI DPIO driver vbios notes doc */
5936
 
5937
	/* PLL B needs special handling */
5060 serge 5938
	if (pipe == PIPE_B)
4560 Serge 5939
		vlv_pllb_recal_opamp(dev_priv, pipe);
4104 Serge 5940
 
5941
	/* Set up Tx target for periodic Rcomp update */
4560 Serge 5942
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
4104 Serge 5943
 
5944
	/* Disable target IRef on PLL */
4560 Serge 5945
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
4104 Serge 5946
	reg_val &= 0x00ffffff;
4560 Serge 5947
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
4104 Serge 5948
 
5949
	/* Disable fast lock */
4560 Serge 5950
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
4104 Serge 5951
 
5952
	/* Set idtafcrecal before PLL is enabled */
3031 serge 5953
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5954
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5955
	mdiv |= ((bestn << DPIO_N_SHIFT));
5956
	mdiv |= (1 << DPIO_K_SHIFT);
4104 Serge 5957
 
5958
	/*
5959
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5960
	 * but we don't support that).
5961
	 * Note: don't use the DAC post divider as it seems unstable.
5962
	 */
5963
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4560 Serge 5964
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
4104 Serge 5965
 
3031 serge 5966
	mdiv |= DPIO_ENABLE_CALIBRATION;
4560 Serge 5967
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
3031 serge 5968
 
4104 Serge 5969
	/* Set HBR and RBR LPF coefficients */
5354 serge 5970
	if (pipe_config->port_clock == 162000 ||
5971
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
5972
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
4560 Serge 5973
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4104 Serge 5974
				 0x009f0003);
5975
	else
4560 Serge 5976
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4104 Serge 5977
				 0x00d0000f);
3031 serge 5978
 
5354 serge 5979
	if (crtc->config.has_dp_encoder) {
4104 Serge 5980
		/* Use SSC source */
5060 serge 5981
		if (pipe == PIPE_A)
4560 Serge 5982
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5983
					 0x0df40000);
5984
		else
4560 Serge 5985
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5986
					 0x0df70000);
5987
	} else { /* HDMI or VGA */
5988
		/* Use bend source */
5060 serge 5989
		if (pipe == PIPE_A)
4560 Serge 5990
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5991
					 0x0df70000);
5992
		else
4560 Serge 5993
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5994
					 0x0df40000);
5995
	}
3031 serge 5996
 
4560 Serge 5997
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
4104 Serge 5998
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5354 serge 5999
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
6000
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
4104 Serge 6001
		coreclk |= 0x01000000;
4560 Serge 6002
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
3031 serge 6003
 
4560 Serge 6004
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5060 serge 6005
	mutex_unlock(&dev_priv->dpio_lock);
6006
}
4104 Serge 6007
 
5354 serge 6008
static void chv_update_pll(struct intel_crtc *crtc,
6009
			   struct intel_crtc_config *pipe_config)
5060 serge 6010
{
5354 serge 6011
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
6012
		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
6013
		DPLL_VCO_ENABLE;
6014
	if (crtc->pipe != PIPE_A)
6015
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6016
 
6017
	pipe_config->dpll_hw_state.dpll_md =
6018
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6019
}
6020
 
6021
static void chv_prepare_pll(struct intel_crtc *crtc,
6022
			    const struct intel_crtc_config *pipe_config)
6023
{
5060 serge 6024
	struct drm_device *dev = crtc->base.dev;
6025
	struct drm_i915_private *dev_priv = dev->dev_private;
6026
	int pipe = crtc->pipe;
6027
	int dpll_reg = DPLL(crtc->pipe);
6028
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
6029
	u32 loopfilter, intcoeff;
6030
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
6031
	int refclk;
6032
 
5354 serge 6033
	bestn = pipe_config->dpll.n;
6034
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
6035
	bestm1 = pipe_config->dpll.m1;
6036
	bestm2 = pipe_config->dpll.m2 >> 22;
6037
	bestp1 = pipe_config->dpll.p1;
6038
	bestp2 = pipe_config->dpll.p2;
5060 serge 6039
 
4560 Serge 6040
	/*
5060 serge 6041
	 * Enable Refclk and SSC
4560 Serge 6042
	 */
5060 serge 6043
	I915_WRITE(dpll_reg,
5354 serge 6044
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
3031 serge 6045
 
5060 serge 6046
	mutex_lock(&dev_priv->dpio_lock);
3031 serge 6047
 
5060 serge 6048
	/* p1 and p2 divider */
6049
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
6050
			5 << DPIO_CHV_S1_DIV_SHIFT |
6051
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
6052
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
6053
			1 << DPIO_CHV_K_DIV_SHIFT);
3243 Serge 6054
 
5060 serge 6055
	/* Feedback post-divider - m2 */
6056
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
6057
 
6058
	/* Feedback refclk divider - n and m1 */
6059
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
6060
			DPIO_CHV_M1_DIV_BY_2 |
6061
			1 << DPIO_CHV_N_DIV_SHIFT);
6062
 
6063
	/* M2 fraction division */
6064
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
6065
 
6066
	/* M2 fraction division enable */
6067
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
6068
		       DPIO_CHV_FRAC_DIV_EN |
6069
		       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
6070
 
6071
	/* Loop filter */
5354 serge 6072
	refclk = i9xx_get_refclk(crtc, 0);
5060 serge 6073
	loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
6074
		2 << DPIO_CHV_GAIN_CTRL_SHIFT;
6075
	if (refclk == 100000)
6076
		intcoeff = 11;
6077
	else if (refclk == 38400)
6078
		intcoeff = 10;
6079
	else
6080
		intcoeff = 9;
6081
	loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
6082
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
6083
 
6084
	/* AFC Recal */
6085
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
6086
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
6087
			DPIO_AFC_RECAL);
6088
 
3480 Serge 6089
	mutex_unlock(&dev_priv->dpio_lock);
3031 serge 6090
}
6091
 
5354 serge 6092
/**
6093
 * vlv_force_pll_on - forcibly enable just the PLL
6094
 * @dev: drm device
6095
 * @pipe: pipe PLL to enable
6096
 * @dpll: PLL configuration
6097
 *
6098
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
6099
 * in cases where we need the PLL enabled even when @pipe is not going to
6100
 * be enabled.
6101
 */
6102
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
6103
		      const struct dpll *dpll)
6104
{
6105
	struct intel_crtc *crtc =
6106
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
6107
	struct intel_crtc_config pipe_config = {
6108
		.pixel_multiplier = 1,
6109
		.dpll = *dpll,
6110
	};
6111
 
6112
	if (IS_CHERRYVIEW(dev)) {
6113
		chv_update_pll(crtc, &pipe_config);
6114
		chv_prepare_pll(crtc, &pipe_config);
6115
		chv_enable_pll(crtc, &pipe_config);
6116
	} else {
6117
		vlv_update_pll(crtc, &pipe_config);
6118
		vlv_prepare_pll(crtc, &pipe_config);
6119
		vlv_enable_pll(crtc, &pipe_config);
6120
	}
6121
}
6122
 
6123
/**
6124
 * vlv_force_pll_off - forcibly disable just the PLL
6125
 * @dev: drm device
6126
 * @pipe: pipe PLL to disable
6127
 *
6128
 * Disable the PLL for @pipe. To be used in cases where we need
6129
 * the PLL disabled even when @pipe is not going to be enabled.
6130
 */
6131
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
6132
{
6133
	if (IS_CHERRYVIEW(dev))
6134
		chv_disable_pll(to_i915(dev), pipe);
6135
	else
6136
		vlv_disable_pll(to_i915(dev), pipe);
6137
}
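
/*
 * Illustrative usage (not part of the driver): force the pipe B PLL on
 * around an operation that needs a running clock without enabling the
 * pipe itself, then drop it again.  The divider values are assumed to
 * come from a clock computation elsewhere.
 */
#if 0
	struct dpll dpll = { 0 };	/* dividers computed elsewhere */

	vlv_force_pll_on(dev, PIPE_B, &dpll);
	/* ... perform the clock-dependent operation ... */
	vlv_force_pll_off(dev, PIPE_B);
#endif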
6138
 
3746 Serge 6139
static void i9xx_update_pll(struct intel_crtc *crtc,
6140
			    intel_clock_t *reduced_clock,
3031 serge 6141
			    int num_connectors)
6142
{
3746 Serge 6143
	struct drm_device *dev = crtc->base.dev;
3031 serge 6144
	struct drm_i915_private *dev_priv = dev->dev_private;
6145
	u32 dpll;
6146
	bool is_sdvo;
5354 serge 6147
	struct dpll *clock = &crtc->new_config->dpll;
3031 serge 6148
 
3746 Serge 6149
	i9xx_update_pll_dividers(crtc, reduced_clock);
3243 Serge 6150
 
5354 serge 6151
	is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
6152
		intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
3031 serge 6153
 
6154
	dpll = DPLL_VGA_MODE_DIS;
6155
 
5354 serge 6156
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
3031 serge 6157
		dpll |= DPLLB_MODE_LVDS;
6158
	else
6159
		dpll |= DPLLB_MODE_DAC_SERIAL;
3746 Serge 6160
 
4104 Serge 6161
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5354 serge 6162
		dpll |= (crtc->new_config->pixel_multiplier - 1)
3746 Serge 6163
				<< SDVO_MULTIPLIER_SHIFT_HIRES;
2342 Serge 6164
	}
4104 Serge 6165
 
6166
	if (is_sdvo)
6167
		dpll |= DPLL_SDVO_HIGH_SPEED;
6168
 
5354 serge 6169
	if (crtc->new_config->has_dp_encoder)
4104 Serge 6170
		dpll |= DPLL_SDVO_HIGH_SPEED;
2342 Serge 6171
 
3031 serge 6172
	/* compute bitmask from p1 value */
6173
	if (IS_PINEVIEW(dev))
6174
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
6175
	else {
6176
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6177
		if (IS_G4X(dev) && reduced_clock)
6178
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
6179
	}
6180
	switch (clock->p2) {
6181
	case 5:
6182
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
6183
		break;
6184
	case 7:
6185
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
6186
		break;
6187
	case 10:
6188
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
6189
		break;
6190
	case 14:
6191
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
6192
		break;
6193
	}
6194
	if (INTEL_INFO(dev)->gen >= 4)
6195
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
2327 Serge 6196
 
5354 serge 6197
	if (crtc->new_config->sdvo_tv_clock)
3031 serge 6198
		dpll |= PLL_REF_INPUT_TVCLKINBC;
5354 serge 6199
	else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
3031 serge 6200
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6201
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6202
	else
6203
		dpll |= PLL_REF_INPUT_DREFCLK;
2327 Serge 6204
 
3031 serge 6205
	dpll |= DPLL_VCO_ENABLE;
5354 serge 6206
	crtc->new_config->dpll_hw_state.dpll = dpll;
2327 Serge 6207
 
4104 Serge 6208
	if (INTEL_INFO(dev)->gen >= 4) {
5354 serge 6209
		u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
4104 Serge 6210
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5354 serge 6211
		crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
4104 Serge 6212
	}
3031 serge 6213
}
2327 Serge 6214
 
3746 Serge 6215
static void i8xx_update_pll(struct intel_crtc *crtc,
6216
			    intel_clock_t *reduced_clock,
3031 serge 6217
			    int num_connectors)
6218
{
3746 Serge 6219
	struct drm_device *dev = crtc->base.dev;
3031 serge 6220
	struct drm_i915_private *dev_priv = dev->dev_private;
6221
	u32 dpll;
5354 serge 6222
	struct dpll *clock = &crtc->new_config->dpll;
2327 Serge 6223
 
3746 Serge 6224
	i9xx_update_pll_dividers(crtc, reduced_clock);
3243 Serge 6225
 
3031 serge 6226
	dpll = DPLL_VGA_MODE_DIS;
2327 Serge 6227
 
5354 serge 6228
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
3031 serge 6229
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6230
	} else {
6231
		if (clock->p1 == 2)
6232
			dpll |= PLL_P1_DIVIDE_BY_TWO;
6233
		else
6234
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6235
		if (clock->p2 == 4)
6236
			dpll |= PLL_P2_DIVIDE_BY_4;
6237
	}
2327 Serge 6238
 
5354 serge 6239
	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
4104 Serge 6240
		dpll |= DPLL_DVO_2X_MODE;
6241
 
5354 serge 6242
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
3031 serge 6243
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6244
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6245
	else
6246
		dpll |= PLL_REF_INPUT_DREFCLK;
6247
 
6248
	dpll |= DPLL_VCO_ENABLE;
5354 serge 6249
	crtc->new_config->dpll_hw_state.dpll = dpll;
3031 serge 6250
}
6251
 
4104 Serge 6252
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
3243 Serge 6253
{
6254
	struct drm_device *dev = intel_crtc->base.dev;
6255
	struct drm_i915_private *dev_priv = dev->dev_private;
6256
	enum pipe pipe = intel_crtc->pipe;
3746 Serge 6257
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4104 Serge 6258
	struct drm_display_mode *adjusted_mode =
6259
		&intel_crtc->config.adjusted_mode;
5060 serge 6260
	uint32_t crtc_vtotal, crtc_vblank_end;
6261
	int vsyncshift = 0;
3243 Serge 6262
 
4104 Serge 6263
	/* We need to be careful not to changed the adjusted mode, for otherwise
6264
	 * the hw state checker will get angry at the mismatch. */
6265
	crtc_vtotal = adjusted_mode->crtc_vtotal;
6266
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
6267
 
5060 serge 6268
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3243 Serge 6269
		/* the chip adds 2 halflines automatically */
4104 Serge 6270
		crtc_vtotal -= 1;
6271
		crtc_vblank_end -= 1;
5060 serge 6272
 
5354 serge 6273
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
5060 serge 6274
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
6275
		else
6276
			vsyncshift = adjusted_mode->crtc_hsync_start -
6277
				adjusted_mode->crtc_htotal / 2;
6278
		if (vsyncshift < 0)
6279
			vsyncshift += adjusted_mode->crtc_htotal;
3243 Serge 6280
	}
6281
 
6282
	if (INTEL_INFO(dev)->gen > 3)
6283
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
6284
 
6285
	I915_WRITE(HTOTAL(cpu_transcoder),
6286
		   (adjusted_mode->crtc_hdisplay - 1) |
6287
		   ((adjusted_mode->crtc_htotal - 1) << 16));
6288
	I915_WRITE(HBLANK(cpu_transcoder),
6289
		   (adjusted_mode->crtc_hblank_start - 1) |
6290
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
6291
	I915_WRITE(HSYNC(cpu_transcoder),
6292
		   (adjusted_mode->crtc_hsync_start - 1) |
6293
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
6294
 
6295
	I915_WRITE(VTOTAL(cpu_transcoder),
6296
		   (adjusted_mode->crtc_vdisplay - 1) |
4104 Serge 6297
		   ((crtc_vtotal - 1) << 16));
3243 Serge 6298
	I915_WRITE(VBLANK(cpu_transcoder),
6299
		   (adjusted_mode->crtc_vblank_start - 1) |
4104 Serge 6300
		   ((crtc_vblank_end - 1) << 16));
3243 Serge 6301
	I915_WRITE(VSYNC(cpu_transcoder),
6302
		   (adjusted_mode->crtc_vsync_start - 1) |
6303
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
6304
 
6305
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
6306
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
6307
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
6308
	 * bits. */
6309
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
6310
	    (pipe == PIPE_B || pipe == PIPE_C))
6311
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
6312
 
6313
	/* pipesrc controls the size that is scaled from, which should
6314
	 * always be the user's requested size.
6315
	 */
6316
	I915_WRITE(PIPESRC(pipe),
4560 Serge 6317
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
6318
		   (intel_crtc->config.pipe_src_h - 1));
3243 Serge 6319
}
6320
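/*
 * Editorial note, not part of the original intel_display.c: for interlaced
 * modes the second field's vsync is shifted by roughly half a scanline, which
 * is what the vsyncshift computation above encodes. With a hypothetical
 * crtc_htotal of 1100 the SDVO path gives (1100 - 1) / 2 = 549; the non-SDVO
 * path with a hypothetical crtc_hsync_start of 968 gives 968 - 1100 / 2 = 418,
 * and crtc_htotal would only be added back if that difference were negative.
 */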
 
4104 Serge 6321
static void intel_get_pipe_timings(struct intel_crtc *crtc,
6322
				   struct intel_crtc_config *pipe_config)
6323
{
6324
	struct drm_device *dev = crtc->base.dev;
6325
	struct drm_i915_private *dev_priv = dev->dev_private;
6326
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6327
	uint32_t tmp;
6328
 
6329
	tmp = I915_READ(HTOTAL(cpu_transcoder));
6330
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
6331
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
6332
	tmp = I915_READ(HBLANK(cpu_transcoder));
6333
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
6334
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
6335
	tmp = I915_READ(HSYNC(cpu_transcoder));
6336
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
6337
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
6338
 
6339
	tmp = I915_READ(VTOTAL(cpu_transcoder));
6340
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
6341
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
6342
	tmp = I915_READ(VBLANK(cpu_transcoder));
6343
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
6344
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
6345
	tmp = I915_READ(VSYNC(cpu_transcoder));
6346
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
6347
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
6348
 
6349
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
6350
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
6351
		pipe_config->adjusted_mode.crtc_vtotal += 1;
6352
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
6353
	}
6354
 
6355
	tmp = I915_READ(PIPESRC(crtc->pipe));
4560 Serge 6356
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
6357
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
6358
 
6359
	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
6360
	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
4104 Serge 6361
}
6362
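/*
 * Editorial sketch, not part of the original intel_display.c: the timing
 * registers read back above store "value - 1" in each 16-bit half (the
 * start/active count in bits 15:0, the end/total count in bits 31:16), which
 * is why every field is unpacked with "+ 1". The helpers below are
 * hypothetical and exist only to illustrate that packing convention.
 */
static inline u32 intel_pack_timing_example(u32 low, u32 high)
{
	/* e.g. hdisplay = 1920, htotal = 2200 packs to 0x0897077f */
	return ((high - 1) << 16) | (low - 1);
}

static inline void intel_unpack_timing_example(u32 tmp, u32 *low, u32 *high)
{
	*low = (tmp & 0xffff) + 1;
	*high = ((tmp >> 16) & 0xffff) + 1;
}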
 
5060 serge 6363
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
4104 Serge 6364
					     struct intel_crtc_config *pipe_config)
6365
{
5060 serge 6366
	mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
6367
	mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
6368
	mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
6369
	mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
4104 Serge 6370
 
5060 serge 6371
	mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
6372
	mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
6373
	mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
6374
	mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
4104 Serge 6375
 
5060 serge 6376
	mode->flags = pipe_config->adjusted_mode.flags;
4104 Serge 6377
 
5060 serge 6378
	mode->clock = pipe_config->adjusted_mode.crtc_clock;
6379
	mode->flags |= pipe_config->adjusted_mode.flags;
4104 Serge 6380
}
6381
 
3746 Serge 6382
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6383
{
6384
	struct drm_device *dev = intel_crtc->base.dev;
6385
	struct drm_i915_private *dev_priv = dev->dev_private;
6386
	uint32_t pipeconf;
6387
 
4104 Serge 6388
	pipeconf = 0;
3746 Serge 6389
 
5354 serge 6390
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
6391
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
6392
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
4104 Serge 6393
 
4560 Serge 6394
	if (intel_crtc->config.double_wide)
3746 Serge 6395
		pipeconf |= PIPECONF_DOUBLE_WIDE;
6396
 
4104 Serge 6397
	/* only g4x and later have fancy bpc/dither controls */
6398
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6399
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
6400
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
6401
			pipeconf |= PIPECONF_DITHER_EN |
3746 Serge 6402
				    PIPECONF_DITHER_TYPE_SP;
6403
 
4104 Serge 6404
		switch (intel_crtc->config.pipe_bpp) {
6405
		case 18:
6406
			pipeconf |= PIPECONF_6BPC;
6407
			break;
6408
		case 24:
6409
			pipeconf |= PIPECONF_8BPC;
6410
			break;
6411
		case 30:
6412
			pipeconf |= PIPECONF_10BPC;
6413
			break;
6414
		default:
6415
			/* Case prevented by intel_choose_pipe_bpp_dither. */
6416
			BUG();
3746 Serge 6417
		}
6418
	}
6419
 
6420
	if (HAS_PIPE_CXSR(dev)) {
6421
		if (intel_crtc->lowfreq_avail) {
6422
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6423
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6424
		} else {
6425
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6426
		}
6427
	}
6428
 
5060 serge 6429
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6430
		if (INTEL_INFO(dev)->gen < 4 ||
5354 serge 6431
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
3746 Serge 6432
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6433
		else
5060 serge 6434
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
6435
	} else
3746 Serge 6436
		pipeconf |= PIPECONF_PROGRESSIVE;
6437
 
4104 Serge 6438
	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
3746 Serge 6439
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
6440
 
6441
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
6442
	POSTING_READ(PIPECONF(intel_crtc->pipe));
6443
}
6444
 
5354 serge 6445
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
3031 serge 6446
{
5354 serge 6447
	struct drm_device *dev = crtc->base.dev;
3031 serge 6448
	struct drm_i915_private *dev_priv = dev->dev_private;
6449
	int refclk, num_connectors = 0;
6450
	intel_clock_t clock, reduced_clock;
4104 Serge 6451
	bool ok, has_reduced_clock = false;
4560 Serge 6452
	bool is_lvds = false, is_dsi = false;
3031 serge 6453
	struct intel_encoder *encoder;
6454
	const intel_limit_t *limit;
6455
 
5354 serge 6456
	for_each_intel_encoder(dev, encoder) {
6457
		if (encoder->new_crtc != crtc)
6458
			continue;
6459
 
3031 serge 6460
		switch (encoder->type) {
6461
		case INTEL_OUTPUT_LVDS:
6462
			is_lvds = true;
6463
			break;
4560 Serge 6464
		case INTEL_OUTPUT_DSI:
6465
			is_dsi = true;
6466
			break;
5354 serge 6467
		default:
6468
			break;
3031 serge 6469
		}
6470
 
6471
		num_connectors++;
6472
	}
6473
 
4560 Serge 6474
	if (is_dsi)
5060 serge 6475
		return 0;
4560 Serge 6476
 
5354 serge 6477
	if (!crtc->new_config->clock_set) {
3031 serge 6478
		refclk = i9xx_get_refclk(crtc, num_connectors);
6479
 
6480
		/*
4560 Serge 6481
		 * Returns a set of divisors for the desired target clock with
6482
		 * the given refclk, or FALSE.  The returned values represent
6483
		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
6484
		 * 2) / p1 / p2.
3031 serge 6485
		 */
6486
		limit = intel_limit(crtc, refclk);
4104 Serge 6487
		ok = dev_priv->display.find_dpll(limit, crtc,
5354 serge 6488
						 crtc->new_config->port_clock,
4104 Serge 6489
						 refclk, NULL, &clock);
4560 Serge 6490
		if (!ok) {
3031 serge 6491
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
6492
			return -EINVAL;
6493
		}
6494
 
6495
		if (is_lvds && dev_priv->lvds_downclock_avail) {
6496
			/*
4560 Serge 6497
			 * Ensure we match the reduced clock's P to the target
6498
			 * clock.  If the clocks don't match, we can't switch
6499
			 * the display clock by using the FP0/FP1. In such case
6500
			 * we will disable the LVDS downclock feature.
3031 serge 6501
			 */
4104 Serge 6502
			has_reduced_clock =
6503
				dev_priv->display.find_dpll(limit, crtc,
3031 serge 6504
							    dev_priv->lvds_downclock,
4104 Serge 6505
							    refclk, &clock,
3031 serge 6506
							    &reduced_clock);
6507
		}
3746 Serge 6508
		/* Compat-code for transition, will disappear. */
5354 serge 6509
		crtc->new_config->dpll.n = clock.n;
6510
		crtc->new_config->dpll.m1 = clock.m1;
6511
		crtc->new_config->dpll.m2 = clock.m2;
6512
		crtc->new_config->dpll.p1 = clock.p1;
6513
		crtc->new_config->dpll.p2 = clock.p2;
3746 Serge 6514
	}
3031 serge 6515
 
4560 Serge 6516
	if (IS_GEN2(dev)) {
5354 serge 6517
		i8xx_update_pll(crtc,
3243 Serge 6518
				has_reduced_clock ? &reduced_clock : NULL,
6519
				num_connectors);
5060 serge 6520
	} else if (IS_CHERRYVIEW(dev)) {
5354 serge 6521
		chv_update_pll(crtc, crtc->new_config);
4560 Serge 6522
	} else if (IS_VALLEYVIEW(dev)) {
5354 serge 6523
		vlv_update_pll(crtc, crtc->new_config);
4560 Serge 6524
	} else {
5354 serge 6525
		i9xx_update_pll(crtc,
3031 serge 6526
				has_reduced_clock ? &reduced_clock : NULL,
6527
				num_connectors);
4560 Serge 6528
	}
3031 serge 6529
 
5060 serge 6530
	return 0;
2327 Serge 6531
}
6532
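/*
 * Editorial sketch, not part of the original intel_display.c: the divisor
 * comment in i9xx_crtc_compute_clock() describes the dot clock as
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. The helper and the
 * numbers below are hypothetical and only show the arithmetic: with
 * refclk = 96000 kHz, m1 = 10, m2 = 8, n = 4, p1 = 2, p2 = 4 this yields
 * 96000 * (5 * 12 + 10) / 6 / 2 / 4 = 140000 kHz.
 */
static int i9xx_dot_clock_example(int refclk, int m1, int m2, int n,
				  int p1, int p2)
{
	return refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2;
}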
 
4104 Serge 6533
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6534
				 struct intel_crtc_config *pipe_config)
6535
{
6536
	struct drm_device *dev = crtc->base.dev;
6537
	struct drm_i915_private *dev_priv = dev->dev_private;
6538
	uint32_t tmp;
6539
 
4560 Serge 6540
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6541
		return;
6542
 
4104 Serge 6543
	tmp = I915_READ(PFIT_CONTROL);
6544
	if (!(tmp & PFIT_ENABLE))
6545
		return;
6546
 
6547
	/* Check whether the pfit is attached to our pipe. */
6548
	if (INTEL_INFO(dev)->gen < 4) {
6549
		if (crtc->pipe != PIPE_B)
6550
			return;
6551
	} else {
6552
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6553
			return;
6554
	}
6555
 
6556
	pipe_config->gmch_pfit.control = tmp;
6557
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6558
	if (INTEL_INFO(dev)->gen < 5)
6559
		pipe_config->gmch_pfit.lvds_border_bits =
6560
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6561
}
6562
 
4398 Serge 6563
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6564
			       struct intel_crtc_config *pipe_config)
6565
{
6566
	struct drm_device *dev = crtc->base.dev;
6567
	struct drm_i915_private *dev_priv = dev->dev_private;
6568
	int pipe = pipe_config->cpu_transcoder;
6569
	intel_clock_t clock;
6570
	u32 mdiv;
6571
	int refclk = 100000;
6572
 
5060 serge 6573
	/* In case of MIPI DPLL will not even be used */
6574
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
6575
		return;
6576
 
4398 Serge 6577
	mutex_lock(&dev_priv->dpio_lock);
4560 Serge 6578
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4398 Serge 6579
	mutex_unlock(&dev_priv->dpio_lock);
6580
 
6581
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6582
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
6583
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6584
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6585
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6586
 
4560 Serge 6587
	vlv_clock(refclk, &clock);
4398 Serge 6588
 
4560 Serge 6589
	/* clock.dot is the fast clock */
6590
	pipe_config->port_clock = clock.dot / 5;
4398 Serge 6591
}
6592
 
5060 serge 6593
static void i9xx_get_plane_config(struct intel_crtc *crtc,
6594
				  struct intel_plane_config *plane_config)
6595
{
6596
	struct drm_device *dev = crtc->base.dev;
6597
	struct drm_i915_private *dev_priv = dev->dev_private;
6598
	u32 val, base, offset;
6599
	int pipe = crtc->pipe, plane = crtc->plane;
6600
	int fourcc, pixel_format;
6601
	int aligned_height;
6602
 
6603
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6604
	if (!crtc->base.primary->fb) {
6605
		DRM_DEBUG_KMS("failed to alloc fb\n");
6606
		return;
6607
	}
6608
 
6609
	val = I915_READ(DSPCNTR(plane));
6610
 
6611
	if (INTEL_INFO(dev)->gen >= 4)
6612
		if (val & DISPPLANE_TILED)
6613
			plane_config->tiled = true;
6614
 
6615
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6616
	fourcc = intel_format_to_fourcc(pixel_format);
6617
	crtc->base.primary->fb->pixel_format = fourcc;
6618
	crtc->base.primary->fb->bits_per_pixel =
6619
		drm_format_plane_cpp(fourcc, 0) * 8;
6620
 
6621
	if (INTEL_INFO(dev)->gen >= 4) {
6622
		if (plane_config->tiled)
6623
			offset = I915_READ(DSPTILEOFF(plane));
6624
		else
6625
			offset = I915_READ(DSPLINOFF(plane));
6626
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6627
	} else {
6628
		base = I915_READ(DSPADDR(plane));
6629
	}
6630
	plane_config->base = base;
6631
 
6632
	val = I915_READ(PIPESRC(pipe));
6633
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6634
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6635
 
6636
	val = I915_READ(DSPSTRIDE(pipe));
5354 serge 6637
	crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
5060 serge 6638
 
6639
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6640
					    plane_config->tiled);
6641
 
5367 serge 6642
	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
6643
					aligned_height);
5060 serge 6644
 
6645
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6646
		      pipe, plane, crtc->base.primary->fb->width,
6647
		      crtc->base.primary->fb->height,
6648
		      crtc->base.primary->fb->bits_per_pixel, base,
6649
		      crtc->base.primary->fb->pitches[0],
6650
		      plane_config->size);
6651
 
6652
}
6653
 
6654
static void chv_crtc_clock_get(struct intel_crtc *crtc,
6655
			       struct intel_crtc_config *pipe_config)
6656
{
6657
	struct drm_device *dev = crtc->base.dev;
6658
	struct drm_i915_private *dev_priv = dev->dev_private;
6659
	int pipe = pipe_config->cpu_transcoder;
6660
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
6661
	intel_clock_t clock;
6662
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
6663
	int refclk = 100000;
6664
 
6665
	mutex_lock(&dev_priv->dpio_lock);
6666
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6667
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6668
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6669
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6670
	mutex_unlock(&dev_priv->dpio_lock);
6671
 
6672
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6673
	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
6674
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6675
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6676
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6677
 
6678
	chv_clock(refclk, &clock);
6679
 
6680
	/* clock.dot is the fast clock */
6681
	pipe_config->port_clock = clock.dot / 5;
6682
}
6683
 
3746 Serge 6684
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6685
				 struct intel_crtc_config *pipe_config)
6686
{
6687
	struct drm_device *dev = crtc->base.dev;
6688
	struct drm_i915_private *dev_priv = dev->dev_private;
6689
	uint32_t tmp;
6690
 
5354 serge 6691
	if (!intel_display_power_is_enabled(dev_priv,
5060 serge 6692
					 POWER_DOMAIN_PIPE(crtc->pipe)))
6693
		return false;
6694
 
4104 Serge 6695
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6696
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6697
 
3746 Serge 6698
	tmp = I915_READ(PIPECONF(crtc->pipe));
6699
	if (!(tmp & PIPECONF_ENABLE))
6700
		return false;
6701
 
4280 Serge 6702
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6703
		switch (tmp & PIPECONF_BPC_MASK) {
6704
		case PIPECONF_6BPC:
6705
			pipe_config->pipe_bpp = 18;
6706
			break;
6707
		case PIPECONF_8BPC:
6708
			pipe_config->pipe_bpp = 24;
6709
			break;
6710
		case PIPECONF_10BPC:
6711
			pipe_config->pipe_bpp = 30;
6712
			break;
6713
		default:
6714
			break;
6715
		}
6716
	}
6717
 
5060 serge 6718
	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
6719
		pipe_config->limited_color_range = true;
6720
 
4560 Serge 6721
	if (INTEL_INFO(dev)->gen < 4)
6722
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
6723
 
4104 Serge 6724
	intel_get_pipe_timings(crtc, pipe_config);
6725
 
6726
	i9xx_get_pfit_config(crtc, pipe_config);
6727
 
6728
	if (INTEL_INFO(dev)->gen >= 4) {
6729
		tmp = I915_READ(DPLL_MD(crtc->pipe));
6730
		pipe_config->pixel_multiplier =
6731
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
6732
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
6733
		pipe_config->dpll_hw_state.dpll_md = tmp;
6734
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6735
		tmp = I915_READ(DPLL(crtc->pipe));
6736
		pipe_config->pixel_multiplier =
6737
			((tmp & SDVO_MULTIPLIER_MASK)
6738
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
6739
	} else {
6740
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
6741
		 * port and will be fixed up in the encoder->get_config
6742
		 * function. */
6743
		pipe_config->pixel_multiplier = 1;
6744
	}
6745
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
6746
	if (!IS_VALLEYVIEW(dev)) {
5354 serge 6747
		/*
6748
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
6749
		 * on 830. Filter it out here so that we don't
6750
		 * report errors due to that.
6751
		 */
6752
		if (IS_I830(dev))
6753
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
6754
 
4104 Serge 6755
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
6756
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
6757
	} else {
6758
		/* Mask out read-only status bits. */
6759
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
6760
						     DPLL_PORTC_READY_MASK |
6761
						     DPLL_PORTB_READY_MASK);
6762
	}
6763
 
5060 serge 6764
	if (IS_CHERRYVIEW(dev))
6765
		chv_crtc_clock_get(crtc, pipe_config);
6766
	else if (IS_VALLEYVIEW(dev))
4560 Serge 6767
		vlv_crtc_clock_get(crtc, pipe_config);
6768
	else
6769
		i9xx_crtc_clock_get(crtc, pipe_config);
6770
 
3746 Serge 6771
	return true;
6772
}
6773
 
3243 Serge 6774
static void ironlake_init_pch_refclk(struct drm_device *dev)
2327 Serge 6775
{
6776
	struct drm_i915_private *dev_priv = dev->dev_private;
6777
	struct intel_encoder *encoder;
3746 Serge 6778
	u32 val, final;
2327 Serge 6779
	bool has_lvds = false;
2342 Serge 6780
	bool has_cpu_edp = false;
6781
	bool has_panel = false;
6782
	bool has_ck505 = false;
6783
	bool can_ssc = false;
2327 Serge 6784
 
6785
	/* We need to take the global config into account */
5354 serge 6786
	for_each_intel_encoder(dev, encoder) {
2327 Serge 6787
		switch (encoder->type) {
6788
		case INTEL_OUTPUT_LVDS:
2342 Serge 6789
			has_panel = true;
2327 Serge 6790
			has_lvds = true;
2342 Serge 6791
			break;
2327 Serge 6792
		case INTEL_OUTPUT_EDP:
2342 Serge 6793
			has_panel = true;
4104 Serge 6794
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
2342 Serge 6795
				has_cpu_edp = true;
2327 Serge 6796
			break;
5354 serge 6797
		default:
6798
			break;
2327 Serge 6799
		}
6800
	}
2342 Serge 6801
 
6802
	if (HAS_PCH_IBX(dev)) {
4104 Serge 6803
		has_ck505 = dev_priv->vbt.display_clock_mode;
2342 Serge 6804
		can_ssc = has_ck505;
6805
	} else {
6806
		has_ck505 = false;
6807
		can_ssc = true;
2327 Serge 6808
	}
6809
 
4104 Serge 6810
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
6811
		      has_panel, has_lvds, has_ck505);
2342 Serge 6812
 
2327 Serge 6813
	/* Ironlake: try to set up the display ref clock before DPLL
6814
	 * enabling. This is only under the driver's control after
6815
	 * PCH B stepping; previous chipset steppings should
6816
	 * ignore this setting.
6817
	 */
3746 Serge 6818
	val = I915_READ(PCH_DREF_CONTROL);
6819
 
6820
	/* As we must carefully and slowly disable/enable each source in turn,
6821
	 * compute the final state we want first and check if we need to
6822
	 * make any changes at all.
6823
	 */
6824
	final = val;
6825
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
6826
	if (has_ck505)
6827
		final |= DREF_NONSPREAD_CK505_ENABLE;
6828
	else
6829
		final |= DREF_NONSPREAD_SOURCE_ENABLE;
6830
 
6831
	final &= ~DREF_SSC_SOURCE_MASK;
6832
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6833
	final &= ~DREF_SSC1_ENABLE;
6834
 
6835
	if (has_panel) {
6836
		final |= DREF_SSC_SOURCE_ENABLE;
6837
 
6838
		if (intel_panel_use_ssc(dev_priv) && can_ssc)
6839
			final |= DREF_SSC1_ENABLE;
6840
 
6841
		if (has_cpu_edp) {
6842
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
6843
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6844
			else
6845
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6846
		} else
6847
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6848
	} else {
6849
		final |= DREF_SSC_SOURCE_DISABLE;
6850
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6851
	}
6852
 
6853
	if (final == val)
6854
		return;
6855
 
2327 Serge 6856
	/* Always enable nonspread source */
3746 Serge 6857
	val &= ~DREF_NONSPREAD_SOURCE_MASK;
2342 Serge 6858
 
6859
	if (has_ck505)
3746 Serge 6860
		val |= DREF_NONSPREAD_CK505_ENABLE;
2342 Serge 6861
	else
3746 Serge 6862
		val |= DREF_NONSPREAD_SOURCE_ENABLE;
2342 Serge 6863
 
6864
	if (has_panel) {
3746 Serge 6865
		val &= ~DREF_SSC_SOURCE_MASK;
6866
		val |= DREF_SSC_SOURCE_ENABLE;
2327 Serge 6867
 
2342 Serge 6868
		/* SSC must be turned on before enabling the CPU output  */
6869
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6870
			DRM_DEBUG_KMS("Using SSC on panel\n");
3746 Serge 6871
			val |= DREF_SSC1_ENABLE;
3031 serge 6872
		} else
3746 Serge 6873
			val &= ~DREF_SSC1_ENABLE;
2327 Serge 6874
 
2342 Serge 6875
		/* Get SSC going before enabling the outputs */
3746 Serge 6876
		I915_WRITE(PCH_DREF_CONTROL, val);
2327 Serge 6877
		POSTING_READ(PCH_DREF_CONTROL);
6878
		udelay(200);
2342 Serge 6879
 
3746 Serge 6880
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
2327 Serge 6881
 
6882
		/* Enable CPU source on CPU attached eDP */
2342 Serge 6883
		if (has_cpu_edp) {
6884
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6885
				DRM_DEBUG_KMS("Using SSC on eDP\n");
3746 Serge 6886
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5060 serge 6887
			} else
3746 Serge 6888
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
2342 Serge 6889
		} else
3746 Serge 6890
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
2342 Serge 6891
 
3746 Serge 6892
		I915_WRITE(PCH_DREF_CONTROL, val);
2342 Serge 6893
		POSTING_READ(PCH_DREF_CONTROL);
6894
		udelay(200);
2327 Serge 6895
	} else {
2342 Serge 6896
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
6897
 
3746 Serge 6898
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
2342 Serge 6899
 
6900
		/* Turn off CPU output */
3746 Serge 6901
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
2342 Serge 6902
 
3746 Serge 6903
		I915_WRITE(PCH_DREF_CONTROL, val);
2327 Serge 6904
		POSTING_READ(PCH_DREF_CONTROL);
6905
		udelay(200);
2342 Serge 6906
 
6907
		/* Turn off the SSC source */
3746 Serge 6908
		val &= ~DREF_SSC_SOURCE_MASK;
6909
		val |= DREF_SSC_SOURCE_DISABLE;
2342 Serge 6910
 
6911
		/* Turn off SSC1 */
3746 Serge 6912
		val &= ~DREF_SSC1_ENABLE;
2342 Serge 6913
 
3746 Serge 6914
		I915_WRITE(PCH_DREF_CONTROL, val);
2342 Serge 6915
		POSTING_READ(PCH_DREF_CONTROL);
6916
		udelay(200);
2327 Serge 6917
	}
3746 Serge 6918
 
6919
	BUG_ON(val != final);
2327 Serge 6920
}
6921
 
4104 Serge 6922
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
3243 Serge 6923
{
4104 Serge 6924
	uint32_t tmp;
3243 Serge 6925
 
6926
		tmp = I915_READ(SOUTH_CHICKEN2);
6927
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
6928
		I915_WRITE(SOUTH_CHICKEN2, tmp);
6929
 
6930
		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
6931
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
6932
			DRM_ERROR("FDI mPHY reset assert timeout\n");
6933
 
6934
		tmp = I915_READ(SOUTH_CHICKEN2);
6935
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
6936
		I915_WRITE(SOUTH_CHICKEN2, tmp);
6937
 
6938
		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
4104 Serge 6939
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
3243 Serge 6940
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
4539 Serge 6941
}
3243 Serge 6942
 
4104 Serge 6943
/* WaMPhyProgramming:hsw */
6944
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
6945
{
6946
	uint32_t tmp;
6947
 
3243 Serge 6948
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
6949
	tmp &= ~(0xFF << 24);
6950
	tmp |= (0x12 << 24);
6951
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
6952
 
6953
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
6954
	tmp |= (1 << 11);
6955
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
6956
 
6957
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
6958
	tmp |= (1 << 11);
6959
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
6960
 
6961
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
6962
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6963
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
6964
 
6965
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
6966
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6967
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
6968
 
6969
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
6970
		tmp &= ~(7 << 13);
6971
		tmp |= (5 << 13);
6972
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
6973
 
6974
		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
6975
		tmp &= ~(7 << 13);
6976
		tmp |= (5 << 13);
6977
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
6978
 
6979
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
6980
	tmp &= ~0xFF;
6981
	tmp |= 0x1C;
6982
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
6983
 
6984
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
6985
	tmp &= ~0xFF;
6986
	tmp |= 0x1C;
6987
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
6988
 
6989
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
6990
	tmp &= ~(0xFF << 16);
6991
	tmp |= (0x1C << 16);
6992
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
6993
 
6994
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
6995
	tmp &= ~(0xFF << 16);
6996
	tmp |= (0x1C << 16);
6997
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
6998
 
6999
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
7000
		tmp |= (1 << 27);
7001
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
7002
 
7003
		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
7004
		tmp |= (1 << 27);
7005
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
7006
 
7007
		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
7008
		tmp &= ~(0xF << 28);
7009
		tmp |= (4 << 28);
7010
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
7011
 
7012
		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
7013
		tmp &= ~(0xF << 28);
7014
		tmp |= (4 << 28);
7015
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
4539 Serge 7016
}
3243 Serge 7017
 
4104 Serge 7018
/* Implements 3 different sequences from BSpec chapter "Display iCLK
7019
 * Programming" based on the parameters passed:
7020
 * - Sequence to enable CLKOUT_DP
7021
 * - Sequence to enable CLKOUT_DP without spread
7022
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
7023
 */
7024
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
7025
				 bool with_fdi)
7026
{
7027
	struct drm_i915_private *dev_priv = dev->dev_private;
7028
	uint32_t reg, tmp;
3480 Serge 7029
 
4104 Serge 7030
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
7031
		with_spread = true;
7032
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
7033
		 with_fdi, "LP PCH doesn't have FDI\n"))
7034
		with_fdi = false;
7035
 
7036
	mutex_lock(&dev_priv->dpio_lock);
7037
 
7038
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7039
	tmp &= ~SBI_SSCCTL_DISABLE;
7040
	tmp |= SBI_SSCCTL_PATHALT;
7041
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7042
 
7043
	udelay(24);
7044
 
7045
	if (with_spread) {
7046
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7047
		tmp &= ~SBI_SSCCTL_PATHALT;
7048
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7049
 
7050
		if (with_fdi) {
7051
			lpt_reset_fdi_mphy(dev_priv);
7052
			lpt_program_fdi_mphy(dev_priv);
7053
		}
7054
	}
7055
 
7056
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
7057
	       SBI_GEN0 : SBI_DBUFF0;
7058
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7059
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7060
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7061
 
3480 Serge 7062
	mutex_unlock(&dev_priv->dpio_lock);
3243 Serge 7063
}
7064
 
4104 Serge 7065
/* Sequence to disable CLKOUT_DP */
7066
static void lpt_disable_clkout_dp(struct drm_device *dev)
7067
{
7068
	struct drm_i915_private *dev_priv = dev->dev_private;
7069
	uint32_t reg, tmp;
7070
 
7071
	mutex_lock(&dev_priv->dpio_lock);
7072
 
7073
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
7074
	       SBI_GEN0 : SBI_DBUFF0;
7075
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7076
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7077
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7078
 
7079
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7080
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
7081
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
7082
			tmp |= SBI_SSCCTL_PATHALT;
7083
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7084
			udelay(32);
7085
		}
7086
		tmp |= SBI_SSCCTL_DISABLE;
7087
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7088
	}
7089
 
7090
	mutex_unlock(&dev_priv->dpio_lock);
7091
}
7092
 
7093
static void lpt_init_pch_refclk(struct drm_device *dev)
7094
{
7095
	struct intel_encoder *encoder;
7096
	bool has_vga = false;
7097
 
5354 serge 7098
	for_each_intel_encoder(dev, encoder) {
4104 Serge 7099
		switch (encoder->type) {
7100
		case INTEL_OUTPUT_ANALOG:
7101
			has_vga = true;
7102
			break;
5354 serge 7103
		default:
7104
			break;
4104 Serge 7105
		}
7106
	}
7107
 
7108
	if (has_vga)
7109
		lpt_enable_clkout_dp(dev, true, true);
7110
	else
7111
		lpt_disable_clkout_dp(dev);
7112
}
7113
 
3243 Serge 7114
/*
7115
 * Initialize reference clocks when the driver loads
7116
 */
7117
void intel_init_pch_refclk(struct drm_device *dev)
7118
{
7119
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
7120
		ironlake_init_pch_refclk(dev);
7121
	else if (HAS_PCH_LPT(dev))
7122
		lpt_init_pch_refclk(dev);
7123
}
7124
 
2342 Serge 7125
static int ironlake_get_refclk(struct drm_crtc *crtc)
7126
{
7127
	struct drm_device *dev = crtc->dev;
7128
	struct drm_i915_private *dev_priv = dev->dev_private;
7129
	struct intel_encoder *encoder;
7130
	int num_connectors = 0;
7131
	bool is_lvds = false;
7132
 
5354 serge 7133
	for_each_intel_encoder(dev, encoder) {
7134
		if (encoder->new_crtc != to_intel_crtc(crtc))
7135
			continue;
7136
 
2342 Serge 7137
		switch (encoder->type) {
7138
		case INTEL_OUTPUT_LVDS:
7139
			is_lvds = true;
7140
			break;
5354 serge 7141
		default:
7142
			break;
2342 Serge 7143
		}
7144
		num_connectors++;
7145
	}
7146
 
7147
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 7148
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
4104 Serge 7149
			      dev_priv->vbt.lvds_ssc_freq);
4560 Serge 7150
		return dev_priv->vbt.lvds_ssc_freq;
2342 Serge 7151
	}
7152
 
7153
	return 120000;
7154
}
7155
 
4104 Serge 7156
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
3031 serge 7157
{
7158
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
7159
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7160
	int pipe = intel_crtc->pipe;
7161
	uint32_t val;
7162
 
4104 Serge 7163
	val = 0;
3031 serge 7164
 
3746 Serge 7165
	switch (intel_crtc->config.pipe_bpp) {
3031 serge 7166
	case 18:
3480 Serge 7167
		val |= PIPECONF_6BPC;
3031 serge 7168
		break;
7169
	case 24:
3480 Serge 7170
		val |= PIPECONF_8BPC;
3031 serge 7171
		break;
7172
	case 30:
3480 Serge 7173
		val |= PIPECONF_10BPC;
3031 serge 7174
		break;
7175
	case 36:
3480 Serge 7176
		val |= PIPECONF_12BPC;
3031 serge 7177
		break;
7178
	default:
3243 Serge 7179
		/* Case prevented by intel_choose_pipe_bpp_dither. */
7180
		BUG();
3031 serge 7181
	}
7182
 
4104 Serge 7183
	if (intel_crtc->config.dither)
3031 serge 7184
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7185
 
4104 Serge 7186
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3031 serge 7187
		val |= PIPECONF_INTERLACED_ILK;
7188
	else
7189
		val |= PIPECONF_PROGRESSIVE;
7190
 
3746 Serge 7191
	if (intel_crtc->config.limited_color_range)
3480 Serge 7192
		val |= PIPECONF_COLOR_RANGE_SELECT;
7193
 
3031 serge 7194
	I915_WRITE(PIPECONF(pipe), val);
7195
	POSTING_READ(PIPECONF(pipe));
7196
}
7197
 
3480 Serge 7198
/*
7199
 * Set up the pipe CSC unit.
7200
 *
7201
 * Currently only full range RGB to limited range RGB conversion
7202
 * is supported, but eventually this should handle various
7203
 * RGB<->YCbCr scenarios as well.
7204
 */
3746 Serge 7205
static void intel_set_pipe_csc(struct drm_crtc *crtc)
3480 Serge 7206
{
7207
	struct drm_device *dev = crtc->dev;
7208
	struct drm_i915_private *dev_priv = dev->dev_private;
7209
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7210
	int pipe = intel_crtc->pipe;
7211
	uint16_t coeff = 0x7800; /* 1.0 */
7212
 
7213
	/*
7214
	 * TODO: Check what kind of values actually come out of the pipe
7215
	 * with these coeff/postoff values and adjust to get the best
7216
	 * accuracy. Perhaps we even need to take the bpc value into
7217
	 * consideration.
7218
	 */
7219
 
3746 Serge 7220
	if (intel_crtc->config.limited_color_range)
3480 Serge 7221
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
7222
 
7223
	/*
7224
	 * GY/GU and RY/RU should be the other way around according
7225
	 * to BSpec, but reality doesn't agree. Just set them up in
7226
	 * a way that results in the correct picture.
7227
	 */
7228
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
7229
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
7230
 
7231
	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
7232
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
7233
 
7234
	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
7235
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
7236
 
7237
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
7238
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
7239
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
7240
 
7241
	if (INTEL_INFO(dev)->gen > 6) {
7242
		uint16_t postoff = 0;
7243
 
3746 Serge 7244
		if (intel_crtc->config.limited_color_range)
4398 Serge 7245
			postoff = (16 * (1 << 12) / 255) & 0x1fff;
3480 Serge 7246
 
7247
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
7248
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
7249
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
7250
 
7251
		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
7252
	} else {
7253
		uint32_t mode = CSC_MODE_YUV_TO_RGB;
7254
 
3746 Serge 7255
		if (intel_crtc->config.limited_color_range)
3480 Serge 7256
			mode |= CSC_BLACK_SCREEN_OFFSET;
7257
 
7258
		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
7259
	}
7260
}
7261
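/*
 * Editorial note, not part of the original intel_display.c: in the
 * limited-color-range case above the unity coefficient is scaled by
 * (235 - 16) / 255 ~= 0.859, i.e. full-range RGB is compressed into the
 * 16..235 video levels. Numerically, (235 - 16) * (1 << 12) / 255 = 3517 and
 * the "& 0xff8" mask drops the low bits before the value is packed into the
 * coefficient register format.
 */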
 
4104 Serge 7262
static void haswell_set_pipeconf(struct drm_crtc *crtc)
3243 Serge 7263
{
4560 Serge 7264
	struct drm_device *dev = crtc->dev;
7265
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 7266
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4560 Serge 7267
	enum pipe pipe = intel_crtc->pipe;
3746 Serge 7268
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 7269
	uint32_t val;
7270
 
4104 Serge 7271
	val = 0;
3243 Serge 7272
 
4560 Serge 7273
	if (IS_HASWELL(dev) && intel_crtc->config.dither)
3243 Serge 7274
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7275
 
4104 Serge 7276
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3243 Serge 7277
		val |= PIPECONF_INTERLACED_ILK;
7278
	else
7279
		val |= PIPECONF_PROGRESSIVE;
7280
 
7281
	I915_WRITE(PIPECONF(cpu_transcoder), val);
7282
	POSTING_READ(PIPECONF(cpu_transcoder));
4104 Serge 7283
 
7284
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
7285
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
4560 Serge 7286
 
5354 serge 7287
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
4560 Serge 7288
		val = 0;
7289
 
7290
		switch (intel_crtc->config.pipe_bpp) {
7291
		case 18:
7292
			val |= PIPEMISC_DITHER_6_BPC;
7293
			break;
7294
		case 24:
7295
			val |= PIPEMISC_DITHER_8_BPC;
7296
			break;
7297
		case 30:
7298
			val |= PIPEMISC_DITHER_10_BPC;
7299
			break;
7300
		case 36:
7301
			val |= PIPEMISC_DITHER_12_BPC;
7302
			break;
7303
		default:
7304
			/* Case prevented by pipe_config_set_bpp. */
7305
			BUG();
7306
		}
7307
 
7308
		if (intel_crtc->config.dither)
7309
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
7310
 
7311
		I915_WRITE(PIPEMISC(pipe), val);
7312
	}
3243 Serge 7313
}
7314
 
3031 serge 7315
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7316
				    intel_clock_t *clock,
7317
				    bool *has_reduced_clock,
7318
				    intel_clock_t *reduced_clock)
7319
{
7320
	struct drm_device *dev = crtc->dev;
7321
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 7322
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 7323
	int refclk;
7324
	const intel_limit_t *limit;
4104 Serge 7325
	bool ret, is_lvds = false;
3031 serge 7326
 
5354 serge 7327
	is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
3031 serge 7328
 
7329
	refclk = ironlake_get_refclk(crtc);
7330
 
7331
	/*
7332
	 * Returns a set of divisors for the desired target clock with the given
7333
	 * refclk, or FALSE.  The returned values represent the clock equation:
7334
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
7335
	 */
5354 serge 7336
	limit = intel_limit(intel_crtc, refclk);
7337
	ret = dev_priv->display.find_dpll(limit, intel_crtc,
7338
					  intel_crtc->new_config->port_clock,
4104 Serge 7339
					  refclk, NULL, clock);
3031 serge 7340
	if (!ret)
7341
		return false;
7342
 
7343
	if (is_lvds && dev_priv->lvds_downclock_avail) {
7344
		/*
7345
		 * Ensure we match the reduced clock's P to the target clock.
7346
		 * If the clocks don't match, we can't switch the display clock
7347
		 * by using the FP0/FP1. In such case we will disable the LVDS
7348
		 * downclock feature.
7349
		*/
4104 Serge 7350
		*has_reduced_clock =
5354 serge 7351
			dev_priv->display.find_dpll(limit, intel_crtc,
3031 serge 7352
						     dev_priv->lvds_downclock,
4104 Serge 7353
						    refclk, clock,
3031 serge 7354
						     reduced_clock);
7355
	}
7356
 
7357
	return true;
7358
}
7359
 
3243 Serge 7360
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
7361
{
7362
	/*
7363
	 * Account for spread spectrum to avoid
7364
	 * oversubscribing the link. Max center spread
7365
	 * is 2.5%; use 5% for safety's sake.
7366
	 */
7367
	u32 bps = target_clock * bpp * 21 / 20;
5060 serge 7368
	return DIV_ROUND_UP(bps, link_bw * 8);
3243 Serge 7369
}
7370
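/*
 * Editorial note, not part of the original intel_display.c: a worked example
 * of the lane calculation above, with hypothetical numbers. For a 148500 kHz
 * pixel clock at 24 bpp on a 270000 kHz FDI link:
 *   bps   = 148500 * 24 * 21 / 20 = 3742200
 *   lanes = DIV_ROUND_UP(3742200, 270000 * 8) = 2
 */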
 
4104 Serge 7371
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
2327 Serge 7372
{
4104 Serge 7373
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
3746 Serge 7374
}
7375
 
3243 Serge 7376
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
4104 Serge 7377
				      u32 *fp,
3746 Serge 7378
				      intel_clock_t *reduced_clock, u32 *fp2)
3243 Serge 7379
{
7380
	struct drm_crtc *crtc = &intel_crtc->base;
7381
	struct drm_device *dev = crtc->dev;
7382
	struct drm_i915_private *dev_priv = dev->dev_private;
7383
	struct intel_encoder *intel_encoder;
7384
	uint32_t dpll;
3746 Serge 7385
	int factor, num_connectors = 0;
4104 Serge 7386
	bool is_lvds = false, is_sdvo = false;
3243 Serge 7387
 
5354 serge 7388
	for_each_intel_encoder(dev, intel_encoder) {
7389
		if (intel_encoder->new_crtc != to_intel_crtc(crtc))
7390
			continue;
7391
 
3243 Serge 7392
		switch (intel_encoder->type) {
7393
		case INTEL_OUTPUT_LVDS:
7394
			is_lvds = true;
7395
			break;
7396
		case INTEL_OUTPUT_SDVO:
7397
		case INTEL_OUTPUT_HDMI:
7398
			is_sdvo = true;
7399
			break;
5354 serge 7400
		default:
7401
			break;
3243 Serge 7402
		}
7403
 
7404
		num_connectors++;
7405
	}
7406
 
2327 Serge 7407
    /* Enable autotuning of the PLL clock (if permissible) */
7408
    factor = 21;
7409
    if (is_lvds) {
7410
        if ((intel_panel_use_ssc(dev_priv) &&
4560 Serge 7411
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
3746 Serge 7412
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
2327 Serge 7413
            factor = 25;
5354 serge 7414
	} else if (intel_crtc->new_config->sdvo_tv_clock)
2327 Serge 7415
        factor = 20;
7416
 
5354 serge 7417
	if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
3746 Serge 7418
		*fp |= FP_CB_TUNE;
2327 Serge 7419
 
3746 Serge 7420
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
7421
		*fp2 |= FP_CB_TUNE;
7422
 
2327 Serge 7423
    dpll = 0;
7424
 
7425
    if (is_lvds)
7426
        dpll |= DPLLB_MODE_LVDS;
7427
    else
7428
        dpll |= DPLLB_MODE_DAC_SERIAL;
4104 Serge 7429
 
5354 serge 7430
	dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
3746 Serge 7431
				<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
2327 Serge 7432
 
4104 Serge 7433
	if (is_sdvo)
7434
		dpll |= DPLL_SDVO_HIGH_SPEED;
5354 serge 7435
	if (intel_crtc->new_config->has_dp_encoder)
4104 Serge 7436
		dpll |= DPLL_SDVO_HIGH_SPEED;
7437
 
2327 Serge 7438
    /* compute bitmask from p1 value */
5354 serge 7439
	dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
2327 Serge 7440
    /* also FPA1 */
5354 serge 7441
	dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
2327 Serge 7442
 
5354 serge 7443
	switch (intel_crtc->new_config->dpll.p2) {
2327 Serge 7444
    case 5:
7445
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7446
        break;
7447
    case 7:
7448
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7449
        break;
7450
    case 10:
7451
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7452
        break;
7453
    case 14:
7454
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7455
        break;
7456
    }
7457
 
4104 Serge 7458
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
2327 Serge 7459
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7460
    else
7461
        dpll |= PLL_REF_INPUT_DREFCLK;
7462
 
4104 Serge 7463
	return dpll | DPLL_VCO_ENABLE;
3243 Serge 7464
}
7465
 
5354 serge 7466
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
3243 Serge 7467
{
5354 serge 7468
	struct drm_device *dev = crtc->base.dev;
3243 Serge 7469
	intel_clock_t clock, reduced_clock;
4104 Serge 7470
	u32 dpll = 0, fp = 0, fp2 = 0;
3243 Serge 7471
	bool ok, has_reduced_clock = false;
3746 Serge 7472
	bool is_lvds = false;
4104 Serge 7473
	struct intel_shared_dpll *pll;
3243 Serge 7474
 
5354 serge 7475
	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
3243 Serge 7476
 
7477
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
7478
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
7479
 
5354 serge 7480
	ok = ironlake_compute_clocks(&crtc->base, &clock,
3243 Serge 7481
				     &has_reduced_clock, &reduced_clock);
5354 serge 7482
	if (!ok && !crtc->new_config->clock_set) {
3243 Serge 7483
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7484
		return -EINVAL;
7485
	}
3746 Serge 7486
	/* Compat-code for transition, will disappear. */
5354 serge 7487
	if (!crtc->new_config->clock_set) {
7488
		crtc->new_config->dpll.n = clock.n;
7489
		crtc->new_config->dpll.m1 = clock.m1;
7490
		crtc->new_config->dpll.m2 = clock.m2;
7491
		crtc->new_config->dpll.p1 = clock.p1;
7492
		crtc->new_config->dpll.p2 = clock.p2;
3746 Serge 7493
	}
3243 Serge 7494
 
4104 Serge 7495
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5354 serge 7496
	if (crtc->new_config->has_pch_encoder) {
7497
		fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
3243 Serge 7498
		if (has_reduced_clock)
4104 Serge 7499
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
3243 Serge 7500
 
5354 serge 7501
		dpll = ironlake_compute_dpll(crtc,
4104 Serge 7502
					     &fp, &reduced_clock,
5060 serge 7503
					     has_reduced_clock ? &fp2 : NULL);
3243 Serge 7504
 
5354 serge 7505
		crtc->new_config->dpll_hw_state.dpll = dpll;
7506
		crtc->new_config->dpll_hw_state.fp0 = fp;
4104 Serge 7507
		if (has_reduced_clock)
5354 serge 7508
			crtc->new_config->dpll_hw_state.fp1 = fp2;
4104 Serge 7509
		else
5354 serge 7510
			crtc->new_config->dpll_hw_state.fp1 = fp;
2327 Serge 7511
 
5354 serge 7512
		pll = intel_get_shared_dpll(crtc);
3031 serge 7513
		if (pll == NULL) {
4104 Serge 7514
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
5354 serge 7515
					 pipe_name(crtc->pipe));
2342 Serge 7516
			return -EINVAL;
2327 Serge 7517
		}
5354 serge 7518
	}
2327 Serge 7519
 
5060 serge 7520
	if (is_lvds && has_reduced_clock && i915.powersave)
5354 serge 7521
		crtc->lowfreq_avail = true;
4104 Serge 7522
	else
5354 serge 7523
		crtc->lowfreq_avail = false;
2327 Serge 7524
 
5060 serge 7525
	return 0;
4104 Serge 7526
}
3243 Serge 7527
 
4560 Serge 7528
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7529
					 struct intel_link_m_n *m_n)
4104 Serge 7530
{
7531
	struct drm_device *dev = crtc->base.dev;
7532
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 7533
	enum pipe pipe = crtc->pipe;
4104 Serge 7534
 
4560 Serge 7535
	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
7536
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
7537
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
7538
		& ~TU_SIZE_MASK;
7539
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
7540
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
7541
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7542
}
7543
 
7544
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7545
					 enum transcoder transcoder,
5354 serge 7546
					 struct intel_link_m_n *m_n,
7547
					 struct intel_link_m_n *m2_n2)
4560 Serge 7548
{
7549
	struct drm_device *dev = crtc->base.dev;
7550
	struct drm_i915_private *dev_priv = dev->dev_private;
7551
	enum pipe pipe = crtc->pipe;
7552
 
7553
	if (INTEL_INFO(dev)->gen >= 5) {
7554
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
7555
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
7556
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
4104 Serge 7557
					& ~TU_SIZE_MASK;
4560 Serge 7558
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
7559
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
4104 Serge 7560
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5354 serge 7561
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
7562
		 * gen < 8) and if DRRS is supported (to make sure the
7563
		 * registers are not unnecessarily read).
7564
		 */
7565
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
7566
			crtc->config.has_drrs) {
7567
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
7568
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
7569
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
7570
					& ~TU_SIZE_MASK;
7571
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
7572
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
7573
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7574
		}
4560 Serge 7575
	} else {
7576
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
7577
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
7578
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
7579
			& ~TU_SIZE_MASK;
7580
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
7581
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
7582
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7583
	}
3243 Serge 7584
}
7585
 
4560 Serge 7586
void intel_dp_get_m_n(struct intel_crtc *crtc,
7587
		      struct intel_crtc_config *pipe_config)
7588
{
7589
	if (crtc->config.has_pch_encoder)
7590
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7591
	else
7592
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5354 serge 7593
					     &pipe_config->dp_m_n,
7594
					     &pipe_config->dp_m2_n2);
4560 Serge 7595
}
7596
 
7597
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7598
					struct intel_crtc_config *pipe_config)
7599
{
7600
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5354 serge 7601
				     &pipe_config->fdi_m_n, NULL);
4560 Serge 7602
}
7603
 
5354 serge 7604
static void skylake_get_pfit_config(struct intel_crtc *crtc,
7605
				    struct intel_crtc_config *pipe_config)
7606
{
7607
	struct drm_device *dev = crtc->base.dev;
7608
	struct drm_i915_private *dev_priv = dev->dev_private;
7609
	uint32_t tmp;
7610
 
7611
	tmp = I915_READ(PS_CTL(crtc->pipe));
7612
 
7613
	if (tmp & PS_ENABLE) {
7614
		pipe_config->pch_pfit.enabled = true;
7615
		pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
7616
		pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
7617
	}
7618
}
7619
 
4104 Serge 7620
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
7621
				     struct intel_crtc_config *pipe_config)
7622
{
7623
	struct drm_device *dev = crtc->base.dev;
7624
	struct drm_i915_private *dev_priv = dev->dev_private;
7625
	uint32_t tmp;
7626
 
7627
	tmp = I915_READ(PF_CTL(crtc->pipe));
7628
 
7629
	if (tmp & PF_ENABLE) {
7630
		pipe_config->pch_pfit.enabled = true;
7631
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
7632
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
7633
 
7634
		/* We currently do not free assignments of panel fitters on
7635
		 * ivb/hsw (since we don't use the higher upscaling modes which
7636
		 * differentiates them) so just WARN about this case for now. */
7637
		if (IS_GEN7(dev)) {
7638
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
7639
				PF_PIPE_SEL_IVB(crtc->pipe));
7640
		}
7641
	}
7642
}
7643
 
5060 serge 7644
static void ironlake_get_plane_config(struct intel_crtc *crtc,
7645
				      struct intel_plane_config *plane_config)
7646
{
7647
	struct drm_device *dev = crtc->base.dev;
7648
	struct drm_i915_private *dev_priv = dev->dev_private;
7649
	u32 val, base, offset;
7650
	int pipe = crtc->pipe, plane = crtc->plane;
7651
	int fourcc, pixel_format;
7652
	int aligned_height;
7653
 
7654
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
7655
	if (!crtc->base.primary->fb) {
7656
		DRM_DEBUG_KMS("failed to alloc fb\n");
7657
		return;
7658
	}
7659
 
7660
	val = I915_READ(DSPCNTR(plane));
7661
 
7662
	if (INTEL_INFO(dev)->gen >= 4)
7663
		if (val & DISPPLANE_TILED)
7664
			plane_config->tiled = true;
7665
 
7666
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7667
	fourcc = intel_format_to_fourcc(pixel_format);
7668
	crtc->base.primary->fb->pixel_format = fourcc;
7669
	crtc->base.primary->fb->bits_per_pixel =
7670
		drm_format_plane_cpp(fourcc, 0) * 8;
7671
 
7672
	base = I915_READ(DSPSURF(plane)) & 0xfffff000;
7673
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7674
		offset = I915_READ(DSPOFFSET(plane));
7675
	} else {
7676
		if (plane_config->tiled)
7677
			offset = I915_READ(DSPTILEOFF(plane));
7678
		else
7679
			offset = I915_READ(DSPLINOFF(plane));
7680
	}
7681
	plane_config->base = base;
7682
 
7683
	val = I915_READ(PIPESRC(pipe));
7684
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
7685
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
7686
 
7687
	val = I915_READ(DSPSTRIDE(pipe));
5354 serge 7688
	crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
5060 serge 7689
 
7690
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
7691
					    plane_config->tiled);
7692
 
5354 serge 7693
	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
7694
					aligned_height);
5060 serge 7695
 
7696
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7697
		      pipe, plane, crtc->base.primary->fb->width,
7698
		      crtc->base.primary->fb->height,
7699
		      crtc->base.primary->fb->bits_per_pixel, base,
7700
		      crtc->base.primary->fb->pitches[0],
7701
		      plane_config->size);
7702
}
7703
 
3746 Serge 7704
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7705
				     struct intel_crtc_config *pipe_config)
7706
{
7707
	struct drm_device *dev = crtc->base.dev;
7708
	struct drm_i915_private *dev_priv = dev->dev_private;
7709
	uint32_t tmp;
7710
 
5354 serge 7711
	if (!intel_display_power_is_enabled(dev_priv,
5060 serge 7712
					 POWER_DOMAIN_PIPE(crtc->pipe)))
7713
		return false;
7714
 
4104 Serge 7715
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7716
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7717
 
3746 Serge 7718
	tmp = I915_READ(PIPECONF(crtc->pipe));
7719
	if (!(tmp & PIPECONF_ENABLE))
7720
		return false;
7721
 
4280 Serge 7722
	switch (tmp & PIPECONF_BPC_MASK) {
7723
	case PIPECONF_6BPC:
7724
		pipe_config->pipe_bpp = 18;
7725
		break;
7726
	case PIPECONF_8BPC:
7727
		pipe_config->pipe_bpp = 24;
7728
		break;
7729
	case PIPECONF_10BPC:
7730
		pipe_config->pipe_bpp = 30;
7731
		break;
7732
	case PIPECONF_12BPC:
7733
		pipe_config->pipe_bpp = 36;
7734
		break;
7735
	default:
7736
		break;
7737
	}
7738
 
5060 serge 7739
	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7740
		pipe_config->limited_color_range = true;
7741
 
4104 Serge 7742
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
7743
		struct intel_shared_dpll *pll;
7744
 
3746 Serge 7745
		pipe_config->has_pch_encoder = true;
7746
 
4104 Serge 7747
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
7748
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7749
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
7750
 
7751
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
7752
 
7753
		if (HAS_PCH_IBX(dev_priv->dev)) {
7754
			pipe_config->shared_dpll =
7755
				(enum intel_dpll_id) crtc->pipe;
7756
		} else {
7757
			tmp = I915_READ(PCH_DPLL_SEL);
7758
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
7759
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
7760
			else
7761
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
7762
		}
7763
 
7764
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7765
 
7766
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
7767
					   &pipe_config->dpll_hw_state));
7768
 
7769
		tmp = pipe_config->dpll_hw_state.dpll;
7770
		pipe_config->pixel_multiplier =
7771
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
7772
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
4560 Serge 7773
 
7774
		ironlake_pch_clock_get(crtc, pipe_config);
4104 Serge 7775
	} else {
7776
		pipe_config->pixel_multiplier = 1;
7777
	}
7778
 
7779
	intel_get_pipe_timings(crtc, pipe_config);
7780
 
7781
	ironlake_get_pfit_config(crtc, pipe_config);
7782
 
3746 Serge 7783
	return true;
7784
}
7785
 
4104 Serge 7786
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7787
{
7788
	struct drm_device *dev = dev_priv->dev;
7789
	struct intel_crtc *crtc;
7790
 
5060 serge 7791
	for_each_intel_crtc(dev, crtc)
4539 Serge 7792
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
4104 Serge 7793
		     pipe_name(crtc->pipe));
7794
 
7795
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
5060 serge 7796
	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
7797
	WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
7798
	WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
4104 Serge 7799
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
7800
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
7801
	     "CPU PWM1 enabled\n");
5060 serge 7802
	if (IS_HASWELL(dev))
4104 Serge 7803
		WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
7804
		     "CPU PWM2 enabled\n");
7805
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
7806
	     "PCH PWM1 enabled\n");
7807
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
7808
	     "Utility pin enabled\n");
7809
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
7810
 
5060 serge 7811
	/*
7812
	 * In theory we can still leave IRQs enabled, as long as only the HPD
7813
	 * interrupts remain enabled. We used to check for that, but since it's
7814
	 * gen-specific and since we only disable LCPLL after we fully disable
7815
	 * the interrupts, the check below should be enough.
7816
	 */
7817
	WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4104 Serge 7818
}
7819
 
5060 serge 7820
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
7821
{
7822
	struct drm_device *dev = dev_priv->dev;
7823
 
7824
	if (IS_HASWELL(dev))
7825
		return I915_READ(D_COMP_HSW);
7826
	else
7827
		return I915_READ(D_COMP_BDW);
7828
}
7829
 
7830
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7831
{
7832
	struct drm_device *dev = dev_priv->dev;
7833
 
7834
	if (IS_HASWELL(dev)) {
7835
		mutex_lock(&dev_priv->rps.hw_lock);
7836
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7837
					    val))
7838
			DRM_ERROR("Failed to write to D_COMP\n");
7839
		mutex_unlock(&dev_priv->rps.hw_lock);
7840
	} else {
7841
		I915_WRITE(D_COMP_BDW, val);
7842
		POSTING_READ(D_COMP_BDW);
7843
	}
7844
}
7845
 
4104 Serge 7846
/*
7847
 * This function implements pieces of two sequences from BSpec:
7848
 * - Sequence for display software to disable LCPLL
7849
 * - Sequence for display software to allow package C8+
7850
 * The steps implemented here are just the steps that actually touch the LCPLL
7851
 * register. Callers should take care of disabling all the display engine
7852
 * functions, doing the mode unset, fixing interrupts, etc.
7853
 */
4560 Serge 7854
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4104 Serge 7855
		       bool switch_to_fclk, bool allow_power_down)
7856
{
7857
	uint32_t val;
7858
 
7859
	assert_can_disable_lcpll(dev_priv);
7860
 
7861
	val = I915_READ(LCPLL_CTL);
7862
 
7863
	if (switch_to_fclk) {
7864
		val |= LCPLL_CD_SOURCE_FCLK;
7865
		I915_WRITE(LCPLL_CTL, val);
7866
 
7867
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
7868
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
7869
			DRM_ERROR("Switching to FCLK failed\n");
7870
 
7871
		val = I915_READ(LCPLL_CTL);
7872
	}
7873
 
7874
	val |= LCPLL_PLL_DISABLE;
7875
	I915_WRITE(LCPLL_CTL, val);
7876
	POSTING_READ(LCPLL_CTL);
7877
 
7878
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
7879
		DRM_ERROR("LCPLL still locked\n");
7880
 
5060 serge 7881
	val = hsw_read_dcomp(dev_priv);
4104 Serge 7882
	val |= D_COMP_COMP_DISABLE;
5060 serge 7883
	hsw_write_dcomp(dev_priv, val);
7884
	ndelay(100);
4104 Serge 7885
 
5060 serge 7886
	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
7887
		     1))
4104 Serge 7888
		DRM_ERROR("D_COMP RCOMP still in progress\n");
7889
 
7890
	if (allow_power_down) {
7891
		val = I915_READ(LCPLL_CTL);
7892
		val |= LCPLL_POWER_DOWN_ALLOW;
7893
		I915_WRITE(LCPLL_CTL, val);
7894
		POSTING_READ(LCPLL_CTL);
7895
	}
7896
}
7897
 
7898
/*
7899
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
7900
 * source.
7901
 */
4560 Serge 7902
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4104 Serge 7903
{
7904
	uint32_t val;
7905
 
7906
	val = I915_READ(LCPLL_CTL);
7907
 
7908
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
7909
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
7910
		return;
7911
 
5060 serge 7912
	/*
7913
	 * Make sure we're not in PC8 state before disabling PC8, otherwise
7914
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
7915
	 *
7916
	 * The other problem is that hsw_restore_lcpll() is called as part of
7917
	 * the runtime PM resume sequence, so we can't just call
7918
	 * gen6_gt_force_wake_get() because that function calls
7919
	 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
7920
	 * while we are on the resume sequence. So to solve this problem we have
7921
	 * to call special forcewake code that doesn't touch runtime PM and
7922
	 * doesn't enable the forcewake delayed work.
7923
	 */
5354 serge 7924
	spin_lock_irq(&dev_priv->uncore.lock);
5060 serge 7925
	if (dev_priv->uncore.forcewake_count++ == 0)
7926
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
5354 serge 7927
	spin_unlock_irq(&dev_priv->uncore.lock);
4104 Serge 7928
 
7929
	if (val & LCPLL_POWER_DOWN_ALLOW) {
7930
		val &= ~LCPLL_POWER_DOWN_ALLOW;
7931
		I915_WRITE(LCPLL_CTL, val);
7932
		POSTING_READ(LCPLL_CTL);
7933
	}
7934
 
5060 serge 7935
	val = hsw_read_dcomp(dev_priv);
4104 Serge 7936
	val |= D_COMP_COMP_FORCE;
7937
	val &= ~D_COMP_COMP_DISABLE;
5060 serge 7938
	hsw_write_dcomp(dev_priv, val);
4104 Serge 7939
 
7940
	val = I915_READ(LCPLL_CTL);
7941
	val &= ~LCPLL_PLL_DISABLE;
7942
	I915_WRITE(LCPLL_CTL, val);
7943
 
7944
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
7945
		DRM_ERROR("LCPLL not locked yet\n");
7946
 
7947
	if (val & LCPLL_CD_SOURCE_FCLK) {
7948
		val = I915_READ(LCPLL_CTL);
7949
		val &= ~LCPLL_CD_SOURCE_FCLK;
7950
		I915_WRITE(LCPLL_CTL, val);
7951
 
7952
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
7953
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
7954
			DRM_ERROR("Switching back to LCPLL failed\n");
7955
	}
7956
 
5060 serge 7957
	/* See the big comment above. */
5354 serge 7958
	spin_lock_irq(&dev_priv->uncore.lock);
5060 serge 7959
	if (--dev_priv->uncore.forcewake_count == 0)
7960
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
5354 serge 7961
	spin_unlock_irq(&dev_priv->uncore.lock);
4104 Serge 7962
}
7963
 
5060 serge 7964
/*
7965
 * Package states C8 and deeper are really deep PC states that can only be
7966
 * reached when all the devices on the system allow it, so even if the graphics
7967
 * device allows PC8+, it doesn't mean the system will actually get to these
7968
 * states. Our driver only allows PC8+ when going into runtime PM.
7969
 *
7970
 * The requirements for PC8+ are that all the outputs are disabled, the power
7971
 * well is disabled and most interrupts are disabled, and these are also
7972
 * requirements for runtime PM. When these conditions are met, we manually do
7973
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
7974
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
7975
 * hang the machine.
7976
 *
7977
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
7978
 * the state of some registers, so when we come back from PC8+ we need to
7979
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
7980
 * need to take care of the registers kept by RC6. Notice that this happens even
7981
 * if we don't put the device in PCI D3 state (which is what currently happens
7982
 * because of the runtime PM support).
7983
 *
7984
 * For more, read "Display Sequences for Package C8" on the hardware
7985
 * documentation.
7986
 */
7987
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4104 Serge 7988
{
7989
	struct drm_device *dev = dev_priv->dev;
7990
	uint32_t val;
7991
 
7992
	DRM_DEBUG_KMS("Enabling package C8+\n");
7993
 
7994
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7995
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
7996
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7997
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7998
	}
7999
 
8000
	lpt_disable_clkout_dp(dev);
8001
	hsw_disable_lcpll(dev_priv, true, true);
8002
}
8003
 
5060 serge 8004
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4104 Serge 8005
{
8006
	struct drm_device *dev = dev_priv->dev;
8007
	uint32_t val;
8008
 
8009
	DRM_DEBUG_KMS("Disabling package C8+\n");
8010
 
8011
	hsw_restore_lcpll(dev_priv);
8012
	lpt_init_pch_refclk(dev);
8013
 
8014
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
8015
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
8016
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
8017
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8018
	}
8019
 
8020
	intel_prepare_ddi(dev);
8021
}
8022
 
5354 serge 8023
static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
4104 Serge 8024
{
5354 serge 8025
	if (!intel_ddi_pll_select(crtc))
8026
		return -EINVAL;
8027
 
8028
	crtc->lowfreq_avail = false;
8029
 
8030
	return 0;
4104 Serge 8031
}
8032
 
5354 serge 8033
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
8034
				enum port port,
8035
				struct intel_crtc_config *pipe_config)
4104 Serge 8036
{
5354 serge 8037
	u32 temp;
8038
 
8039
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
8040
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
8041
 
8042
	switch (pipe_config->ddi_pll_sel) {
8043
	case SKL_DPLL1:
8044
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
8045
		break;
8046
	case SKL_DPLL2:
8047
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
8048
		break;
8049
	case SKL_DPLL3:
8050
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
8051
		break;
8052
	}
4104 Serge 8053
}
8054
 
5354 serge 8055
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
8056
				enum port port,
8057
				struct intel_crtc_config *pipe_config)
4104 Serge 8058
{
5354 serge 8059
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
4104 Serge 8060
 
5354 serge 8061
	switch (pipe_config->ddi_pll_sel) {
8062
	case PORT_CLK_SEL_WRPLL1:
8063
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
8064
		break;
8065
	case PORT_CLK_SEL_WRPLL2:
8066
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
8067
		break;
8068
	}
4104 Serge 8069
}
8070
 
5060 serge 8071
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
8072
				       struct intel_crtc_config *pipe_config)
4104 Serge 8073
{
5060 serge 8074
	struct drm_device *dev = crtc->base.dev;
4104 Serge 8075
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 8076
	struct intel_shared_dpll *pll;
8077
	enum port port;
8078
	uint32_t tmp;
4104 Serge 8079
 
5060 serge 8080
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
4560 Serge 8081
 
5060 serge 8082
	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
4104 Serge 8083
 
5354 serge 8084
	if (IS_SKYLAKE(dev))
8085
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
8086
	else
8087
		haswell_get_ddi_pll(dev_priv, port, pipe_config);
4104 Serge 8088
 
5060 serge 8089
	if (pipe_config->shared_dpll >= 0) {
8090
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
4560 Serge 8091
 
5060 serge 8092
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
8093
					   &pipe_config->dpll_hw_state));
4104 Serge 8094
	}
8095
 
4560 Serge 8096
	/*
5060 serge 8097
	 * Haswell has only FDI/PCH transcoder A, which is connected to
8098
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
8099
	 * the PCH transcoder is on.
4560 Serge 8100
	 */
5354 serge 8101
	if (INTEL_INFO(dev)->gen < 9 &&
8102
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
5060 serge 8103
		pipe_config->has_pch_encoder = true;
4560 Serge 8104
 
5060 serge 8105
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
8106
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8107
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
3480 Serge 8108
 
5060 serge 8109
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
3480 Serge 8110
	}
4560 Serge 8111
}
8112
 
3746 Serge 8113
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
8114
				    struct intel_crtc_config *pipe_config)
8115
{
8116
	struct drm_device *dev = crtc->base.dev;
8117
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 8118
	enum intel_display_power_domain pfit_domain;
3746 Serge 8119
	uint32_t tmp;
8120
 
5354 serge 8121
	if (!intel_display_power_is_enabled(dev_priv,
5060 serge 8122
					 POWER_DOMAIN_PIPE(crtc->pipe)))
8123
		return false;
8124
 
4104 Serge 8125
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8126
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8127
 
8128
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
8129
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
8130
		enum pipe trans_edp_pipe;
8131
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
8132
		default:
8133
			WARN(1, "unknown pipe linked to edp transcoder\n");
8134
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
8135
		case TRANS_DDI_EDP_INPUT_A_ON:
8136
			trans_edp_pipe = PIPE_A;
8137
			break;
8138
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
8139
			trans_edp_pipe = PIPE_B;
8140
			break;
8141
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
8142
			trans_edp_pipe = PIPE_C;
8143
			break;
8144
		}
8145
 
8146
		if (trans_edp_pipe == crtc->pipe)
8147
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
8148
	}
8149
 
5354 serge 8150
	if (!intel_display_power_is_enabled(dev_priv,
4104 Serge 8151
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
8152
		return false;
8153
 
8154
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
3746 Serge 8155
	if (!(tmp & PIPECONF_ENABLE))
8156
		return false;
8157
 
5060 serge 8158
	haswell_get_ddi_port_state(crtc, pipe_config);
3746 Serge 8159
 
4104 Serge 8160
	intel_get_pipe_timings(crtc, pipe_config);
8161
 
8162
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
5354 serge 8163
	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
8164
		if (IS_SKYLAKE(dev))
8165
			skylake_get_pfit_config(crtc, pipe_config);
8166
		else
4104 Serge 8167
			ironlake_get_pfit_config(crtc, pipe_config);
5354 serge 8168
	}
4104 Serge 8169
 
4560 Serge 8170
	if (IS_HASWELL(dev))
4104 Serge 8171
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
8172
					   (I915_READ(IPS_CTL) & IPS_ENABLE);
8173
 
5354 serge 8174
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
8175
		pipe_config->pixel_multiplier =
8176
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
8177
	} else {
4104 Serge 8178
		pipe_config->pixel_multiplier = 1;
4560 Serge 8179
	}
8180
 
2342 Serge 8181
	return true;
8182
}
8183
 
5354 serge 8184
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
2342 Serge 8185
{
5354 serge 8186
	struct drm_device *dev = crtc->dev;
8187
	struct drm_i915_private *dev_priv = dev->dev_private;
8188
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8189
	uint32_t cntl = 0, size = 0;
2342 Serge 8190
 
5354 serge 8191
	if (base) {
8192
		unsigned int width = intel_crtc->cursor_width;
8193
		unsigned int height = intel_crtc->cursor_height;
8194
		unsigned int stride = roundup_pow_of_two(width) * 4;
2342 Serge 8195
 
5354 serge 8196
		switch (stride) {
8197
		default:
8198
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
8199
				  width, stride);
8200
			stride = 256;
8201
			/* fallthrough */
8202
		case 256:
8203
		case 512:
8204
		case 1024:
8205
		case 2048:
8206
			break;
4560 Serge 8207
	}
3031 serge 8208
 
5354 serge 8209
		cntl |= CURSOR_ENABLE |
8210
			CURSOR_GAMMA_ENABLE |
8211
			CURSOR_FORMAT_ARGB |
8212
			CURSOR_STRIDE(stride);
3031 serge 8213
 
5354 serge 8214
		size = (height << 12) | width;
2342 Serge 8215
	}
8216
 
5354 serge 8217
	if (intel_crtc->cursor_cntl != 0 &&
8218
	    (intel_crtc->cursor_base != base ||
8219
	     intel_crtc->cursor_size != size ||
8220
	     intel_crtc->cursor_cntl != cntl)) {
8221
		/* On these chipsets we can only modify the base/size/stride
8222
		 * whilst the cursor is disabled.
3031 serge 8223
		 */
5060 serge 8224
		I915_WRITE(_CURACNTR, 0);
8225
		POSTING_READ(_CURACNTR);
8226
		intel_crtc->cursor_cntl = 0;
8227
	}
8228
 
5354 serge 8229
	if (intel_crtc->cursor_base != base) {
3031 serge 8230
		I915_WRITE(_CURABASE, base);
5354 serge 8231
		intel_crtc->cursor_base = base;
5060 serge 8232
	}
2327 Serge 8233
 
5354 serge 8234
	if (intel_crtc->cursor_size != size) {
8235
		I915_WRITE(CURSIZE, size);
8236
		intel_crtc->cursor_size = size;
8237
	}
8238
 
5060 serge 8239
	if (intel_crtc->cursor_cntl != cntl) {
3031 serge 8240
		I915_WRITE(_CURACNTR, cntl);
5060 serge 8241
		POSTING_READ(_CURACNTR);
8242
		intel_crtc->cursor_cntl = cntl;
8243
	}
3031 serge 8244
}
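/*
 * Worked example for the 845G/865G cursor programming above (illustrative
 * values only): a 64x64 ARGB cursor gives stride = roundup_pow_of_two(64) * 4
 * = 256 bytes, so cntl carries CURSOR_STRIDE(256) and the CURSIZE value is
 * (64 << 12) | 64 = 0x40040.
 */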
2327 Serge 8245
 
3031 serge 8246
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8247
{
8248
	struct drm_device *dev = crtc->dev;
8249
	struct drm_i915_private *dev_priv = dev->dev_private;
8250
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8251
	int pipe = intel_crtc->pipe;
5060 serge 8252
	uint32_t cntl;
2327 Serge 8253
 
5060 serge 8254
	cntl = 0;
3031 serge 8255
	if (base) {
5060 serge 8256
		cntl = MCURSOR_GAMMA_ENABLE;
8257
		switch (intel_crtc->cursor_width) {
8258
			case 64:
8259
				cntl |= CURSOR_MODE_64_ARGB_AX;
8260
				break;
8261
			case 128:
8262
				cntl |= CURSOR_MODE_128_ARGB_AX;
8263
				break;
8264
			case 256:
8265
				cntl |= CURSOR_MODE_256_ARGB_AX;
8266
				break;
8267
			default:
8268
				WARN_ON(1);
8269
				return;
8270
		}
3031 serge 8271
		cntl |= pipe << 28; /* Connect to correct pipe */
2327 Serge 8272
 
5060 serge 8273
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3480 Serge 8274
			cntl |= CURSOR_PIPE_CSC_ENABLE;
5354 serge 8275
	}
5060 serge 8276
 
5354 serge 8277
	if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
8278
		cntl |= CURSOR_ROTATE_180;
8279
 
5060 serge 8280
	if (intel_crtc->cursor_cntl != cntl) {
8281
		I915_WRITE(CURCNTR(pipe), cntl);
8282
		POSTING_READ(CURCNTR(pipe));
8283
		intel_crtc->cursor_cntl = cntl;
4104 Serge 8284
		}
2327 Serge 8285
 
3031 serge 8286
	/* and commit changes on next vblank */
5060 serge 8287
	I915_WRITE(CURBASE(pipe), base);
8288
	POSTING_READ(CURBASE(pipe));
5354 serge 8289
 
8290
	intel_crtc->cursor_base = base;
3031 serge 8291
}
2327 Serge 8292
 
3031 serge 8293
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
5060 serge 8294
void intel_crtc_update_cursor(struct drm_crtc *crtc,
3031 serge 8295
				     bool on)
8296
{
8297
	struct drm_device *dev = crtc->dev;
8298
	struct drm_i915_private *dev_priv = dev->dev_private;
8299
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8300
	int pipe = intel_crtc->pipe;
5060 serge 8301
	int x = crtc->cursor_x;
8302
	int y = crtc->cursor_y;
4560 Serge 8303
	u32 base = 0, pos = 0;
2327 Serge 8304
 
4560 Serge 8305
	if (on)
8306
		base = intel_crtc->cursor_addr;
2327 Serge 8307
 
4560 Serge 8308
	if (x >= intel_crtc->config.pipe_src_w)
3031 serge 8309
		base = 0;
2327 Serge 8310
 
4560 Serge 8311
	if (y >= intel_crtc->config.pipe_src_h)
3031 serge 8312
		base = 0;
2327 Serge 8313
 
3031 serge 8314
	if (x < 0) {
4560 Serge 8315
		if (x + intel_crtc->cursor_width <= 0)
3031 serge 8316
			base = 0;
2327 Serge 8317
 
3031 serge 8318
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
8319
		x = -x;
8320
	}
8321
	pos |= x << CURSOR_X_SHIFT;
2327 Serge 8322
 
3031 serge 8323
	if (y < 0) {
4560 Serge 8324
		if (y + intel_crtc->cursor_height <= 0)
3031 serge 8325
			base = 0;
2327 Serge 8326
 
3031 serge 8327
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
8328
		y = -y;
8329
	}
8330
	pos |= y << CURSOR_Y_SHIFT;
2327 Serge 8331
 
5060 serge 8332
	if (base == 0 && intel_crtc->cursor_base == 0)
3031 serge 8333
		return;
2327 Serge 8334
 
5060 serge 8335
	I915_WRITE(CURPOS(pipe), pos);
8336
 
5354 serge 8337
	/* ILK+ do this automagically */
8338
	if (HAS_GMCH_DISPLAY(dev) &&
8339
		to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
8340
		base += (intel_crtc->cursor_height *
8341
			intel_crtc->cursor_width - 1) * 4;
8342
	}
8343
 
8344
	if (IS_845G(dev) || IS_I865G(dev))
5060 serge 8345
		i845_update_cursor(crtc, base);
8346
	else
4560 Serge 8347
		i9xx_update_cursor(crtc, base);
3031 serge 8348
}
2327 Serge 8349
 
5354 serge 8350
static bool cursor_size_ok(struct drm_device *dev,
8351
			   uint32_t width, uint32_t height)
8352
{
8353
	if (width == 0 || height == 0)
8354
		return false;
8355
 
8356
	/*
8357
	 * 845g/865g are special in that they are only limited by
8358
	 * the width of their cursors, the height is arbitrary up to
8359
	 * the precision of the register. Everything else requires
8360
	 * square cursors, limited to a few power-of-two sizes.
5060 serge 8361
	 */
5354 serge 8362
	if (IS_845G(dev) || IS_I865G(dev)) {
8363
		if ((width & 63) != 0)
8364
			return false;
8365
 
8366
		if (width > (IS_845G(dev) ? 64 : 512))
8367
			return false;
8368
 
8369
		if (height > 1023)
8370
			return false;
8371
	} else {
8372
		switch (width | height) {
8373
		case 256:
8374
		case 128:
8375
			if (IS_GEN2(dev))
8376
				return false;
8377
		case 64:
8378
			break;
8379
		default:
8380
			return false;
8381
		}
8382
	}
8383
 
8384
	return true;
8385
}
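/*
 * Summary of what cursor_size_ok() accepts, derived from the checks above:
 * 845G/865G only require the width to be a multiple of 64 (up to 64 on 845G,
 * 512 on 865G) with any height up to 1023, so e.g. 64x300 is fine there.
 * All other platforms only take square power-of-two cursors: 64x64 always,
 * plus 128x128 and 256x256 on gen3+.
 */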
8386
 
5060 serge 8387
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8388
				     struct drm_i915_gem_object *obj,
3031 serge 8389
				 uint32_t width, uint32_t height)
8390
{
8391
	struct drm_device *dev = crtc->dev;
5354 serge 8392
	struct drm_i915_private *dev_priv = to_i915(dev);
3031 serge 8393
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 8394
	enum pipe pipe = intel_crtc->pipe;
8395
	unsigned old_width;
3031 serge 8396
	uint32_t addr;
8397
	int ret;
2327 Serge 8398
 
3031 serge 8399
	/* if we want to turn off the cursor ignore width and height */
5060 serge 8400
	if (!obj) {
3031 serge 8401
		DRM_DEBUG_KMS("cursor off\n");
8402
		addr = 0;
8403
		mutex_lock(&dev->struct_mutex);
8404
		goto finish;
8405
	}
2327 Serge 8406
 
3031 serge 8407
	/* we only need to pin inside GTT if cursor is non-phy */
8408
	mutex_lock(&dev->struct_mutex);
5060 serge 8409
	if (!INTEL_INFO(dev)->cursor_needs_physical) {
3746 Serge 8410
		unsigned alignment;
8411
 
5097 serge 8412
		/*
8413
		 * Global gtt pte registers are special registers which actually
8414
		 * forward writes to a chunk of system memory. Which means that
8415
		 * there is no risk that the register values disappear as soon
8416
		 * as we call intel_runtime_pm_put(), so it is correct to wrap
8417
		 * only the pin/unpin/fence and not more.
8418
		 */
8419
		intel_runtime_pm_get(dev_priv);
8420
 
3746 Serge 8421
		/* Note that the w/a also requires 2 PTE of padding following
8422
		 * the bo. We currently fill all unused PTE with the shadow
8423
		 * page and so we should always have valid PTE following the
8424
		 * cursor preventing the VT-d warning.
8425
		 */
8426
		alignment = 0;
8427
		if (need_vtd_wa(dev))
8428
			alignment = 64*1024;
8429
 
8430
		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
3031 serge 8431
		if (ret) {
5060 serge 8432
			DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
5097 serge 8433
			intel_runtime_pm_put(dev_priv);
3031 serge 8434
			goto fail_locked;
8435
		}
2327 Serge 8436
 
3031 serge 8437
		ret = i915_gem_object_put_fence(obj);
8438
		if (ret) {
5060 serge 8439
			DRM_DEBUG_KMS("failed to release fence for cursor");
5097 serge 8440
			intel_runtime_pm_put(dev_priv);
3031 serge 8441
			goto fail_unpin;
8442
		}
2327 Serge 8443
 
4104 Serge 8444
		addr = i915_gem_obj_ggtt_offset(obj);
5097 serge 8445
 
8446
		intel_runtime_pm_put(dev_priv);
3031 serge 8447
	} else {
5354 serge 8448
		int align = IS_I830(dev) ? 16 * 1024 : 256;
8449
		/* i915_gem_object_attach_phys() is not ported here, so force the
		 * error path; the physical-cursor case is effectively disabled. */
		ret = 1; // i915_gem_object_attach_phys(obj, align);
8450
		if (ret) {
8451
			DRM_DEBUG_KMS("failed to attach phys object\n");
8452
			goto fail_locked;
8453
		}
8454
		addr = obj->phys_handle->busaddr;
3031 serge 8455
	}
2327 Serge 8456
 
3031 serge 8457
 finish:
8458
	if (intel_crtc->cursor_bo) {
5060 serge 8459
		if (!INTEL_INFO(dev)->cursor_needs_physical)
4104 Serge 8460
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
3031 serge 8461
	}
2327 Serge 8462
 
5060 serge 8463
	i915_gem_track_fb(intel_crtc->cursor_bo, obj,
8464
			  INTEL_FRONTBUFFER_CURSOR(pipe));
3031 serge 8465
	mutex_unlock(&dev->struct_mutex);
2327 Serge 8466
 
5060 serge 8467
	old_width = intel_crtc->cursor_width;
8468
 
3031 serge 8469
	intel_crtc->cursor_addr = addr;
8470
	intel_crtc->cursor_bo = obj;
8471
	intel_crtc->cursor_width = width;
8472
	intel_crtc->cursor_height = height;
2327 Serge 8473
 
5060 serge 8474
	if (intel_crtc->active) {
8475
		if (old_width != width)
8476
			intel_update_watermarks(crtc);
4104 Serge 8477
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
5354 serge 8478
 
8479
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
5060 serge 8480
	}
2327 Serge 8481
 
3031 serge 8482
	return 0;
8483
fail_unpin:
4104 Serge 8484
	i915_gem_object_unpin_from_display_plane(obj);
3031 serge 8485
fail_locked:
8486
	mutex_unlock(&dev->struct_mutex);
8487
	return ret;
8488
}
2327 Serge 8489
 
2330 Serge 8490
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8491
				 u16 *blue, uint32_t start, uint32_t size)
8492
{
8493
	int end = (start + size > 256) ? 256 : start + size, i;
8494
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 8495
 
2330 Serge 8496
	for (i = start; i < end; i++) {
8497
		intel_crtc->lut_r[i] = red[i] >> 8;
8498
		intel_crtc->lut_g[i] = green[i] >> 8;
8499
		intel_crtc->lut_b[i] = blue[i] >> 8;
8500
	}
2327 Serge 8501
 
2330 Serge 8502
	intel_crtc_load_lut(crtc);
8503
}
2327 Serge 8504
 
2330 Serge 8505
/* VESA 640x480x72Hz mode to set on the pipe */
8506
static struct drm_display_mode load_detect_mode = {
8507
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
8508
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
8509
};
2327 Serge 8510
 
4560 Serge 8511
struct drm_framebuffer *
5060 serge 8512
__intel_framebuffer_create(struct drm_device *dev,
3031 serge 8513
			 struct drm_mode_fb_cmd2 *mode_cmd,
8514
			 struct drm_i915_gem_object *obj)
8515
{
8516
	struct intel_framebuffer *intel_fb;
8517
	int ret;
2327 Serge 8518
 
3031 serge 8519
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8520
	if (!intel_fb) {
5354 serge 8521
		drm_gem_object_unreference(&obj->base);
3031 serge 8522
		return ERR_PTR(-ENOMEM);
8523
	}
2327 Serge 8524
 
3031 serge 8525
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
4560 Serge 8526
	if (ret)
8527
		goto err;
8528
 
8529
	return &intel_fb->base;
8530
err:
5354 serge 8531
	drm_gem_object_unreference(&obj->base);
3031 serge 8532
	kfree(intel_fb);
4560 Serge 8533
 
3031 serge 8534
	return ERR_PTR(ret);
8535
}
2327 Serge 8536
 
5060 serge 8537
static struct drm_framebuffer *
8538
intel_framebuffer_create(struct drm_device *dev,
8539
			 struct drm_mode_fb_cmd2 *mode_cmd,
8540
			 struct drm_i915_gem_object *obj)
8541
{
8542
	struct drm_framebuffer *fb;
8543
	int ret;
8544
 
8545
	ret = i915_mutex_lock_interruptible(dev);
8546
	if (ret)
8547
		return ERR_PTR(ret);
8548
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
8549
	mutex_unlock(&dev->struct_mutex);
8550
 
8551
	return fb;
8552
}
8553
 
2330 Serge 8554
static u32
8555
intel_framebuffer_pitch_for_width(int width, int bpp)
8556
{
8557
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
8558
	return ALIGN(pitch, 64);
8559
}
2327 Serge 8560
 
2330 Serge 8561
static u32
8562
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8563
{
8564
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5060 serge 8565
	return PAGE_ALIGN(pitch * mode->vdisplay);
2330 Serge 8566
}
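/*
 * Worked example (illustrative mode, not tied to any particular panel):
 * a 1024x768 mode at 32 bpp gives pitch = ALIGN(1024 * 4, 64) = 4096 bytes
 * and size = PAGE_ALIGN(4096 * 768) = 3145728 bytes (exactly 768 pages).
 */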
2327 Serge 8567
 
2330 Serge 8568
static struct drm_framebuffer *
8569
intel_framebuffer_create_for_mode(struct drm_device *dev,
8570
				  struct drm_display_mode *mode,
8571
				  int depth, int bpp)
8572
{
8573
	struct drm_i915_gem_object *obj;
3243 Serge 8574
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2327 Serge 8575
 
5060 serge 8576
	obj = i915_gem_alloc_object(dev,
8577
				    intel_framebuffer_size_for_mode(mode, bpp));
8578
	if (obj == NULL)
8579
		return ERR_PTR(-ENOMEM);
8580
 
8581
	mode_cmd.width = mode->hdisplay;
8582
	mode_cmd.height = mode->vdisplay;
8583
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
8584
								bpp);
8585
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
8586
 
8587
	return intel_framebuffer_create(dev, &mode_cmd, obj);
2330 Serge 8588
}
2327 Serge 8589
 
2330 Serge 8590
static struct drm_framebuffer *
8591
mode_fits_in_fbdev(struct drm_device *dev,
8592
		   struct drm_display_mode *mode)
8593
{
4560 Serge 8594
#ifdef CONFIG_DRM_I915_FBDEV
2330 Serge 8595
	struct drm_i915_private *dev_priv = dev->dev_private;
8596
	struct drm_i915_gem_object *obj;
8597
	struct drm_framebuffer *fb;
2327 Serge 8598
 
5060 serge 8599
	if (!dev_priv->fbdev)
4280 Serge 8600
		return NULL;
2327 Serge 8601
 
5060 serge 8602
	if (!dev_priv->fbdev->fb)
2330 Serge 8603
		return NULL;
2327 Serge 8604
 
5060 serge 8605
	obj = dev_priv->fbdev->fb->obj;
8606
	BUG_ON(!obj);
8607
 
8608
	fb = &dev_priv->fbdev->fb->base;
3031 serge 8609
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
8610
							       fb->bits_per_pixel))
4280 Serge 8611
		return NULL;
2327 Serge 8612
 
3031 serge 8613
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
8614
		return NULL;
8615
 
4280 Serge 8616
	return fb;
4560 Serge 8617
#else
8618
	return NULL;
8619
#endif
2330 Serge 8620
}
2327 Serge 8621
 
3031 serge 8622
bool intel_get_load_detect_pipe(struct drm_connector *connector,
2330 Serge 8623
				struct drm_display_mode *mode,
5060 serge 8624
				struct intel_load_detect_pipe *old,
8625
				struct drm_modeset_acquire_ctx *ctx)
2330 Serge 8626
{
8627
	struct intel_crtc *intel_crtc;
3031 serge 8628
	struct intel_encoder *intel_encoder =
8629
		intel_attached_encoder(connector);
2330 Serge 8630
	struct drm_crtc *possible_crtc;
8631
	struct drm_encoder *encoder = &intel_encoder->base;
8632
	struct drm_crtc *crtc = NULL;
8633
	struct drm_device *dev = encoder->dev;
3031 serge 8634
	struct drm_framebuffer *fb;
5060 serge 8635
	struct drm_mode_config *config = &dev->mode_config;
8636
	int ret, i = -1;
2327 Serge 8637
 
2330 Serge 8638
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5060 serge 8639
		      connector->base.id, connector->name,
8640
		      encoder->base.id, encoder->name);
2327 Serge 8641
 
5060 serge 8642
retry:
8643
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
8644
	if (ret)
8645
		goto fail_unlock;
8646
 
2330 Serge 8647
	/*
8648
	 * Algorithm gets a little messy:
8649
	 *
8650
	 *   - if the connector already has an assigned crtc, use it (but make
8651
	 *     sure it's on first)
8652
	 *
8653
	 *   - try to find the first unused crtc that can drive this connector,
8654
	 *     and use that if we find one
8655
	 */
2327 Serge 8656
 
2330 Serge 8657
	/* See if we already have a CRTC for this connector */
8658
	if (encoder->crtc) {
8659
		crtc = encoder->crtc;
2327 Serge 8660
 
5060 serge 8661
		ret = drm_modeset_lock(&crtc->mutex, ctx);
8662
		if (ret)
8663
			goto fail_unlock;
5354 serge 8664
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
8665
		if (ret)
8666
			goto fail_unlock;
3480 Serge 8667
 
3031 serge 8668
		old->dpms_mode = connector->dpms;
2330 Serge 8669
		old->load_detect_temp = false;
2327 Serge 8670
 
2330 Serge 8671
		/* Make sure the crtc and connector are running */
3031 serge 8672
		if (connector->dpms != DRM_MODE_DPMS_ON)
8673
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
2327 Serge 8674
 
2330 Serge 8675
		return true;
8676
	}
2327 Serge 8677
 
2330 Serge 8678
	/* Find an unused one (if possible) */
5060 serge 8679
	for_each_crtc(dev, possible_crtc) {
2330 Serge 8680
		i++;
8681
		if (!(encoder->possible_crtcs & (1 << i)))
8682
			continue;
5060 serge 8683
		if (possible_crtc->enabled)
8684
			continue;
8685
		/* This can occur when applying the pipe A quirk on resume. */
8686
		if (to_intel_crtc(possible_crtc)->new_enabled)
8687
			continue;
8688
 
2330 Serge 8689
		crtc = possible_crtc;
8690
		break;
8691
	}
2327 Serge 8692
 
2330 Serge 8693
	/*
8694
	 * If we didn't find an unused CRTC, don't use any.
8695
	 */
8696
	if (!crtc) {
8697
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
5060 serge 8698
		goto fail_unlock;
2330 Serge 8699
	}
2327 Serge 8700
 
5060 serge 8701
	ret = drm_modeset_lock(&crtc->mutex, ctx);
8702
	if (ret)
8703
		goto fail_unlock;
5354 serge 8704
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
8705
	if (ret)
8706
		goto fail_unlock;
3031 serge 8707
	intel_encoder->new_crtc = to_intel_crtc(crtc);
8708
	to_intel_connector(connector)->new_encoder = intel_encoder;
2327 Serge 8709
 
2330 Serge 8710
	intel_crtc = to_intel_crtc(crtc);
5060 serge 8711
	intel_crtc->new_enabled = true;
8712
	intel_crtc->new_config = &intel_crtc->config;
3031 serge 8713
	old->dpms_mode = connector->dpms;
2330 Serge 8714
	old->load_detect_temp = true;
8715
	old->release_fb = NULL;
2327 Serge 8716
 
2330 Serge 8717
	if (!mode)
8718
		mode = &load_detect_mode;
2327 Serge 8719
 
2330 Serge 8720
	/* We need a framebuffer large enough to accommodate all accesses
8721
	 * that the plane may generate whilst we perform load detection.
8722
	 * We can not rely on the fbcon either being present (we get called
8723
	 * during its initialisation to detect all boot displays, or it may
8724
	 * not even exist) or that it is large enough to satisfy the
8725
	 * requested mode.
8726
	 */
3031 serge 8727
	fb = mode_fits_in_fbdev(dev, mode);
8728
	if (fb == NULL) {
2330 Serge 8729
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
3031 serge 8730
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
8731
		old->release_fb = fb;
2330 Serge 8732
	} else
8733
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
3031 serge 8734
	if (IS_ERR(fb)) {
2330 Serge 8735
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5060 serge 8736
		goto fail;
2330 Serge 8737
	}
2327 Serge 8738
 
3480 Serge 8739
	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
2330 Serge 8740
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
8741
		if (old->release_fb)
8742
			old->release_fb->funcs->destroy(old->release_fb);
5060 serge 8743
		goto fail;
2330 Serge 8744
	}
2327 Serge 8745
 
2330 Serge 8746
	/* let the connector get through one full cycle before testing */
8747
	intel_wait_for_vblank(dev, intel_crtc->pipe);
8748
	return true;
5060 serge 8749
 
8750
 fail:
8751
	intel_crtc->new_enabled = crtc->enabled;
8752
	if (intel_crtc->new_enabled)
8753
		intel_crtc->new_config = &intel_crtc->config;
8754
	else
8755
		intel_crtc->new_config = NULL;
8756
fail_unlock:
8757
	if (ret == -EDEADLK) {
8758
		drm_modeset_backoff(ctx);
8759
		goto retry;
8760
	}
8761
 
8762
	return false;
2330 Serge 8763
}
2327 Serge 8764
 
3031 serge 8765
void intel_release_load_detect_pipe(struct drm_connector *connector,
2330 Serge 8766
				    struct intel_load_detect_pipe *old)
8767
{
3031 serge 8768
	struct intel_encoder *intel_encoder =
8769
		intel_attached_encoder(connector);
2330 Serge 8770
	struct drm_encoder *encoder = &intel_encoder->base;
3480 Serge 8771
	struct drm_crtc *crtc = encoder->crtc;
5060 serge 8772
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 8773
 
2330 Serge 8774
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5060 serge 8775
		      connector->base.id, connector->name,
8776
		      encoder->base.id, encoder->name);
2327 Serge 8777
 
2330 Serge 8778
	if (old->load_detect_temp) {
3031 serge 8779
		to_intel_connector(connector)->new_encoder = NULL;
8780
		intel_encoder->new_crtc = NULL;
5060 serge 8781
		intel_crtc->new_enabled = false;
8782
		intel_crtc->new_config = NULL;
3031 serge 8783
		intel_set_mode(crtc, NULL, 0, 0, NULL);
8784
 
3480 Serge 8785
		if (old->release_fb) {
8786
			drm_framebuffer_unregister_private(old->release_fb);
8787
			drm_framebuffer_unreference(old->release_fb);
8788
		}
2327 Serge 8789
 
2330 Serge 8790
		return;
8791
	}
2327 Serge 8792
 
2330 Serge 8793
	/* Switch crtc and encoder back off if necessary */
3031 serge 8794
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
8795
		connector->funcs->dpms(connector, old->dpms_mode);
2330 Serge 8796
}
2327 Serge 8797
 
4560 Serge 8798
static int i9xx_pll_refclk(struct drm_device *dev,
8799
			   const struct intel_crtc_config *pipe_config)
8800
{
8801
	struct drm_i915_private *dev_priv = dev->dev_private;
8802
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8803
 
8804
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8805
		return dev_priv->vbt.lvds_ssc_freq;
8806
	else if (HAS_PCH_SPLIT(dev))
8807
		return 120000;
8808
	else if (!IS_GEN2(dev))
8809
		return 96000;
8810
	else
8811
		return 48000;
8812
}
8813
 
2330 Serge 8814
/* Returns the clock of the currently programmed mode of the given pipe. */
4104 Serge 8815
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8816
				struct intel_crtc_config *pipe_config)
2330 Serge 8817
{
4104 Serge 8818
	struct drm_device *dev = crtc->base.dev;
2330 Serge 8819
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 8820
	int pipe = pipe_config->cpu_transcoder;
4560 Serge 8821
	u32 dpll = pipe_config->dpll_hw_state.dpll;
2330 Serge 8822
	u32 fp;
8823
	intel_clock_t clock;
4560 Serge 8824
	int refclk = i9xx_pll_refclk(dev, pipe_config);
2327 Serge 8825
 
2330 Serge 8826
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
4560 Serge 8827
		fp = pipe_config->dpll_hw_state.fp0;
2330 Serge 8828
	else
4560 Serge 8829
		fp = pipe_config->dpll_hw_state.fp1;
2327 Serge 8830
 
2330 Serge 8831
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8832
	if (IS_PINEVIEW(dev)) {
8833
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8834
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8835
	} else {
8836
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8837
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8838
	}
2327 Serge 8839
 
2330 Serge 8840
	if (!IS_GEN2(dev)) {
8841
		if (IS_PINEVIEW(dev))
8842
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8843
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8844
		else
8845
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8846
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
2327 Serge 8847
 
2330 Serge 8848
		switch (dpll & DPLL_MODE_MASK) {
8849
		case DPLLB_MODE_DAC_SERIAL:
8850
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8851
				5 : 10;
8852
			break;
8853
		case DPLLB_MODE_LVDS:
8854
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8855
				7 : 14;
8856
			break;
8857
		default:
8858
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8859
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
4104 Serge 8860
			return;
2330 Serge 8861
		}
2327 Serge 8862
 
4104 Serge 8863
		if (IS_PINEVIEW(dev))
4560 Serge 8864
			pineview_clock(refclk, &clock);
4104 Serge 8865
		else
4560 Serge 8866
			i9xx_clock(refclk, &clock);
2330 Serge 8867
	} else {
4560 Serge 8868
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8869
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
2327 Serge 8870
 
2330 Serge 8871
		if (is_lvds) {
8872
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8873
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
4560 Serge 8874
 
8875
			if (lvds & LVDS_CLKB_POWER_UP)
8876
				clock.p2 = 7;
8877
			else
2330 Serge 8878
				clock.p2 = 14;
8879
		} else {
8880
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
8881
				clock.p1 = 2;
8882
			else {
8883
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8884
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8885
			}
8886
			if (dpll & PLL_P2_DIVIDE_BY_4)
8887
				clock.p2 = 4;
8888
			else
8889
				clock.p2 = 2;
4560 Serge 8890
		}
2327 Serge 8891
 
4560 Serge 8892
		i9xx_clock(refclk, &clock);
2330 Serge 8893
	}
2327 Serge 8894
 
4560 Serge 8895
	/*
8896
	 * This value includes pixel_multiplier. We will use
8897
	 * port_clock to compute adjusted_mode.crtc_clock in the
8898
	 * encoder's get_config() function.
8899
	 */
8900
	pipe_config->port_clock = clock.dot;
4104 Serge 8901
}
8902
 
4560 Serge 8903
int intel_dotclock_calculate(int link_freq,
8904
			     const struct intel_link_m_n *m_n)
4104 Serge 8905
{
8906
	/*
8907
	 * The calculation for the data clock is:
4560 Serge 8908
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4104 Serge 8909
	 * But we want to avoid losing precision if possible, so:
4560 Serge 8910
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4104 Serge 8911
	 *
8912
	 * and the link clock is simpler:
4560 Serge 8913
	 * link_clock = (m * link_clock) / n
2330 Serge 8914
	 */
2327 Serge 8915
 
4560 Serge 8916
	if (!m_n->link_n)
8917
		return 0;
4104 Serge 8918
 
4560 Serge 8919
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8920
}
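/*
 * Worked example (illustrative M/N values only): with link_m = 131072,
 * link_n = 524288 (an m/n ratio of 1/4) and a 270000 kHz link clock,
 * intel_dotclock_calculate() returns 131072 * 270000 / 524288 = 67500 kHz.
 */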
4104 Serge 8921
 
4560 Serge 8922
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
8923
				   struct intel_crtc_config *pipe_config)
8924
{
8925
	struct drm_device *dev = crtc->base.dev;
4104 Serge 8926
 
4560 Serge 8927
	/* read out port_clock from the DPLL */
8928
	i9xx_crtc_clock_get(crtc, pipe_config);
4104 Serge 8929
 
4560 Serge 8930
	/*
8931
	 * This value does not include pixel_multiplier.
8932
	 * We will check that port_clock and adjusted_mode.crtc_clock
8933
	 * agree once we know their relationship in the encoder's
8934
	 * get_config() function.
8935
	 */
8936
	pipe_config->adjusted_mode.crtc_clock =
8937
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
8938
					 &pipe_config->fdi_m_n);
2330 Serge 8939
}
2327 Serge 8940
 
2330 Serge 8941
/** Returns the currently programmed mode of the given pipe. */
8942
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8943
					     struct drm_crtc *crtc)
8944
{
8945
	struct drm_i915_private *dev_priv = dev->dev_private;
8946
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746 Serge 8947
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
2330 Serge 8948
	struct drm_display_mode *mode;
4104 Serge 8949
	struct intel_crtc_config pipe_config;
3243 Serge 8950
	int htot = I915_READ(HTOTAL(cpu_transcoder));
8951
	int hsync = I915_READ(HSYNC(cpu_transcoder));
8952
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
8953
	int vsync = I915_READ(VSYNC(cpu_transcoder));
4560 Serge 8954
	enum pipe pipe = intel_crtc->pipe;
2327 Serge 8955
 
2330 Serge 8956
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8957
	if (!mode)
8958
		return NULL;
8959
 
4104 Serge 8960
	/*
8961
	 * Construct a pipe_config sufficient for getting the clock info
8962
	 * back out of crtc_clock_get.
8963
	 *
8964
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
8965
	 * to use a real value here instead.
8966
	 */
4560 Serge 8967
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
4104 Serge 8968
	pipe_config.pixel_multiplier = 1;
4560 Serge 8969
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
8970
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
8971
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
4104 Serge 8972
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
8973
 
4560 Serge 8974
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
2330 Serge 8975
	mode->hdisplay = (htot & 0xffff) + 1;
8976
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
8977
	mode->hsync_start = (hsync & 0xffff) + 1;
8978
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
8979
	mode->vdisplay = (vtot & 0xffff) + 1;
8980
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
8981
	mode->vsync_start = (vsync & 0xffff) + 1;
8982
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
8983
 
8984
	drm_mode_set_name(mode);
8985
 
8986
	return mode;
8987
}
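/*
 * Example of the timing unpacking above (illustrative register value only):
 * if HTOTAL reads 0x053f04ff, then hdisplay = (0x053f04ff & 0xffff) + 1 = 1280
 * and htotal = ((0x053f04ff & 0xffff0000) >> 16) + 1 = 1344; the vertical
 * fields decode the same way from VTOTAL/VSYNC.
 */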
8988
 
3031 serge 8989
static void intel_decrease_pllclock(struct drm_crtc *crtc)
8990
{
8991
	struct drm_device *dev = crtc->dev;
5060 serge 8992
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 8993
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 8994
 
5060 serge 8995
	if (!HAS_GMCH_DISPLAY(dev))
3031 serge 8996
		return;
2327 Serge 8997
 
3031 serge 8998
	if (!dev_priv->lvds_downclock_avail)
8999
		return;
2327 Serge 9000
 
3031 serge 9001
	/*
9002
	 * Since this is called by a timer, we should never get here in
9003
	 * the manual case.
9004
	 */
9005
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
9006
		int pipe = intel_crtc->pipe;
9007
		int dpll_reg = DPLL(pipe);
9008
		int dpll;
2327 Serge 9009
 
3031 serge 9010
		DRM_DEBUG_DRIVER("downclocking LVDS\n");
2327 Serge 9011
 
3031 serge 9012
		assert_panel_unlocked(dev_priv, pipe);
2327 Serge 9013
 
3031 serge 9014
		dpll = I915_READ(dpll_reg);
9015
		dpll |= DISPLAY_RATE_SELECT_FPA1;
9016
		I915_WRITE(dpll_reg, dpll);
9017
		intel_wait_for_vblank(dev, pipe);
9018
		dpll = I915_READ(dpll_reg);
9019
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
9020
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
9021
	}
2327 Serge 9022
 
3031 serge 9023
}
2327 Serge 9024
 
3031 serge 9025
void intel_mark_busy(struct drm_device *dev)
9026
{
4104 Serge 9027
	struct drm_i915_private *dev_priv = dev->dev_private;
9028
 
5060 serge 9029
	if (dev_priv->mm.busy)
9030
		return;
9031
 
9032
	intel_runtime_pm_get(dev_priv);
4104 Serge 9033
	i915_update_gfx_val(dev_priv);
5060 serge 9034
	dev_priv->mm.busy = true;
3031 serge 9035
}
2327 Serge 9036
 
3031 serge 9037
void intel_mark_idle(struct drm_device *dev)
9038
{
4104 Serge 9039
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9040
	struct drm_crtc *crtc;
2327 Serge 9041
 
5060 serge 9042
	if (!dev_priv->mm.busy)
3031 serge 9043
		return;
2327 Serge 9044
 
5060 serge 9045
	dev_priv->mm.busy = false;
9046
 
9047
	if (!i915.powersave)
9048
		goto out;
9049
 
9050
	for_each_crtc(dev, crtc) {
9051
		if (!crtc->primary->fb)
3031 serge 9052
			continue;
2327 Serge 9053
 
3480 Serge 9054
		intel_decrease_pllclock(crtc);
3031 serge 9055
	}
4560 Serge 9056
 
5060 serge 9057
	if (INTEL_INFO(dev)->gen >= 6)
4560 Serge 9058
		gen6_rps_idle(dev->dev_private);
5060 serge 9059
 
9060
out:
9061
	intel_runtime_pm_put(dev_priv);
3031 serge 9062
}
2327 Serge 9063
 
2330 Serge 9064
static void intel_crtc_destroy(struct drm_crtc *crtc)
9065
{
9066
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9067
	struct drm_device *dev = crtc->dev;
9068
	struct intel_unpin_work *work;
2327 Serge 9069
 
5354 serge 9070
	spin_lock_irq(&dev->event_lock);
2330 Serge 9071
	work = intel_crtc->unpin_work;
9072
	intel_crtc->unpin_work = NULL;
5354 serge 9073
	spin_unlock_irq(&dev->event_lock);
2327 Serge 9074
 
2330 Serge 9075
	if (work) {
4293 Serge 9076
		cancel_work_sync(&work->work);
2330 Serge 9077
		kfree(work);
9078
	}
2327 Serge 9079
 
2330 Serge 9080
	drm_crtc_cleanup(crtc);
2327 Serge 9081
 
2330 Serge 9082
	kfree(intel_crtc);
9083
}
2327 Serge 9084
 
3031 serge 9085
#if 0
9086
static void intel_unpin_work_fn(struct work_struct *__work)
9087
{
9088
	struct intel_unpin_work *work =
9089
		container_of(__work, struct intel_unpin_work, work);
3243 Serge 9090
	struct drm_device *dev = work->crtc->dev;
5060 serge 9091
	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
2327 Serge 9092
 
3243 Serge 9093
	mutex_lock(&dev->struct_mutex);
3031 serge 9094
	intel_unpin_fb_obj(work->old_fb_obj);
9095
	drm_gem_object_unreference(&work->pending_flip_obj->base);
9096
	drm_gem_object_unreference(&work->old_fb_obj->base);
2327 Serge 9097
 
3243 Serge 9098
	intel_update_fbc(dev);
9099
	mutex_unlock(&dev->struct_mutex);
9100
 
5354 serge 9101
	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9102
 
3243 Serge 9103
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
9104
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
9105
 
3031 serge 9106
	kfree(work);
9107
}
2327 Serge 9108
 
3031 serge 9109
static void do_intel_finish_page_flip(struct drm_device *dev,
9110
				      struct drm_crtc *crtc)
9111
{
9112
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9113
	struct intel_unpin_work *work;
9114
	unsigned long flags;
2327 Serge 9115
 
3031 serge 9116
	/* Ignore early vblank irqs */
9117
	if (intel_crtc == NULL)
9118
		return;
2327 Serge 9119
 
5354 serge 9120
	/*
9121
	 * This is called both by irq handlers and the reset code (to complete
9122
	 * lost pageflips) so needs the full irqsave spinlocks.
9123
	 */
3031 serge 9124
	spin_lock_irqsave(&dev->event_lock, flags);
9125
	work = intel_crtc->unpin_work;
3243 Serge 9126
 
9127
	/* Ensure we don't miss a work->pending update ... */
9128
	smp_rmb();
9129
 
9130
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
3031 serge 9131
		spin_unlock_irqrestore(&dev->event_lock, flags);
9132
		return;
9133
	}
2327 Serge 9134
 
5354 serge 9135
	page_flip_completed(intel_crtc);
3243 Serge 9136
 
3031 serge 9137
	spin_unlock_irqrestore(&dev->event_lock, flags);
9138
}
2327 Serge 9139
 
3031 serge 9140
void intel_finish_page_flip(struct drm_device *dev, int pipe)
9141
{
5060 serge 9142
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9143
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2327 Serge 9144
 
3031 serge 9145
	do_intel_finish_page_flip(dev, crtc);
9146
}
2327 Serge 9147
 
3031 serge 9148
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
9149
{
5060 serge 9150
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9151
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
2327 Serge 9152
 
3031 serge 9153
	do_intel_finish_page_flip(dev, crtc);
9154
}
2327 Serge 9155
 
5060 serge 9156
/* Is 'a' after or equal to 'b'? */
9157
static bool g4x_flip_count_after_eq(u32 a, u32 b)
9158
{
9159
	return !((a - b) & 0x80000000);
9160
}
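/*
 * The unsigned subtraction above handles 32-bit wraparound: e.g. with
 * a = 2 and b = 0xfffffffe, a - b == 4, so the top bit is clear and 2 is
 * treated as coming after 0xfffffffe; swapping the arguments gives
 * 0xfffffffc, whose top bit is set, so the ordering flips as expected.
 */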
9161
 
9162
static bool page_flip_finished(struct intel_crtc *crtc)
9163
{
9164
	struct drm_device *dev = crtc->base.dev;
9165
	struct drm_i915_private *dev_priv = dev->dev_private;
9166
 
5354 serge 9167
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
9168
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
9169
		return true;
9170
 
5060 serge 9171
	/*
9172
	 * The relevant registers don't exist on pre-ctg.
9173
	 * As the flip done interrupt doesn't trigger for mmio
9174
	 * flips on gmch platforms, a flip count check isn't
9175
	 * really needed there. But since ctg has the registers,
9176
	 * include it in the check anyway.
9177
	 */
9178
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
9179
		return true;
9180
 
9181
	/*
9182
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
9183
	 * used the same base address. In that case the mmio flip might
9184
	 * have completed, but the CS hasn't even executed the flip yet.
9185
	 *
9186
	 * A flip count check isn't enough as the CS might have updated
9187
	 * the base address just after start of vblank, but before we
9188
	 * managed to process the interrupt. This means we'd complete the
9189
	 * CS flip too soon.
9190
	 *
9191
	 * Combining both checks should get us a good enough result. It may
9192
	 * still happen that the CS flip has been executed, but has not
9193
	 * yet actually completed. But in case the base address is the same
9194
	 * anyway, we don't really care.
9195
	 */
9196
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
9197
		crtc->unpin_work->gtt_offset &&
9198
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
9199
				    crtc->unpin_work->flip_count);
9200
}
9201
 
3031 serge 9202
void intel_prepare_page_flip(struct drm_device *dev, int plane)
9203
{
5060 serge 9204
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9205
	struct intel_crtc *intel_crtc =
9206
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
9207
	unsigned long flags;
2327 Serge 9208
 
5354 serge 9209
 
9210
	/*
9211
	 * This is called both by irq handlers and the reset code (to complete
9212
	 * lost pageflips) so needs the full irqsave spinlocks.
9213
	 *
9214
	 * NB: An MMIO update of the plane base pointer will also
3243 Serge 9215
	 * generate a page-flip completion irq, i.e. every modeset
9216
	 * is also accompanied by a spurious intel_prepare_page_flip().
9217
	 */
3031 serge 9218
	spin_lock_irqsave(&dev->event_lock, flags);
5060 serge 9219
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
3243 Serge 9220
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
3031 serge 9221
	spin_unlock_irqrestore(&dev->event_lock, flags);
9222
}
2327 Serge 9223
 
5060 serge 9224
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
3243 Serge 9225
{
9226
	/* Ensure that the work item is consistent when activating it ... */
9227
	smp_wmb();
9228
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
9229
	/* and that it is marked active as soon as the irq could fire. */
9230
	smp_wmb();
9231
}
9232
 
3031 serge 9233
static int intel_gen2_queue_flip(struct drm_device *dev,
9234
				 struct drm_crtc *crtc,
9235
				 struct drm_framebuffer *fb,
4104 Serge 9236
				 struct drm_i915_gem_object *obj,
5060 serge 9237
				 struct intel_engine_cs *ring,
4104 Serge 9238
				 uint32_t flags)
3031 serge 9239
{
9240
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9241
	u32 flip_mask;
9242
	int ret;
2327 Serge 9243
 
3031 serge 9244
	ret = intel_ring_begin(ring, 6);
9245
	if (ret)
5060 serge 9246
		return ret;
2327 Serge 9247
 
3031 serge 9248
	/* Can't queue multiple flips, so wait for the previous
9249
	 * one to finish before executing the next.
9250
	 */
9251
	if (intel_crtc->plane)
9252
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9253
	else
9254
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9255
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9256
	intel_ring_emit(ring, MI_NOOP);
9257
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9258
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9259
	intel_ring_emit(ring, fb->pitches[0]);
5060 serge 9260
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
3031 serge 9261
	intel_ring_emit(ring, 0); /* aux display base address, unused */
3243 Serge 9262
 
9263
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9264
	__intel_ring_advance(ring);
3031 serge 9265
	return 0;
9266
}
2327 Serge 9267
 
3031 serge 9268
static int intel_gen3_queue_flip(struct drm_device *dev,
9269
				 struct drm_crtc *crtc,
9270
				 struct drm_framebuffer *fb,
4104 Serge 9271
				 struct drm_i915_gem_object *obj,
5060 serge 9272
				 struct intel_engine_cs *ring,
4104 Serge 9273
				 uint32_t flags)
3031 serge 9274
{
9275
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9276
	u32 flip_mask;
9277
	int ret;
2327 Serge 9278
 
3031 serge 9279
	ret = intel_ring_begin(ring, 6);
9280
	if (ret)
5060 serge 9281
		return ret;
2327 Serge 9282
 
3031 serge 9283
	if (intel_crtc->plane)
9284
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9285
	else
9286
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9287
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9288
	intel_ring_emit(ring, MI_NOOP);
9289
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
9290
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9291
	intel_ring_emit(ring, fb->pitches[0]);
5060 serge 9292
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
3031 serge 9293
	intel_ring_emit(ring, MI_NOOP);
2327 Serge 9294
 
3243 Serge 9295
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9296
	__intel_ring_advance(ring);
3031 serge 9297
	return 0;
9298
}
2327 Serge 9299
 
3031 serge 9300
static int intel_gen4_queue_flip(struct drm_device *dev,
9301
				 struct drm_crtc *crtc,
9302
				 struct drm_framebuffer *fb,
4104 Serge 9303
				 struct drm_i915_gem_object *obj,
5060 serge 9304
				 struct intel_engine_cs *ring,
4104 Serge 9305
				 uint32_t flags)
3031 serge 9306
{
9307
	struct drm_i915_private *dev_priv = dev->dev_private;
9308
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9309
	uint32_t pf, pipesrc;
9310
	int ret;
2327 Serge 9311
 
3031 serge 9312
	ret = intel_ring_begin(ring, 4);
9313
	if (ret)
5060 serge 9314
		return ret;
2327 Serge 9315
 
3031 serge 9316
	/* i965+ uses the linear or tiled offsets from the
9317
	 * Display Registers (which do not change across a page-flip)
9318
	 * so we need only reprogram the base address.
9319
	 */
9320
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9321
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9322
	intel_ring_emit(ring, fb->pitches[0]);
5060 serge 9323
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
3031 serge 9324
			obj->tiling_mode);
2327 Serge 9325
 
3031 serge 9326
	/* XXX Enabling the panel-fitter across page-flip is so far
9327
	 * untested on non-native modes, so ignore it for now.
9328
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
9329
	 */
9330
	pf = 0;
9331
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9332
	intel_ring_emit(ring, pf | pipesrc);
3243 Serge 9333
 
9334
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9335
	__intel_ring_advance(ring);
3031 serge 9336
	return 0;
9337
}
2327 Serge 9338
 
3031 serge 9339
static int intel_gen6_queue_flip(struct drm_device *dev,
9340
				 struct drm_crtc *crtc,
9341
				 struct drm_framebuffer *fb,
4104 Serge 9342
				 struct drm_i915_gem_object *obj,
5060 serge 9343
				 struct intel_engine_cs *ring,
4104 Serge 9344
				 uint32_t flags)
3031 serge 9345
{
9346
	struct drm_i915_private *dev_priv = dev->dev_private;
9347
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9348
	uint32_t pf, pipesrc;
9349
	int ret;
2327 Serge 9350
 
3031 serge 9351
	ret = intel_ring_begin(ring, 4);
9352
	if (ret)
5060 serge 9353
		return ret;
2327 Serge 9354
 
3031 serge 9355
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9356
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9357
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
5060 serge 9358
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
2327 Serge 9359
 
3031 serge 9360
	/* Contrary to the suggestions in the documentation,
9361
	 * "Enable Panel Fitter" does not seem to be required when page
9362
	 * flipping with a non-native mode, and worse causes a normal
9363
	 * modeset to fail.
9364
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
9365
	 */
9366
	pf = 0;
9367
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9368
	intel_ring_emit(ring, pf | pipesrc);
3243 Serge 9369
 
9370
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9371
	__intel_ring_advance(ring);
3031 serge 9372
	return 0;
9373
}
2327 Serge 9374
 
3031 serge 9375
static int intel_gen7_queue_flip(struct drm_device *dev,
9376
				 struct drm_crtc *crtc,
9377
				 struct drm_framebuffer *fb,
4104 Serge 9378
				 struct drm_i915_gem_object *obj,
5060 serge 9379
				 struct intel_engine_cs *ring,
4104 Serge 9380
				 uint32_t flags)
3031 serge 9381
{
9382
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9383
	uint32_t plane_bit = 0;
4104 Serge 9384
	int len, ret;
2327 Serge 9385
 
5060 serge 9386
	switch (intel_crtc->plane) {
3031 serge 9387
	case PLANE_A:
9388
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
9389
		break;
9390
	case PLANE_B:
9391
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
9392
		break;
9393
	case PLANE_C:
9394
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
9395
		break;
9396
	default:
9397
		WARN_ONCE(1, "unknown plane in flip command\n");
5060 serge 9398
		return -ENODEV;
3031 serge 9399
	}
2327 Serge 9400
 
4104 Serge 9401
	len = 4;
5060 serge 9402
	if (ring->id == RCS) {
4104 Serge 9403
		len += 6;
5060 serge 9404
		/*
9405
		 * On Gen 8, SRM now takes an extra dword to accommodate
9406
		 * 48-bit addresses, and we need a NOOP for the batch size to
9407
		 * stay even.
9408
		 */
9409
		if (IS_GEN8(dev))
9410
			len += 2;
9411
	}
4104 Serge 9412
 
5060 serge 9413
	/*
9414
	 * BSpec MI_DISPLAY_FLIP for IVB:
9415
	 * "The full packet must be contained within the same cache line."
9416
	 *
9417
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
9418
	 * cacheline, if we ever start emitting more commands before
9419
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
9420
	 * then do the cacheline alignment, and finally emit the
9421
	 * MI_DISPLAY_FLIP.
9422
	 */
9423
	ret = intel_ring_cacheline_align(ring);
9424
	if (ret)
9425
		return ret;
9426
 
4104 Serge 9427
	ret = intel_ring_begin(ring, len);
3031 serge 9428
	if (ret)
5060 serge 9429
		return ret;
2327 Serge 9430
 
4104 Serge 9431
	/* Unmask the flip-done completion message. Note that the bspec says that
9432
	 * we should do this for both the BCS and RCS, and that we must not unmask
9433
	 * more than one flip event at any time (or ensure that one flip message
9434
	 * can be sent by waiting for flip-done prior to queueing new flips).
9435
	 * Experimentation says that BCS works despite DERRMR masking all
9436
	 * flip-done completion events and that unmasking all planes at once
9437
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
9438
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
9439
	 */
9440
	if (ring->id == RCS) {
9441
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9442
		intel_ring_emit(ring, DERRMR);
9443
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9444
					DERRMR_PIPEB_PRI_FLIP_DONE |
9445
					DERRMR_PIPEC_PRI_FLIP_DONE));
5060 serge 9446
		if (IS_GEN8(dev))
9447
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9448
					      MI_SRM_LRM_GLOBAL_GTT);
9449
		else
4560 Serge 9450
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
9451
					      MI_SRM_LRM_GLOBAL_GTT);
4104 Serge 9452
		intel_ring_emit(ring, DERRMR);
9453
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
5060 serge 9454
		if (IS_GEN8(dev)) {
9455
			intel_ring_emit(ring, 0);
9456
			intel_ring_emit(ring, MI_NOOP);
9457
		}
4104 Serge 9458
	}
9459
 
3031 serge 9460
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
9461
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
5060 serge 9462
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
3031 serge 9463
	intel_ring_emit(ring, (MI_NOOP));
3243 Serge 9464
 
9465
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9466
	__intel_ring_advance(ring);
3031 serge 9467
	return 0;
9468
}
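/*
 * For reference, the gen7 path above ends up emitting roughly the
 * following dwords on the render ring, padded so that the whole packet
 * stays within one cacheline as the IVB bspec requires:
 *
 *   MI_LOAD_REGISTER_IMM(1), DERRMR, ~(PIPEA/B/C_PRI_FLIP_DONE)
 *   MI_STORE_REGISTER_MEM(1) | GLOBAL_GTT, DERRMR, scratch + 256
 *   MI_DISPLAY_FLIP_I915 | plane_bit, pitch | tiling, gtt_offset, MI_NOOP
 *
 * On Gen8 the SRM carries one extra address dword plus a trailing NOOP so
 * the command count stays even.
 */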
2327 Serge 9469
 
3031 serge 9470
static int intel_default_queue_flip(struct drm_device *dev,
9471
				    struct drm_crtc *crtc,
9472
				    struct drm_framebuffer *fb,
4104 Serge 9473
				    struct drm_i915_gem_object *obj,
5060 serge 9474
				    struct intel_engine_cs *ring,
4104 Serge 9475
				    uint32_t flags)
3031 serge 9476
{
9477
	return -ENODEV;
9478
}
2327 Serge 9479
 
3031 serge 9480
static int intel_crtc_page_flip(struct drm_crtc *crtc,
9481
				struct drm_framebuffer *fb,
4104 Serge 9482
				struct drm_pending_vblank_event *event,
9483
				uint32_t page_flip_flags)
3031 serge 9484
{
9485
	struct drm_device *dev = crtc->dev;
9486
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 9487
	struct drm_framebuffer *old_fb = crtc->primary->fb;
9488
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3031 serge 9489
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 9490
	enum pipe pipe = intel_crtc->pipe;
3031 serge 9491
	struct intel_unpin_work *work;
5060 serge 9492
	struct intel_engine_cs *ring;
3031 serge 9493
	int ret;
2327 Serge 9494
 
5060 serge 9495
	/*
9496
	 * drm_mode_page_flip_ioctl() should already catch this, but double
9497
	 * check to be safe.  In the future we may enable pageflipping from
9498
	 * a disabled primary plane.
9499
	 */
9500
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
9501
		return -EBUSY;
9502
 
3031 serge 9503
	/* Can't change pixel format via MI display flips. */
5060 serge 9504
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
3031 serge 9505
		return -EINVAL;
2327 Serge 9506
 
3031 serge 9507
	/*
9508
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
9509
	 * Note that pitch changes could also affect these registers.
9510
	 */
9511
	if (INTEL_INFO(dev)->gen > 3 &&
5060 serge 9512
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
9513
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
3031 serge 9514
		return -EINVAL;
2327 Serge 9515
 
5354 serge 9516
	if (i915_terminally_wedged(&dev_priv->gpu_error))
9517
		goto out_hang;
9518
 
4560 Serge 9519
	work = kzalloc(sizeof(*work), GFP_KERNEL);
3031 serge 9520
	if (work == NULL)
9521
		return -ENOMEM;
2327 Serge 9522
 
3031 serge 9523
	work->event = event;
3243 Serge 9524
	work->crtc = crtc;
5060 serge 9525
	work->old_fb_obj = intel_fb_obj(old_fb);
3031 serge 9526
	INIT_WORK(&work->work, intel_unpin_work_fn);
2327 Serge 9527
 
5060 serge 9528
	ret = drm_crtc_vblank_get(crtc);
3031 serge 9529
	if (ret)
9530
		goto free_work;
2327 Serge 9531
 
3031 serge 9532
	/* We borrow the event spin lock for protecting unpin_work */
5354 serge 9533
	spin_lock_irq(&dev->event_lock);
3031 serge 9534
	if (intel_crtc->unpin_work) {
5354 serge 9535
		/* Before declaring the flip queue wedged, check if
9536
		 * the hardware completed the operation behind our backs.
9537
		 */
9538
		if (__intel_pageflip_stall_check(dev, crtc)) {
9539
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
9540
			page_flip_completed(intel_crtc);
9541
		} else {
9542
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9543
			spin_unlock_irq(&dev->event_lock);
9544
 
9545
			drm_crtc_vblank_put(crtc);
3031 serge 9546
			kfree(work);
9547
			return -EBUSY;
9548
		}
5354 serge 9549
	}
3031 serge 9550
	intel_crtc->unpin_work = work;
5354 serge 9551
	spin_unlock_irq(&dev->event_lock);
2327 Serge 9552
 
3243 Serge 9553
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9554
		flush_workqueue(dev_priv->wq);
9555
 
3031 serge 9556
	ret = i915_mutex_lock_interruptible(dev);
9557
	if (ret)
9558
		goto cleanup;
2327 Serge 9559
 
3031 serge 9560
	/* Reference the objects for the scheduled work. */
9561
	drm_gem_object_reference(&work->old_fb_obj->base);
9562
	drm_gem_object_reference(&obj->base);
2327 Serge 9563
 
5060 serge 9564
	crtc->primary->fb = fb;
2327 Serge 9565
 
3031 serge 9566
	work->pending_flip_obj = obj;
2327 Serge 9567
 
3243 Serge 9568
	atomic_inc(&intel_crtc->unpin_work_count);
3480 Serge 9569
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3031 serge 9570
 
5060 serge 9571
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9572
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
9573
 
9574
	if (IS_VALLEYVIEW(dev)) {
9575
		ring = &dev_priv->ring[BCS];
9576
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
9577
			/* vlv: DISPLAY_FLIP fails to change tiling */
9578
			ring = NULL;
9579
	} else if (IS_IVYBRIDGE(dev)) {
9580
		ring = &dev_priv->ring[BCS];
9581
	} else if (INTEL_INFO(dev)->gen >= 7) {
9582
		ring = obj->ring;
9583
		if (ring == NULL || ring->id != RCS)
9584
			ring = &dev_priv->ring[BCS];
9585
	} else {
9586
		ring = &dev_priv->ring[RCS];
9587
	}
9588
 
5354 serge 9589
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
3031 serge 9590
	if (ret)
9591
		goto cleanup_pending;
9592
 
5060 serge 9593
	work->gtt_offset =
9594
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9595
 
5354 serge 9596
	if (use_mmio_flip(ring, obj)) {
5060 serge 9597
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
9598
					    page_flip_flags);
5354 serge 9599
		if (ret)
9600
			goto cleanup_unpin;
9601
 
9602
		work->flip_queued_seqno = obj->last_write_seqno;
9603
		work->flip_queued_ring = obj->ring;
9604
	} else {
5060 serge 9605
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9606
				page_flip_flags);
9607
		if (ret)
9608
			goto cleanup_unpin;
9609
 
5354 serge 9610
		work->flip_queued_seqno = intel_ring_get_seqno(ring);
9611
		work->flip_queued_ring = ring;
9612
	}
9613
 
9614
	work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
9615
	work->enable_stall_check = true;
9616
 
5060 serge 9617
	i915_gem_track_fb(work->old_fb_obj, obj,
9618
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
9619
 
3031 serge 9620
	intel_disable_fbc(dev);
5060 serge 9621
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
3031 serge 9622
	mutex_unlock(&dev->struct_mutex);
9623
 
9624
	trace_i915_flip_request(intel_crtc->plane, obj);
9625
 
9626
	return 0;
9627
 
5060 serge 9628
cleanup_unpin:
9629
	intel_unpin_fb_obj(obj);
3031 serge 9630
cleanup_pending:
3243 Serge 9631
	atomic_dec(&intel_crtc->unpin_work_count);
5060 serge 9632
	crtc->primary->fb = old_fb;
3031 serge 9633
	drm_gem_object_unreference(&work->old_fb_obj->base);
9634
	drm_gem_object_unreference(&obj->base);
9635
	mutex_unlock(&dev->struct_mutex);
9636
 
9637
cleanup:
5354 serge 9638
	spin_lock_irq(&dev->event_lock);
3031 serge 9639
	intel_crtc->unpin_work = NULL;
5354 serge 9640
	spin_unlock_irq(&dev->event_lock);
3031 serge 9641
 
5060 serge 9642
	drm_crtc_vblank_put(crtc);
3031 serge 9643
free_work:
9644
	kfree(work);
9645
 
5060 serge 9646
	if (ret == -EIO) {
9647
out_hang:
5354 serge 9648
//       intel_crtc_wait_for_pending_flips(crtc);
5060 serge 9649
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
5354 serge 9650
		if (ret == 0 && event) {
9651
			spin_lock_irq(&dev->event_lock);
5060 serge 9652
			drm_send_vblank_event(dev, pipe, event);
5354 serge 9653
			spin_unlock_irq(&dev->event_lock);
9654
		}
5060 serge 9655
	}
3031 serge 9656
	return ret;
9657
}
9658
#endif
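/*
 * Ring selection in intel_crtc_page_flip() above: VLV prefers the blitter
 * ring and clears it when the tiling mode changes (DISPLAY_FLIP can't
 * change tiling there), IVB always uses the blitter, other gen7+ parts
 * reuse whichever ring last wrote the object (falling back to the
 * blitter), and older platforms use the render ring.  use_mmio_flip()
 * then decides whether the flip goes through intel_queue_mmio_flip()
 * or the ring-based ->queue_flip() hook.
 */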
9659
 
9660
static struct drm_crtc_helper_funcs intel_helper_funcs = {
9661
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
9662
	.load_lut = intel_crtc_load_lut,
9663
};
9664
 
9665
/**
9666
 * intel_modeset_update_staged_output_state
9667
 *
9668
 * Updates the staged output configuration state, e.g. after we've read out the
9669
 * current hw state.
9670
 */
9671
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9672
{
5060 serge 9673
	struct intel_crtc *crtc;
3031 serge 9674
	struct intel_encoder *encoder;
9675
	struct intel_connector *connector;
9676
 
9677
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9678
			    base.head) {
9679
		connector->new_encoder =
9680
			to_intel_encoder(connector->base.encoder);
9681
	}
9682
 
5354 serge 9683
	for_each_intel_encoder(dev, encoder) {
3031 serge 9684
		encoder->new_crtc =
9685
			to_intel_crtc(encoder->base.crtc);
9686
	}
5060 serge 9687
 
9688
	for_each_intel_crtc(dev, crtc) {
9689
		crtc->new_enabled = crtc->base.enabled;
9690
 
9691
		if (crtc->new_enabled)
9692
			crtc->new_config = &crtc->config;
9693
		else
9694
			crtc->new_config = NULL;
9695
	}
3031 serge 9696
}
9697
 
9698
/**
9699
 * intel_modeset_commit_output_state
9700
 *
9701
 * This function copies the stage display pipe configuration to the real one.
9702
 */
9703
static void intel_modeset_commit_output_state(struct drm_device *dev)
9704
{
5060 serge 9705
	struct intel_crtc *crtc;
3031 serge 9706
	struct intel_encoder *encoder;
9707
	struct intel_connector *connector;
9708
 
9709
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9710
			    base.head) {
9711
		connector->base.encoder = &connector->new_encoder->base;
9712
	}
9713
 
5354 serge 9714
	for_each_intel_encoder(dev, encoder) {
3031 serge 9715
		encoder->base.crtc = &encoder->new_crtc->base;
9716
	}
5060 serge 9717
 
9718
	for_each_intel_crtc(dev, crtc) {
9719
		crtc->base.enabled = crtc->new_enabled;
9720
	}
3031 serge 9721
}
9722
 
4104 Serge 9723
static void
5060 serge 9724
connected_sink_compute_bpp(struct intel_connector *connector,
4104 Serge 9725
			   struct intel_crtc_config *pipe_config)
9726
{
9727
	int bpp = pipe_config->pipe_bpp;
9728
 
9729
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
9730
		connector->base.base.id,
5060 serge 9731
		connector->base.name);
4104 Serge 9732
 
9733
	/* Don't use an invalid EDID bpc value */
9734
	if (connector->base.display_info.bpc &&
9735
	    connector->base.display_info.bpc * 3 < bpp) {
9736
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9737
			      bpp, connector->base.display_info.bpc*3);
9738
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9739
	}
9740
 
9741
	/* Clamp bpp to 8 on screens without EDID 1.4 */
9742
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
9743
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9744
			      bpp);
9745
		pipe_config->pipe_bpp = 24;
9746
	}
9747
}
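/*
 * Example of the clamping above: a pipe that starts out at 8 bpc
 * (pipe_bpp == 24) driving a panel whose EDID reports 6 bpc ends up with
 * pipe_bpp == 18; sinks that report no bpc at all are capped at 24.
 * A later plane-vs-pipe bpp mismatch is what turns on dithering.
 */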
9748
 
3746 Serge 9749
static int
4104 Serge 9750
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
3746 Serge 9751
		    struct drm_framebuffer *fb,
9752
		    struct intel_crtc_config *pipe_config)
9753
{
4104 Serge 9754
	struct drm_device *dev = crtc->base.dev;
9755
	struct intel_connector *connector;
3746 Serge 9756
	int bpp;
9757
 
9758
	switch (fb->pixel_format) {
9759
	case DRM_FORMAT_C8:
9760
		bpp = 8*3; /* since we go through a colormap */
9761
		break;
9762
	case DRM_FORMAT_XRGB1555:
9763
	case DRM_FORMAT_ARGB1555:
9764
		/* checked in intel_framebuffer_init already */
9765
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
9766
			return -EINVAL;
9767
	case DRM_FORMAT_RGB565:
9768
		bpp = 6*3; /* min is 18bpp */
9769
		break;
9770
	case DRM_FORMAT_XBGR8888:
9771
	case DRM_FORMAT_ABGR8888:
9772
		/* checked in intel_framebuffer_init already */
9773
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9774
			return -EINVAL;
9775
	case DRM_FORMAT_XRGB8888:
9776
	case DRM_FORMAT_ARGB8888:
9777
		bpp = 8*3;
9778
		break;
9779
	case DRM_FORMAT_XRGB2101010:
9780
	case DRM_FORMAT_ARGB2101010:
9781
	case DRM_FORMAT_XBGR2101010:
9782
	case DRM_FORMAT_ABGR2101010:
9783
		/* checked in intel_framebuffer_init already */
9784
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9785
			return -EINVAL;
9786
		bpp = 10*3;
9787
		break;
9788
	/* TODO: gen4+ supports 16 bpc floating point, too. */
9789
	default:
9790
		DRM_DEBUG_KMS("unsupported depth\n");
9791
		return -EINVAL;
9792
	}
9793
 
9794
	pipe_config->pipe_bpp = bpp;
9795
 
9796
	/* Clamp display bpp to EDID value */
9797
	list_for_each_entry(connector, &dev->mode_config.connector_list,
4104 Serge 9798
			    base.head) {
9799
		if (!connector->new_encoder ||
9800
		    connector->new_encoder->new_crtc != crtc)
3746 Serge 9801
			continue;
9802
 
4104 Serge 9803
		connected_sink_compute_bpp(connector, pipe_config);
3746 Serge 9804
	}
9805
 
9806
	return bpp;
9807
}
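/*
 * The switch above maps the primary plane format to a starting pipe bpp:
 * C8 and the 8888 formats give 24 (8 bpc), RGB565 and the 1555 formats
 * give 18 (6 bpc), and the 2101010 formats give 30 (10 bpc).  The WARNs
 * only guard formats that intel_framebuffer_init() should already have
 * rejected for the wrong gen.
 */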
9808
 
4560 Serge 9809
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
9810
{
9811
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
9812
			"type: 0x%x flags: 0x%x\n",
9813
		mode->crtc_clock,
9814
		mode->crtc_hdisplay, mode->crtc_hsync_start,
9815
		mode->crtc_hsync_end, mode->crtc_htotal,
9816
		mode->crtc_vdisplay, mode->crtc_vsync_start,
9817
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
9818
}
9819
 
4104 Serge 9820
static void intel_dump_pipe_config(struct intel_crtc *crtc,
9821
				   struct intel_crtc_config *pipe_config,
9822
				   const char *context)
9823
{
9824
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
9825
		      context, pipe_name(crtc->pipe));
9826
 
9827
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
9828
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
9829
		      pipe_config->pipe_bpp, pipe_config->dither);
9830
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9831
		      pipe_config->has_pch_encoder,
9832
		      pipe_config->fdi_lanes,
9833
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
9834
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
9835
		      pipe_config->fdi_m_n.tu);
4560 Serge 9836
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9837
		      pipe_config->has_dp_encoder,
9838
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
9839
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
9840
		      pipe_config->dp_m_n.tu);
5354 serge 9841
 
9842
	DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
9843
		      pipe_config->has_dp_encoder,
9844
		      pipe_config->dp_m2_n2.gmch_m,
9845
		      pipe_config->dp_m2_n2.gmch_n,
9846
		      pipe_config->dp_m2_n2.link_m,
9847
		      pipe_config->dp_m2_n2.link_n,
9848
		      pipe_config->dp_m2_n2.tu);
9849
 
9850
	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
9851
		      pipe_config->has_audio,
9852
		      pipe_config->has_infoframe);
9853
 
4104 Serge 9854
	DRM_DEBUG_KMS("requested mode:\n");
9855
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9856
	DRM_DEBUG_KMS("adjusted mode:\n");
9857
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
4560 Serge 9858
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
9859
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
9860
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
9861
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
4104 Serge 9862
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9863
		      pipe_config->gmch_pfit.control,
9864
		      pipe_config->gmch_pfit.pgm_ratios,
9865
		      pipe_config->gmch_pfit.lvds_border_bits);
9866
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
9867
		      pipe_config->pch_pfit.pos,
9868
		      pipe_config->pch_pfit.size,
9869
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
9870
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
4560 Serge 9871
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
4104 Serge 9872
}
9873
 
5060 serge 9874
static bool encoders_cloneable(const struct intel_encoder *a,
9875
			       const struct intel_encoder *b)
4104 Serge 9876
{
5060 serge 9877
	/* masks could be asymmetric, so check both ways */
9878
	return a == b || (a->cloneable & (1 << b->type) &&
9879
			  b->cloneable & (1 << a->type));
9880
}
9881
 
9882
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9883
					 struct intel_encoder *encoder)
9884
{
9885
	struct drm_device *dev = crtc->base.dev;
9886
	struct intel_encoder *source_encoder;
9887
 
5354 serge 9888
	for_each_intel_encoder(dev, source_encoder) {
5060 serge 9889
		if (source_encoder->new_crtc != crtc)
9890
			continue;
9891
 
9892
		if (!encoders_cloneable(encoder, source_encoder))
9893
			return false;
9894
	}
9895
 
9896
	return true;
9897
}
9898
 
9899
static bool check_encoder_cloning(struct intel_crtc *crtc)
9900
{
9901
	struct drm_device *dev = crtc->base.dev;
4104 Serge 9902
	struct intel_encoder *encoder;
9903
 
5354 serge 9904
	for_each_intel_encoder(dev, encoder) {
5060 serge 9905
		if (encoder->new_crtc != crtc)
4104 Serge 9906
			continue;
9907
 
5060 serge 9908
		if (!check_single_encoder_cloning(crtc, encoder))
9909
			return false;
4104 Serge 9910
	}
9911
 
5060 serge 9912
	return true;
4104 Serge 9913
}
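/*
 * Cloning is only allowed when every encoder staged on the crtc lists the
 * other's type in its cloneable bitmask.  For two encoders a and b the
 * test is a == b || ((a->cloneable & (1 << b->type)) &&
 * (b->cloneable & (1 << a->type))), checked pairwise over all staged
 * encoders above.
 */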
9914
 
5354 serge 9915
static bool check_digital_port_conflicts(struct drm_device *dev)
9916
{
9917
	struct intel_connector *connector;
9918
	unsigned int used_ports = 0;
9919
 
9920
	/*
9921
	 * Walk the connector list instead of the encoder
9922
	 * list to detect the problem on ddi platforms
9923
	 * where there's just one encoder per digital port.
9924
	 */
9925
	list_for_each_entry(connector,
9926
			    &dev->mode_config.connector_list, base.head) {
9927
		struct intel_encoder *encoder = connector->new_encoder;
9928
 
9929
		if (!encoder)
9930
			continue;
9931
 
9932
		WARN_ON(!encoder->new_crtc);
9933
 
9934
		switch (encoder->type) {
9935
			unsigned int port_mask;
9936
		case INTEL_OUTPUT_UNKNOWN:
9937
			if (WARN_ON(!HAS_DDI(dev)))
9938
				break;
9939
		case INTEL_OUTPUT_DISPLAYPORT:
9940
		case INTEL_OUTPUT_HDMI:
9941
		case INTEL_OUTPUT_EDP:
9942
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
9943
 
9944
			/* the same port mustn't appear more than once */
9945
			if (used_ports & port_mask)
9946
				return false;
9947
 
9948
			used_ports |= port_mask;
9949
		default:
9950
			break;
9951
		}
9952
	}
9953
 
9954
	return true;
9955
}
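/*
 * Example for the check above: if two connectors stage encoders that both
 * resolve to digital port B, the second one finds its bit already set in
 * used_ports and the whole configuration is rejected, because the same
 * physical port can't appear in the configuration twice.
 */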
9956
 
3746 Serge 9957
static struct intel_crtc_config *
9958
intel_modeset_pipe_config(struct drm_crtc *crtc,
9959
			  struct drm_framebuffer *fb,
3031 serge 9960
			  struct drm_display_mode *mode)
9961
{
9962
	struct drm_device *dev = crtc->dev;
9963
	struct intel_encoder *encoder;
3746 Serge 9964
	struct intel_crtc_config *pipe_config;
4104 Serge 9965
	int plane_bpp, ret = -EINVAL;
9966
	bool retry = true;
3031 serge 9967
 
5060 serge 9968
	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
4104 Serge 9969
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
9970
		return ERR_PTR(-EINVAL);
9971
	}
9972
 
5354 serge 9973
	if (!check_digital_port_conflicts(dev)) {
9974
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
9975
		return ERR_PTR(-EINVAL);
9976
	}
9977
 
3746 Serge 9978
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
9979
	if (!pipe_config)
3031 serge 9980
		return ERR_PTR(-ENOMEM);
9981
 
3746 Serge 9982
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
9983
	drm_mode_copy(&pipe_config->requested_mode, mode);
4560 Serge 9984
 
4104 Serge 9985
	pipe_config->cpu_transcoder =
9986
		(enum transcoder) to_intel_crtc(crtc)->pipe;
9987
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
3746 Serge 9988
 
4104 Serge 9989
	/*
9990
	 * Sanitize sync polarity flags based on requested ones. If neither
9991
	 * positive or negative polarity is requested, treat this as meaning
9992
	 * negative polarity.
9993
	 */
9994
	if (!(pipe_config->adjusted_mode.flags &
9995
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9996
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9997
 
9998
	if (!(pipe_config->adjusted_mode.flags &
9999
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
10000
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
10001
 
10002
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
10003
	 * plane pixel format and any sink constraints into account. Returns the
10004
	 * source plane bpp so that dithering can be selected on mismatches
10005
	 * after encoders and crtc also have had their say. */
10006
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
10007
					      fb, pipe_config);
3746 Serge 10008
	if (plane_bpp < 0)
10009
		goto fail;
10010
 
4560 Serge 10011
	/*
10012
	 * Determine the real pipe dimensions. Note that stereo modes can
10013
	 * increase the actual pipe size due to the frame doubling and
10014
	 * insertion of additional space for blanks between frames. This
10015
	 * is stored in the crtc timings. We use the requested mode to do this
10016
	 * computation to clearly distinguish it from the adjusted mode, which
10017
	 * can be changed by the connectors in the below retry loop.
10018
	 */
10019
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
10020
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
10021
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
10022
 
4104 Serge 10023
encoder_retry:
10024
	/* Ensure the port clock defaults are reset when retrying. */
10025
	pipe_config->port_clock = 0;
10026
	pipe_config->pixel_multiplier = 1;
10027
 
10028
	/* Fill in default crtc timings, allow encoders to overwrite them. */
4560 Serge 10029
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
4104 Serge 10030
 
3031 serge 10031
	/* Pass our mode to the connectors and the CRTC to give them a chance to
10032
	 * adjust it according to limitations or connector properties, and also
10033
	 * a chance to reject the mode entirely.
2330 Serge 10034
	 */
5354 serge 10035
	for_each_intel_encoder(dev, encoder) {
2327 Serge 10036
 
3031 serge 10037
		if (&encoder->new_crtc->base != crtc)
10038
			continue;
3746 Serge 10039
 
10040
		if (!(encoder->compute_config(encoder, pipe_config))) {
10041
			DRM_DEBUG_KMS("Encoder config failure\n");
10042
			goto fail;
10043
		}
10044
	}
10045
 
4104 Serge 10046
	/* Set default port clock if not overwritten by the encoder. Needs to be
10047
	 * done afterwards in case the encoder adjusts the mode. */
10048
	if (!pipe_config->port_clock)
4560 Serge 10049
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
10050
			* pipe_config->pixel_multiplier;
2327 Serge 10051
 
4104 Serge 10052
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
10053
	if (ret < 0) {
3031 serge 10054
		DRM_DEBUG_KMS("CRTC fixup failed\n");
10055
		goto fail;
10056
	}
2327 Serge 10057
 
4104 Serge 10058
	if (ret == RETRY) {
10059
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
10060
			ret = -EINVAL;
10061
			goto fail;
10062
		}
10063
 
10064
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
10065
		retry = false;
10066
		goto encoder_retry;
10067
	}
10068
 
3746 Serge 10069
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
10070
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
10071
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
10072
 
10073
	return pipe_config;
3031 serge 10074
fail:
3746 Serge 10075
	kfree(pipe_config);
4104 Serge 10076
	return ERR_PTR(ret);
3031 serge 10077
}
2327 Serge 10078
 
3031 serge 10079
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
10080
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
10081
static void
10082
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
10083
			     unsigned *prepare_pipes, unsigned *disable_pipes)
10084
{
10085
	struct intel_crtc *intel_crtc;
10086
	struct drm_device *dev = crtc->dev;
10087
	struct intel_encoder *encoder;
10088
	struct intel_connector *connector;
10089
	struct drm_crtc *tmp_crtc;
10090
 
10091
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
10092
 
10093
	/* Check which crtcs have changed outputs connected to them, these need
10094
	 * to be part of the prepare_pipes mask. We don't (yet) support global
10095
	 * modeset across multiple crtcs, so modeset_pipes will only have one
10096
	 * bit set at most. */
10097
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10098
			    base.head) {
10099
		if (connector->base.encoder == &connector->new_encoder->base)
10100
			continue;
10101
 
10102
		if (connector->base.encoder) {
10103
			tmp_crtc = connector->base.encoder->crtc;
10104
 
10105
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10106
		}
10107
 
10108
		if (connector->new_encoder)
10109
			*prepare_pipes |=
10110
				1 << connector->new_encoder->new_crtc->pipe;
10111
	}
10112
 
5354 serge 10113
	for_each_intel_encoder(dev, encoder) {
3031 serge 10114
		if (encoder->base.crtc == &encoder->new_crtc->base)
10115
			continue;
10116
 
10117
		if (encoder->base.crtc) {
10118
			tmp_crtc = encoder->base.crtc;
10119
 
10120
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10121
		}
10122
 
10123
		if (encoder->new_crtc)
10124
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
10125
	}
10126
 
5060 serge 10127
	/* Check for pipes that will be enabled/disabled ... */
10128
	for_each_intel_crtc(dev, intel_crtc) {
10129
		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
3031 serge 10130
			continue;
10131
 
5060 serge 10132
		if (!intel_crtc->new_enabled)
3031 serge 10133
			*disable_pipes |= 1 << intel_crtc->pipe;
5060 serge 10134
		else
10135
			*prepare_pipes |= 1 << intel_crtc->pipe;
3031 serge 10136
	}
10137
 
10138
 
10139
	/* set_mode is also used to update properties on live display pipes. */
10140
	intel_crtc = to_intel_crtc(crtc);
5060 serge 10141
	if (intel_crtc->new_enabled)
3031 serge 10142
		*prepare_pipes |= 1 << intel_crtc->pipe;
10143
 
3746 Serge 10144
	/*
10145
	 * For simplicity do a full modeset on any pipe where the output routing
10146
	 * changed. We could be more clever, but that would require us to be
10147
	 * more careful with calling the relevant encoder->mode_set functions.
10148
	 */
3031 serge 10149
	if (*prepare_pipes)
10150
		*modeset_pipes = *prepare_pipes;
10151
 
10152
	/* ... and mask these out. */
10153
	*modeset_pipes &= ~(*disable_pipes);
10154
	*prepare_pipes &= ~(*disable_pipes);
3746 Serge 10155
 
10156
	/*
10157
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
10158
	 * obeys this rule, but the modeset restore mode of
10159
	 * intel_modeset_setup_hw_state does not.
10160
	 */
10161
	*modeset_pipes &= 1 << intel_crtc->pipe;
10162
	*prepare_pipes &= 1 << intel_crtc->pipe;
4104 Serge 10163
 
10164
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
10165
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
2330 Serge 10166
}
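/*
 * The masks computed above are per-pipe bitmasks.  E.g. a modeset on
 * pipe B that takes over a connector currently routed to pipe A first
 * marks both pipes in prepare_pipes; the single-crtc restriction at the
 * end of the function then narrows modeset_pipes and prepare_pipes down
 * to pipe B only, while disable_pipes collects crtcs whose new_enabled
 * is false.
 */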
2327 Serge 10167
 
3031 serge 10168
static bool intel_crtc_in_use(struct drm_crtc *crtc)
2330 Serge 10169
{
3031 serge 10170
	struct drm_encoder *encoder;
2330 Serge 10171
	struct drm_device *dev = crtc->dev;
2327 Serge 10172
 
3031 serge 10173
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10174
		if (encoder->crtc == crtc)
10175
			return true;
10176
 
10177
	return false;
10178
}
10179
 
10180
static void
10181
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10182
{
5354 serge 10183
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10184
	struct intel_encoder *intel_encoder;
10185
	struct intel_crtc *intel_crtc;
10186
	struct drm_connector *connector;
10187
 
5354 serge 10188
	intel_shared_dpll_commit(dev_priv);
10189
 
10190
	for_each_intel_encoder(dev, intel_encoder) {
3031 serge 10191
		if (!intel_encoder->base.crtc)
10192
			continue;
10193
 
10194
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
10195
 
10196
		if (prepare_pipes & (1 << intel_crtc->pipe))
10197
			intel_encoder->connectors_active = false;
10198
	}
10199
 
10200
	intel_modeset_commit_output_state(dev);
10201
 
5060 serge 10202
	/* Double check state. */
10203
	for_each_intel_crtc(dev, intel_crtc) {
10204
		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
10205
		WARN_ON(intel_crtc->new_config &&
10206
			intel_crtc->new_config != &intel_crtc->config);
10207
		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
3031 serge 10208
	}
10209
 
10210
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10211
		if (!connector->encoder || !connector->encoder->crtc)
10212
			continue;
10213
 
10214
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
10215
 
10216
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
10217
			struct drm_property *dpms_property =
10218
				dev->mode_config.dpms_property;
10219
 
10220
			connector->dpms = DRM_MODE_DPMS_ON;
3243 Serge 10221
			drm_object_property_set_value(&connector->base,
3031 serge 10222
							 dpms_property,
10223
							 DRM_MODE_DPMS_ON);
10224
 
10225
			intel_encoder = to_intel_encoder(connector->encoder);
10226
			intel_encoder->connectors_active = true;
10227
		}
10228
	}
10229
 
10230
}
10231
 
4560 Serge 10232
static bool intel_fuzzy_clock_check(int clock1, int clock2)
4104 Serge 10233
{
4560 Serge 10234
	int diff;
4104 Serge 10235
 
10236
	if (clock1 == clock2)
10237
		return true;
10238
 
10239
	if (!clock1 || !clock2)
10240
		return false;
10241
 
10242
	diff = abs(clock1 - clock2);
10243
 
10244
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
10245
		return true;
10246
 
10247
	return false;
10248
}
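/*
 * The comparison above allows roughly +/-5% deviation.  Worked example:
 * clock1 = 100000, clock2 = 104000 gives diff = 4000 and
 * (4000 + 100000 + 104000) * 100 / 204000 = 101, which is < 105 and
 * therefore a match; clock2 = 112000 evaluates to 105 and fails.
 */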
10249
 
3031 serge 10250
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
10251
	list_for_each_entry((intel_crtc), \
10252
			    &(dev)->mode_config.crtc_list, \
10253
			    base.head) \
4104 Serge 10254
		if (mask & (1 <<(intel_crtc)->pipe))
3031 serge 10255
 
3746 Serge 10256
static bool
4104 Serge 10257
intel_pipe_config_compare(struct drm_device *dev,
10258
			  struct intel_crtc_config *current_config,
3746 Serge 10259
			  struct intel_crtc_config *pipe_config)
10260
{
4104 Serge 10261
#define PIPE_CONF_CHECK_X(name)	\
10262
	if (current_config->name != pipe_config->name) { \
10263
		DRM_ERROR("mismatch in " #name " " \
10264
			  "(expected 0x%08x, found 0x%08x)\n", \
10265
			  current_config->name, \
10266
			  pipe_config->name); \
10267
		return false; \
3746 Serge 10268
	}
10269
 
4104 Serge 10270
#define PIPE_CONF_CHECK_I(name)	\
10271
	if (current_config->name != pipe_config->name) { \
10272
		DRM_ERROR("mismatch in " #name " " \
10273
			  "(expected %i, found %i)\n", \
10274
			  current_config->name, \
10275
			  pipe_config->name); \
10276
		return false; \
10277
	}
10278
 
5354 serge 10279
/* This is required for BDW+ where there is only one set of registers for
10280
 * switching between high and low RR.
10281
 * This macro can be used whenever a comparison has to be made between one
10282
 * hw state and multiple sw state variables.
10283
 */
10284
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
10285
	if ((current_config->name != pipe_config->name) && \
10286
		(current_config->alt_name != pipe_config->name)) { \
10287
			DRM_ERROR("mismatch in " #name " " \
10288
				  "(expected %i or %i, found %i)\n", \
10289
				  current_config->name, \
10290
				  current_config->alt_name, \
10291
				  pipe_config->name); \
10292
			return false; \
10293
	}
10294
 
4104 Serge 10295
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
10296
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
10297
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
10298
			  "(expected %i, found %i)\n", \
10299
			  current_config->name & (mask), \
10300
			  pipe_config->name & (mask)); \
10301
		return false; \
10302
	}
10303
 
4560 Serge 10304
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
10305
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10306
		DRM_ERROR("mismatch in " #name " " \
10307
			  "(expected %i, found %i)\n", \
10308
			  current_config->name, \
10309
			  pipe_config->name); \
10310
		return false; \
10311
	}
10312
 
4104 Serge 10313
#define PIPE_CONF_QUIRK(quirk)	\
10314
	((current_config->quirks | pipe_config->quirks) & (quirk))
10315
 
10316
	PIPE_CONF_CHECK_I(cpu_transcoder);
10317
 
10318
	PIPE_CONF_CHECK_I(has_pch_encoder);
10319
	PIPE_CONF_CHECK_I(fdi_lanes);
10320
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
10321
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
10322
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
10323
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
10324
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
10325
 
4560 Serge 10326
	PIPE_CONF_CHECK_I(has_dp_encoder);
5354 serge 10327
 
10328
	if (INTEL_INFO(dev)->gen < 8) {
4560 Serge 10329
		PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
10330
		PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
10331
		PIPE_CONF_CHECK_I(dp_m_n.link_m);
10332
		PIPE_CONF_CHECK_I(dp_m_n.link_n);
10333
		PIPE_CONF_CHECK_I(dp_m_n.tu);
10334
 
5354 serge 10335
		if (current_config->has_drrs) {
10336
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
10337
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
10338
			PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
10339
			PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
10340
			PIPE_CONF_CHECK_I(dp_m2_n2.tu);
10341
		}
10342
	} else {
10343
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
10344
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
10345
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
10346
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
10347
		PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
10348
	}
10349
 
4104 Serge 10350
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10351
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
10352
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
10353
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
10354
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
10355
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
10356
 
10357
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
10358
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
10359
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
10360
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
10361
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
10362
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
10363
 
10364
	PIPE_CONF_CHECK_I(pixel_multiplier);
5060 serge 10365
	PIPE_CONF_CHECK_I(has_hdmi_sink);
10366
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10367
	    IS_VALLEYVIEW(dev))
10368
		PIPE_CONF_CHECK_I(limited_color_range);
5354 serge 10369
	PIPE_CONF_CHECK_I(has_infoframe);
4104 Serge 10370
 
5060 serge 10371
	PIPE_CONF_CHECK_I(has_audio);
10372
 
4104 Serge 10373
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10374
			      DRM_MODE_FLAG_INTERLACE);
10375
 
10376
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
10377
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10378
				      DRM_MODE_FLAG_PHSYNC);
10379
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10380
				      DRM_MODE_FLAG_NHSYNC);
10381
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10382
				      DRM_MODE_FLAG_PVSYNC);
10383
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10384
				      DRM_MODE_FLAG_NVSYNC);
10385
	}
10386
 
4560 Serge 10387
	PIPE_CONF_CHECK_I(pipe_src_w);
10388
	PIPE_CONF_CHECK_I(pipe_src_h);
4104 Serge 10389
 
5060 serge 10390
	/*
10391
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
10392
	 * screen. Since we don't yet re-compute the pipe config when moving
10393
	 * just the lvds port away to another pipe the sw tracking won't match.
10394
	 *
10395
	 * Proper atomic modesets with recomputed global state will fix this.
10396
	 * Until then just don't check gmch state for inherited modes.
10397
	 */
10398
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
4104 Serge 10399
		PIPE_CONF_CHECK_I(gmch_pfit.control);
10400
		/* pfit ratios are autocomputed by the hw on gen4+ */
10401
		if (INTEL_INFO(dev)->gen < 4)
10402
			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
10403
		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
5060 serge 10404
	}
10405
 
4104 Serge 10406
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
10407
	if (current_config->pch_pfit.enabled) {
10408
		PIPE_CONF_CHECK_I(pch_pfit.pos);
10409
		PIPE_CONF_CHECK_I(pch_pfit.size);
10410
	}
10411
 
4560 Serge 10412
	/* BDW+ don't expose a synchronous way to read the state */
10413
	if (IS_HASWELL(dev))
4104 Serge 10414
		PIPE_CONF_CHECK_I(ips_enabled);
10415
 
4560 Serge 10416
	PIPE_CONF_CHECK_I(double_wide);
10417
 
5060 serge 10418
	PIPE_CONF_CHECK_X(ddi_pll_sel);
10419
 
4104 Serge 10420
	PIPE_CONF_CHECK_I(shared_dpll);
10421
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10422
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10423
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10424
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5060 serge 10425
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
5354 serge 10426
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
10427
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
10428
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
4104 Serge 10429
 
4280 Serge 10430
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10431
		PIPE_CONF_CHECK_I(pipe_bpp);
10432
 
4560 Serge 10433
	PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
10434
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10435
 
4104 Serge 10436
#undef PIPE_CONF_CHECK_X
10437
#undef PIPE_CONF_CHECK_I
5354 serge 10438
#undef PIPE_CONF_CHECK_I_ALT
4104 Serge 10439
#undef PIPE_CONF_CHECK_FLAGS
4560 Serge 10440
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
4104 Serge 10441
#undef PIPE_CONF_QUIRK
10442
 
3746 Serge 10443
	return true;
10444
}
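/*
 * Each PIPE_CONF_CHECK_* invocation above expands to an early return on
 * the first mismatching field.  E.g. PIPE_CONF_CHECK_I(pipe_bpp) becomes:
 *
 *   if (current_config->pipe_bpp != pipe_config->pipe_bpp) {
 *       DRM_ERROR("mismatch in pipe_bpp (expected %i, found %i)\n",
 *                 current_config->pipe_bpp, pipe_config->pipe_bpp);
 *       return false;
 *   }
 *
 * so intel_pipe_config_compare() reports only the first difference it hits.
 */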
10445
 
5354 serge 10446
static void check_wm_state(struct drm_device *dev)
10447
{
10448
	struct drm_i915_private *dev_priv = dev->dev_private;
10449
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
10450
	struct intel_crtc *intel_crtc;
10451
	int plane;
10452
 
10453
	if (INTEL_INFO(dev)->gen < 9)
10454
		return;
10455
 
10456
	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
10457
	sw_ddb = &dev_priv->wm.skl_hw.ddb;
10458
 
10459
	for_each_intel_crtc(dev, intel_crtc) {
10460
		struct skl_ddb_entry *hw_entry, *sw_entry;
10461
		const enum pipe pipe = intel_crtc->pipe;
10462
 
10463
		if (!intel_crtc->active)
10464
			continue;
10465
 
10466
		/* planes */
10467
		for_each_plane(pipe, plane) {
10468
			hw_entry = &hw_ddb.plane[pipe][plane];
10469
			sw_entry = &sw_ddb->plane[pipe][plane];
10470
 
10471
			if (skl_ddb_entry_equal(hw_entry, sw_entry))
10472
				continue;
10473
 
10474
			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
10475
				  "(expected (%u,%u), found (%u,%u))\n",
10476
				  pipe_name(pipe), plane + 1,
10477
				  sw_entry->start, sw_entry->end,
10478
				  hw_entry->start, hw_entry->end);
10479
		}
10480
 
10481
		/* cursor */
10482
		hw_entry = &hw_ddb.cursor[pipe];
10483
		sw_entry = &sw_ddb->cursor[pipe];
10484
 
10485
		if (skl_ddb_entry_equal(hw_entry, sw_entry))
10486
			continue;
10487
 
10488
		DRM_ERROR("mismatch in DDB state pipe %c cursor "
10489
			  "(expected (%u,%u), found (%u,%u))\n",
10490
			  pipe_name(pipe),
10491
			  sw_entry->start, sw_entry->end,
10492
			  hw_entry->start, hw_entry->end);
10493
	}
10494
}
10495
 
4104 Serge 10496
static void
10497
check_connector_state(struct drm_device *dev)
3031 serge 10498
{
10499
	struct intel_connector *connector;
10500
 
10501
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10502
			    base.head) {
10503
		/* This also checks the encoder/connector hw state with the
10504
		 * ->get_hw_state callbacks. */
10505
		intel_connector_check_state(connector);
10506
 
10507
		WARN(&connector->new_encoder->base != connector->base.encoder,
10508
		     "connector's staged encoder doesn't match current encoder\n");
10509
	}
4104 Serge 10510
}
3031 serge 10511
 
4104 Serge 10512
static void
10513
check_encoder_state(struct drm_device *dev)
10514
{
10515
	struct intel_encoder *encoder;
10516
	struct intel_connector *connector;
10517
 
5354 serge 10518
	for_each_intel_encoder(dev, encoder) {
3031 serge 10519
		bool enabled = false;
10520
		bool active = false;
10521
		enum pipe pipe, tracked_pipe;
10522
 
10523
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
10524
			      encoder->base.base.id,
5060 serge 10525
			      encoder->base.name);
3031 serge 10526
 
10527
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
10528
		     "encoder's stage crtc doesn't match current crtc\n");
10529
		WARN(encoder->connectors_active && !encoder->base.crtc,
10530
		     "encoder's active_connectors set, but no crtc\n");
10531
 
10532
		list_for_each_entry(connector, &dev->mode_config.connector_list,
10533
				    base.head) {
10534
			if (connector->base.encoder != &encoder->base)
10535
				continue;
10536
			enabled = true;
10537
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
10538
				active = true;
10539
		}
5060 serge 10540
		/*
10541
		 * for MST connectors, if we unplug, the connector goes
10542
		 * away but the encoder is still connected to a crtc
10543
		 * until a modeset happens in response to the hotplug.
10544
		 */
10545
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
10546
			continue;
10547
 
3031 serge 10548
		WARN(!!encoder->base.crtc != enabled,
10549
		     "encoder's enabled state mismatch "
10550
		     "(expected %i, found %i)\n",
10551
		     !!encoder->base.crtc, enabled);
10552
		WARN(active && !encoder->base.crtc,
10553
		     "active encoder with no crtc\n");
10554
 
10555
		WARN(encoder->connectors_active != active,
10556
		     "encoder's computed active state doesn't match tracked active state "
10557
		     "(expected %i, found %i)\n", active, encoder->connectors_active);
10558
 
10559
		active = encoder->get_hw_state(encoder, &pipe);
10560
		WARN(active != encoder->connectors_active,
10561
		     "encoder's hw state doesn't match sw tracking "
10562
		     "(expected %i, found %i)\n",
10563
		     encoder->connectors_active, active);
10564
 
10565
		if (!encoder->base.crtc)
10566
			continue;
10567
 
10568
		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
10569
		WARN(active && pipe != tracked_pipe,
10570
		     "active encoder's pipe doesn't match"
10571
		     "(expected %i, found %i)\n",
10572
		     tracked_pipe, pipe);
10573
 
10574
	}
4104 Serge 10575
}
3031 serge 10576
 
4104 Serge 10577
static void
10578
check_crtc_state(struct drm_device *dev)
10579
{
5060 serge 10580
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 10581
	struct intel_crtc *crtc;
10582
	struct intel_encoder *encoder;
10583
	struct intel_crtc_config pipe_config;
10584
 
5060 serge 10585
	for_each_intel_crtc(dev, crtc) {
3031 serge 10586
		bool enabled = false;
10587
		bool active = false;
10588
 
4104 Serge 10589
		memset(&pipe_config, 0, sizeof(pipe_config));
10590
 
3031 serge 10591
		DRM_DEBUG_KMS("[CRTC:%d]\n",
10592
			      crtc->base.base.id);
10593
 
10594
		WARN(crtc->active && !crtc->base.enabled,
10595
		     "active crtc, but not enabled in sw tracking\n");
10596
 
5354 serge 10597
		for_each_intel_encoder(dev, encoder) {
3031 serge 10598
			if (encoder->base.crtc != &crtc->base)
10599
				continue;
10600
			enabled = true;
10601
			if (encoder->connectors_active)
10602
				active = true;
10603
		}
4104 Serge 10604
 
3031 serge 10605
		WARN(active != crtc->active,
10606
		     "crtc's computed active state doesn't match tracked active state "
10607
		     "(expected %i, found %i)\n", active, crtc->active);
10608
		WARN(enabled != crtc->base.enabled,
10609
		     "crtc's computed enabled state doesn't match tracked enabled state "
10610
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
10611
 
3746 Serge 10612
		active = dev_priv->display.get_pipe_config(crtc,
10613
							   &pipe_config);
10614
 
5354 serge 10615
		/* hw state is inconsistent with the pipe quirk */
10616
		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
10617
		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
3746 Serge 10618
			active = crtc->active;
10619
 
5354 serge 10620
		for_each_intel_encoder(dev, encoder) {
4104 Serge 10621
			enum pipe pipe;
10622
			if (encoder->base.crtc != &crtc->base)
10623
				continue;
4560 Serge 10624
			if (encoder->get_hw_state(encoder, &pipe))
4104 Serge 10625
				encoder->get_config(encoder, &pipe_config);
10626
		}
10627
 
3746 Serge 10628
		WARN(crtc->active != active,
10629
		     "crtc active state doesn't match with hw state "
10630
		     "(expected %i, found %i)\n", crtc->active, active);
10631
 
4104 Serge 10632
		if (active &&
10633
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
10634
			WARN(1, "pipe state doesn't match!\n");
10635
			intel_dump_pipe_config(crtc, &pipe_config,
10636
					       "[hw state]");
10637
			intel_dump_pipe_config(crtc, &crtc->config,
10638
					       "[sw state]");
10639
		}
3031 serge 10640
	}
10641
}
10642
 
4104 Serge 10643
static void
10644
check_shared_dpll_state(struct drm_device *dev)
10645
{
5060 serge 10646
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 10647
	struct intel_crtc *crtc;
10648
	struct intel_dpll_hw_state dpll_hw_state;
10649
	int i;
10650
 
10651
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10652
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10653
		int enabled_crtcs = 0, active_crtcs = 0;
10654
		bool active;
10655
 
10656
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
10657
 
10658
		DRM_DEBUG_KMS("%s\n", pll->name);
10659
 
10660
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
10661
 
5354 serge 10662
		WARN(pll->active > hweight32(pll->config.crtc_mask),
4104 Serge 10663
		     "more active pll users than references: %i vs %i\n",
5354 serge 10664
		     pll->active, hweight32(pll->config.crtc_mask));
4104 Serge 10665
		WARN(pll->active && !pll->on,
10666
		     "pll in active use but not on in sw tracking\n");
10667
		WARN(pll->on && !pll->active,
10668
		     "pll in on but not on in use in sw tracking\n");
10669
		WARN(pll->on != active,
10670
		     "pll on state mismatch (expected %i, found %i)\n",
10671
		     pll->on, active);
10672
 
5060 serge 10673
		for_each_intel_crtc(dev, crtc) {
4104 Serge 10674
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
10675
				enabled_crtcs++;
10676
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10677
				active_crtcs++;
10678
		}
10679
		WARN(pll->active != active_crtcs,
10680
		     "pll active crtcs mismatch (expected %i, found %i)\n",
10681
		     pll->active, active_crtcs);
5354 serge 10682
		WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
4104 Serge 10683
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
5354 serge 10684
		     hweight32(pll->config.crtc_mask), enabled_crtcs);
4104 Serge 10685
 
5354 serge 10686
		WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
4104 Serge 10687
				       sizeof(dpll_hw_state)),
10688
		     "pll hw state mismatch\n");
10689
	}
10690
}
10691
 
10692
void
10693
intel_modeset_check_state(struct drm_device *dev)
10694
{
5354 serge 10695
	check_wm_state(dev);
4104 Serge 10696
	check_connector_state(dev);
10697
	check_encoder_state(dev);
10698
	check_crtc_state(dev);
10699
	check_shared_dpll_state(dev);
10700
}
10701
 
4560 Serge 10702
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
10703
				     int dotclock)
10704
{
10705
	/*
10706
	 * FDI already provided one idea for the dotclock.
10707
	 * Yell if the encoder disagrees.
10708
	 */
10709
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
10710
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10711
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
10712
}
10713
 
5060 serge 10714
static void update_scanline_offset(struct intel_crtc *crtc)
10715
{
10716
	struct drm_device *dev = crtc->base.dev;
10717
 
10718
	/*
10719
	 * The scanline counter increments at the leading edge of hsync.
10720
	 *
10721
	 * On most platforms it starts counting from vtotal-1 on the
10722
	 * first active line. That means the scanline counter value is
10723
	 * always one less than what we would expect. Ie. just after
10724
	 * start of vblank, which also occurs at start of hsync (on the
10725
	 * last active line), the scanline counter will read vblank_start-1.
10726
	 *
10727
	 * On gen2 the scanline counter starts counting from 1 instead
10728
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10729
	 * to keep the value positive), instead of adding one.
10730
	 *
10731
	 * On HSW+ the behaviour of the scanline counter depends on the output
10732
	 * type. For DP ports it behaves like most other platforms, but on HDMI
10733
	 * there's an extra 1 line difference. So we need to add two instead of
10734
	 * one to the value.
10735
	 */
10736
	if (IS_GEN2(dev)) {
10737
		const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
10738
		int vtotal;
10739
 
10740
		vtotal = mode->crtc_vtotal;
10741
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
10742
			vtotal /= 2;
10743
 
10744
		crtc->scanline_offset = vtotal - 1;
10745
	} else if (HAS_DDI(dev) &&
5354 serge 10746
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
5060 serge 10747
		crtc->scanline_offset = 2;
10748
	} else
10749
		crtc->scanline_offset = 1;
10750
}
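/*
 * Example of the offsets chosen above: a gen2 progressive mode with
 * crtc_vtotal == 806 gets scanline_offset = 805 (i.e. vtotal - 1), HSW+
 * HDMI outputs get 2, and everything else gets 1.  The offset compensates
 * for where the hardware counter starts so that scanline readback lines
 * up with vblank_start.
 */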
10751
 
5354 serge 10752
static struct intel_crtc_config *
10753
intel_modeset_compute_config(struct drm_crtc *crtc,
10754
			     struct drm_display_mode *mode,
10755
			     struct drm_framebuffer *fb,
10756
			     unsigned *modeset_pipes,
10757
			     unsigned *prepare_pipes,
10758
			     unsigned *disable_pipes)
10759
{
10760
	struct intel_crtc_config *pipe_config = NULL;
10761
 
10762
	intel_modeset_affected_pipes(crtc, modeset_pipes,
10763
				     prepare_pipes, disable_pipes);
10764
 
10765
	if ((*modeset_pipes) == 0)
10766
		goto out;
10767
 
10768
	/*
10769
	 * Note this needs changes when we start tracking multiple modes
10770
	 * and crtcs.  At that point we'll need to compute the whole config
10771
	 * (i.e. one pipe_config for each crtc) rather than just the one
10772
	 * for this crtc.
10773
	 */
10774
	pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
10775
	if (IS_ERR(pipe_config)) {
10776
		goto out;
10777
	}
10778
	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
10779
			       "[modeset]");
10780
 
10781
out:
10782
	return pipe_config;
10783
}
10784
 
3746 Serge 10785
static int __intel_set_mode(struct drm_crtc *crtc,
3031 serge 10786
		    struct drm_display_mode *mode,
5354 serge 10787
			    int x, int y, struct drm_framebuffer *fb,
10788
			    struct intel_crtc_config *pipe_config,
10789
			    unsigned modeset_pipes,
10790
			    unsigned prepare_pipes,
10791
			    unsigned disable_pipes)
3031 serge 10792
{
10793
	struct drm_device *dev = crtc->dev;
5060 serge 10794
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 10795
	struct drm_display_mode *saved_mode;
3031 serge 10796
	struct intel_crtc *intel_crtc;
3480 Serge 10797
	int ret = 0;
3031 serge 10798
 
4560 Serge 10799
	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
3480 Serge 10800
	if (!saved_mode)
10801
		return -ENOMEM;
10802
 
10803
	*saved_mode = crtc->mode;
3031 serge 10804
 
5354 serge 10805
	if (modeset_pipes)
5060 serge 10806
		to_intel_crtc(crtc)->new_config = pipe_config;
3031 serge 10807
 
4560 Serge 10808
	/*
10809
	 * See if the config requires any additional preparation, e.g.
10810
	 * to adjust global state with pipes off.  We need to do this
10811
	 * here so we can get the modeset_pipe updated config for the new
10812
	 * mode set on this crtc.  For other crtcs we need to use the
10813
	 * adjusted_mode bits in the crtc directly.
10814
	 */
10815
	if (IS_VALLEYVIEW(dev)) {
5060 serge 10816
		valleyview_modeset_global_pipes(dev, &prepare_pipes);
4560 Serge 10817
 
10818
		/* may have added more to prepare_pipes than we should */
10819
		prepare_pipes &= ~disable_pipes;
10820
	}
10821
 
5354 serge 10822
	if (dev_priv->display.crtc_compute_clock) {
10823
		unsigned clear_pipes = modeset_pipes | disable_pipes;
10824
 
10825
		ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
10826
		if (ret)
10827
			goto done;
10828
 
10829
		for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
10830
			ret = dev_priv->display.crtc_compute_clock(intel_crtc);
10831
			if (ret) {
10832
				intel_shared_dpll_abort_config(dev_priv);
10833
				goto done;
10834
			}
10835
		}
10836
	}
10837
 
3746 Serge 10838
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
10839
		intel_crtc_disable(&intel_crtc->base);
10840
 
3031 serge 10841
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10842
		if (intel_crtc->base.enabled)
10843
			dev_priv->display.crtc_disable(&intel_crtc->base);
10844
	}
10845
 
10846
	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
10847
	 * to set it here already even though we pass it down the callchain.
5354 serge 10848
	 *
10849
	 * Note we'll need to fix this up when we start tracking multiple
10850
	 * pipes; here we assume a single modeset_pipe and only track the
10851
	 * single crtc and mode.
2330 Serge 10852
	 */
3746 Serge 10853
	if (modeset_pipes) {
3031 serge 10854
		crtc->mode = *mode;
3746 Serge 10855
		/* mode_set/enable/disable functions rely on a correct pipe
10856
		 * config. */
10857
		to_intel_crtc(crtc)->config = *pipe_config;
5060 serge 10858
		to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
4560 Serge 10859
 
10860
		/*
10861
		 * Calculate and store various constants which
10862
		 * are later needed by vblank and swap-completion
10863
		 * timestamping. They are derived from true hwmode.
10864
		 */
10865
		drm_calc_timestamping_constants(crtc,
10866
						&pipe_config->adjusted_mode);
3746 Serge 10867
	}
2327 Serge 10868
 
3031 serge 10869
	/* Only after disabling all output pipelines that will be changed can we
10870
	 * update the output configuration. */
10871
	intel_modeset_update_state(dev, prepare_pipes);
10872
 
5354 serge 10873
	modeset_update_crtc_power_domains(dev);
3243 Serge 10874
 
3031 serge 10875
	/* Set up the DPLL and any encoders state that needs to adjust or depend
10876
	 * on the DPLL.
2330 Serge 10877
	 */
3031 serge 10878
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
5060 serge 10879
		struct drm_framebuffer *old_fb = crtc->primary->fb;
10880
		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
10881
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10882
 
10883
		mutex_lock(&dev->struct_mutex);
5354 serge 10884
		ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
5060 serge 10885
		if (ret != 0) {
10886
			DRM_ERROR("pin & fence failed\n");
10887
			mutex_unlock(&dev->struct_mutex);
10888
			goto done;
10889
		}
10890
		if (old_fb)
10891
			intel_unpin_fb_obj(old_obj);
10892
		i915_gem_track_fb(old_obj, obj,
10893
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
10894
		mutex_unlock(&dev->struct_mutex);
10895
 
10896
		crtc->primary->fb = fb;
10897
		crtc->x = x;
10898
		crtc->y = y;
3031 serge 10899
	}
10900
 
10901
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
5060 serge 10902
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10903
		update_scanline_offset(intel_crtc);
10904
 
3031 serge 10905
		dev_priv->display.crtc_enable(&intel_crtc->base);
5060 serge 10906
	}
3031 serge 10907
 
10908
	/* FIXME: add subpixel order */
10909
done:
4560 Serge 10910
	if (ret && crtc->enabled)
3480 Serge 10911
		crtc->mode = *saved_mode;
3031 serge 10912
 
3746 Serge 10913
	kfree(pipe_config);
3480 Serge 10914
	kfree(saved_mode);
3031 serge 10915
	return ret;
2330 Serge 10916
}
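
/*
 * For orientation, the modeset entry points defined below chain together
 * roughly as follows:
 *
 *   intel_set_mode()
 *     -> intel_modeset_compute_config()    compute the pipe_config and the
 *                                          modeset/prepare/disable pipe masks
 *     -> intel_set_mode_pipes()
 *          -> __intel_set_mode()           disable, reprogram and re-enable
 *                                          the affected pipes (above)
 *          -> intel_modeset_check_state()  cross-check SW state against HW
 */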
2327 Serge 10917
 
5354 serge 10918
static int intel_set_mode_pipes(struct drm_crtc *crtc,
3746 Serge 10919
				struct drm_display_mode *mode,
5354 serge 10920
				int x, int y, struct drm_framebuffer *fb,
10921
				struct intel_crtc_config *pipe_config,
10922
				unsigned modeset_pipes,
10923
				unsigned prepare_pipes,
10924
				unsigned disable_pipes)
3746 Serge 10925
{
10926
	int ret;
10927
 
5354 serge 10928
	ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
10929
			       prepare_pipes, disable_pipes);
3746 Serge 10930
 
10931
	if (ret == 0)
10932
		intel_modeset_check_state(crtc->dev);
10933
 
10934
	return ret;
10935
}
10936
 
5354 serge 10937
static int intel_set_mode(struct drm_crtc *crtc,
10938
			  struct drm_display_mode *mode,
10939
			  int x, int y, struct drm_framebuffer *fb)
10940
{
10941
	struct intel_crtc_config *pipe_config;
10942
	unsigned modeset_pipes, prepare_pipes, disable_pipes;
10943
 
10944
	pipe_config = intel_modeset_compute_config(crtc, mode, fb,
10945
						   &modeset_pipes,
10946
						   &prepare_pipes,
10947
						   &disable_pipes);
10948
 
10949
	if (IS_ERR(pipe_config))
10950
		return PTR_ERR(pipe_config);
10951
 
10952
	return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
10953
				    modeset_pipes, prepare_pipes,
10954
				    disable_pipes);
10955
}
10956
 
3480 Serge 10957
void intel_crtc_restore_mode(struct drm_crtc *crtc)
10958
{
5060 serge 10959
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
3480 Serge 10960
}
10961
 
3031 serge 10962
#undef for_each_intel_crtc_masked
2327 Serge 10963
 
3031 serge 10964
static void intel_set_config_free(struct intel_set_config *config)
10965
{
10966
	if (!config)
10967
		return;
10968
 
10969
	kfree(config->save_connector_encoders);
10970
	kfree(config->save_encoder_crtcs);
5060 serge 10971
	kfree(config->save_crtc_enabled);
3031 serge 10972
	kfree(config);
10973
}
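
/*
 * intel_set_config_save_state()/intel_set_config_restore_state() snapshot
 * and roll back the state that a failed set_config may have clobbered:
 * the per-crtc enabled flags, the encoder->crtc links and the
 * connector->encoder links.  Driver-private bookkeeping is left alone.
 */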
10974
 
10975
static int intel_set_config_save_state(struct drm_device *dev,
10976
				       struct intel_set_config *config)
10977
{
5060 serge 10978
	struct drm_crtc *crtc;
3031 serge 10979
	struct drm_encoder *encoder;
10980
	struct drm_connector *connector;
10981
	int count;
10982
 
5060 serge 10983
	config->save_crtc_enabled =
10984
		kcalloc(dev->mode_config.num_crtc,
10985
			sizeof(bool), GFP_KERNEL);
10986
	if (!config->save_crtc_enabled)
10987
		return -ENOMEM;
10988
 
3031 serge 10989
	config->save_encoder_crtcs =
10990
		kcalloc(dev->mode_config.num_encoder,
10991
			sizeof(struct drm_crtc *), GFP_KERNEL);
10992
	if (!config->save_encoder_crtcs)
10993
		return -ENOMEM;
10994
 
10995
	config->save_connector_encoders =
10996
		kcalloc(dev->mode_config.num_connector,
10997
			sizeof(struct drm_encoder *), GFP_KERNEL);
10998
	if (!config->save_connector_encoders)
10999
		return -ENOMEM;
11000
 
11001
	/* Copy data. Note that driver private data is not affected.
11002
	 * Should anything bad happen only the expected state is
11003
	 * restored, not the driver's personal bookkeeping.
11004
	 */
11005
	count = 0;
5060 serge 11006
	for_each_crtc(dev, crtc) {
11007
		config->save_crtc_enabled[count++] = crtc->enabled;
11008
	}
11009
 
11010
	count = 0;
3031 serge 11011
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
11012
		config->save_encoder_crtcs[count++] = encoder->crtc;
11013
	}
11014
 
11015
	count = 0;
11016
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
11017
		config->save_connector_encoders[count++] = connector->encoder;
11018
	}
11019
 
11020
	return 0;
11021
}
11022
 
11023
static void intel_set_config_restore_state(struct drm_device *dev,
11024
					   struct intel_set_config *config)
11025
{
5060 serge 11026
	struct intel_crtc *crtc;
3031 serge 11027
	struct intel_encoder *encoder;
11028
	struct intel_connector *connector;
11029
	int count;
11030
 
11031
	count = 0;
5060 serge 11032
	for_each_intel_crtc(dev, crtc) {
11033
		crtc->new_enabled = config->save_crtc_enabled[count++];
11034
 
11035
		if (crtc->new_enabled)
11036
			crtc->new_config = &crtc->config;
11037
		else
11038
			crtc->new_config = NULL;
11039
	}
11040
 
11041
	count = 0;
5354 serge 11042
	for_each_intel_encoder(dev, encoder) {
3031 serge 11043
		encoder->new_crtc =
11044
			to_intel_crtc(config->save_encoder_crtcs[count++]);
11045
	}
11046
 
11047
	count = 0;
11048
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
11049
		connector->new_encoder =
11050
			to_intel_encoder(config->save_connector_encoders[count++]);
11051
	}
11052
}
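
/*
 * Returns true when at least one connector currently driven by set->crtc is
 * in a DPMS off state; intel_set_config_compute_mode_changes() treats that
 * case as requiring a full modeset rather than a simple flip.
 */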
11053
 
3746 Serge 11054
static bool
4104 Serge 11055
is_crtc_connector_off(struct drm_mode_set *set)
3746 Serge 11056
{
11057
	int i;
11058
 
4104 Serge 11059
	if (set->num_connectors == 0)
11060
		return false;
11061
 
11062
	if (WARN_ON(set->connectors == NULL))
11063
		return false;
11064
 
11065
	for (i = 0; i < set->num_connectors; i++)
11066
		if (set->connectors[i]->encoder &&
11067
		    set->connectors[i]->encoder->crtc == set->crtc &&
11068
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
3746 Serge 11069
			return true;
11070
 
11071
	return false;
11072
}
11073
 
3031 serge 11074
static void
11075
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
11076
				      struct intel_set_config *config)
11077
{
11078
 
11079
	/* We should be able to check here if the fb has the same properties
11080
	 * and then just flip_or_move it */
4104 Serge 11081
	if (is_crtc_connector_off(set)) {
3746 Serge 11082
		config->mode_changed = true;
5060 serge 11083
	} else if (set->crtc->primary->fb != set->fb) {
11084
		/*
11085
		 * If we have no fb, we can only flip as long as the crtc is
11086
		 * active, otherwise we need a full mode set.  The crtc may
11087
		 * be active if we've only disabled the primary plane, or
11088
		 * in fastboot situations.
11089
		 */
11090
		if (set->crtc->primary->fb == NULL) {
4104 Serge 11091
			struct intel_crtc *intel_crtc =
11092
				to_intel_crtc(set->crtc);
11093
 
5060 serge 11094
			if (intel_crtc->active) {
4104 Serge 11095
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
11096
				config->fb_changed = true;
11097
			} else {
11098
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
3031 serge 11099
				config->mode_changed = true;
4104 Serge 11100
			}
3031 serge 11101
		} else if (set->fb == NULL) {
11102
			config->mode_changed = true;
3746 Serge 11103
		} else if (set->fb->pixel_format !=
5060 serge 11104
			   set->crtc->primary->fb->pixel_format) {
3031 serge 11105
			config->mode_changed = true;
3746 Serge 11106
		} else {
3031 serge 11107
			config->fb_changed = true;
11108
		}
3746 Serge 11109
	}
3031 serge 11110
 
11111
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
11112
		config->fb_changed = true;
11113
 
11114
	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
11115
		DRM_DEBUG_KMS("modes are different, full mode set\n");
11116
		drm_mode_debug_printmodeline(&set->crtc->mode);
11117
		drm_mode_debug_printmodeline(set->mode);
11118
		config->mode_changed = true;
11119
	}
4104 Serge 11120
 
11121
	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
11122
			set->crtc->base.id, config->mode_changed, config->fb_changed);
3031 serge 11123
}
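
/*
 * Stage the requested output routing in the new_encoder/new_crtc/new_enabled
 * pointers without touching the hardware yet, and flag a full modeset
 * whenever a connector, encoder or crtc would change its routing or its
 * enabled state.
 */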
11124
 
11125
static int
11126
intel_modeset_stage_output_state(struct drm_device *dev,
11127
				 struct drm_mode_set *set,
11128
				 struct intel_set_config *config)
11129
{
11130
	struct intel_connector *connector;
11131
	struct intel_encoder *encoder;
5060 serge 11132
	struct intel_crtc *crtc;
4104 Serge 11133
	int ro;
3031 serge 11134
 
3480 Serge 11135
	/* The upper layers ensure that we either disable a crtc or have a list
3031 serge 11136
	 * of connectors. For paranoia, double-check this. */
11137
	WARN_ON(!set->fb && (set->num_connectors != 0));
11138
	WARN_ON(set->fb && (set->num_connectors == 0));
11139
 
11140
	list_for_each_entry(connector, &dev->mode_config.connector_list,
11141
			    base.head) {
11142
		/* Otherwise traverse passed in connector list and get encoders
11143
		 * for them. */
11144
		for (ro = 0; ro < set->num_connectors; ro++) {
11145
			if (set->connectors[ro] == &connector->base) {
5060 serge 11146
				connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
3031 serge 11147
				break;
11148
			}
11149
		}
11150
 
11151
		/* If we disable the crtc, disable all its connectors. Also, if
11152
		 * the connector is on the changing crtc but not on the new
11153
		 * connector list, disable it. */
11154
		if ((!set->fb || ro == set->num_connectors) &&
11155
		    connector->base.encoder &&
11156
		    connector->base.encoder->crtc == set->crtc) {
11157
			connector->new_encoder = NULL;
11158
 
11159
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
11160
				connector->base.base.id,
5060 serge 11161
				connector->base.name);
3031 serge 11162
		}
11163
 
11164
 
11165
		if (&connector->new_encoder->base != connector->base.encoder) {
11166
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
11167
			config->mode_changed = true;
11168
		}
11169
	}
11170
	/* connector->new_encoder is now updated for all connectors. */
11171
 
11172
	/* Update crtc of enabled connectors. */
11173
	list_for_each_entry(connector, &dev->mode_config.connector_list,
11174
			    base.head) {
5060 serge 11175
		struct drm_crtc *new_crtc;
11176
 
3031 serge 11177
		if (!connector->new_encoder)
11178
			continue;
11179
 
11180
		new_crtc = connector->new_encoder->base.crtc;
11181
 
11182
		for (ro = 0; ro < set->num_connectors; ro++) {
11183
			if (set->connectors[ro] == &connector->base)
11184
				new_crtc = set->crtc;
11185
		}
11186
 
11187
		/* Make sure the new CRTC will work with the encoder */
4560 Serge 11188
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
3031 serge 11189
					   new_crtc)) {
11190
			return -EINVAL;
11191
		}
5060 serge 11192
		connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
3031 serge 11193
 
11194
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
11195
			connector->base.base.id,
5060 serge 11196
			connector->base.name,
3031 serge 11197
			new_crtc->base.id);
11198
	}
11199
 
11200
	/* Check for any encoders that needs to be disabled. */
5354 serge 11201
	for_each_intel_encoder(dev, encoder) {
4560 Serge 11202
		int num_connectors = 0;
3031 serge 11203
		list_for_each_entry(connector,
11204
				    &dev->mode_config.connector_list,
11205
				    base.head) {
11206
			if (connector->new_encoder == encoder) {
11207
				WARN_ON(!connector->new_encoder->new_crtc);
4560 Serge 11208
				num_connectors++;
3031 serge 11209
			}
11210
		}
4560 Serge 11211
 
11212
		if (num_connectors == 0)
3031 serge 11213
			encoder->new_crtc = NULL;
4560 Serge 11214
		else if (num_connectors > 1)
11215
			return -EINVAL;
11216
 
3031 serge 11217
		/* Only now check for crtc changes so we don't miss encoders
11218
		 * that will be disabled. */
11219
		if (&encoder->new_crtc->base != encoder->base.crtc) {
11220
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
11221
			config->mode_changed = true;
11222
		}
11223
	}
11224
	/* Now we've also updated encoder->new_crtc for all encoders. */
5060 serge 11225
	list_for_each_entry(connector, &dev->mode_config.connector_list,
11226
			    base.head) {
11227
		if (connector->new_encoder)
11228
			if (connector->new_encoder != connector->encoder)
11229
				connector->encoder = connector->new_encoder;
11230
	}
11231
	for_each_intel_crtc(dev, crtc) {
11232
		crtc->new_enabled = false;
3031 serge 11233
 
5354 serge 11234
		for_each_intel_encoder(dev, encoder) {
5060 serge 11235
			if (encoder->new_crtc == crtc) {
11236
				crtc->new_enabled = true;
11237
				break;
11238
			}
11239
		}
11240
 
11241
		if (crtc->new_enabled != crtc->base.enabled) {
11242
			DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
11243
				      crtc->new_enabled ? "en" : "dis");
11244
			config->mode_changed = true;
11245
		}
11246
 
11247
		if (crtc->new_enabled)
11248
			crtc->new_config = &crtc->config;
11249
		else
11250
			crtc->new_config = NULL;
11251
	}
11252
 
3031 serge 11253
	return 0;
11254
}
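
/*
 * Error-path helper for intel_crtc_set_config(): drop the staged routing of
 * a crtc that would otherwise be restored without a framebuffer, so the
 * restore does not oops on fb == NULL (see the HACK comment in the caller).
 */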
11255
 
5060 serge 11256
static void disable_crtc_nofb(struct intel_crtc *crtc)
11257
{
11258
	struct drm_device *dev = crtc->base.dev;
11259
	struct intel_encoder *encoder;
11260
	struct intel_connector *connector;
11261
 
11262
	DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
11263
		      pipe_name(crtc->pipe));
11264
 
11265
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
11266
		if (connector->new_encoder &&
11267
		    connector->new_encoder->new_crtc == crtc)
11268
			connector->new_encoder = NULL;
11269
	}
11270
 
5354 serge 11271
	for_each_intel_encoder(dev, encoder) {
5060 serge 11272
		if (encoder->new_crtc == crtc)
11273
			encoder->new_crtc = NULL;
11274
	}
11275
 
11276
	crtc->new_enabled = false;
11277
	crtc->new_config = NULL;
11278
}
11279
 
3031 serge 11280
static int intel_crtc_set_config(struct drm_mode_set *set)
11281
{
11282
	struct drm_device *dev;
11283
	struct drm_mode_set save_set;
11284
	struct intel_set_config *config;
5354 serge 11285
	struct intel_crtc_config *pipe_config;
11286
	unsigned modeset_pipes, prepare_pipes, disable_pipes;
3031 serge 11287
	int ret;
11288
 
11289
	BUG_ON(!set);
11290
	BUG_ON(!set->crtc);
11291
	BUG_ON(!set->crtc->helper_private);
11292
 
3480 Serge 11293
	/* Enforce sane interface api - has been abused by the fb helper. */
11294
	BUG_ON(!set->mode && set->fb);
11295
	BUG_ON(set->fb && set->num_connectors == 0);
3031 serge 11296
 
11297
	if (set->fb) {
11298
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
11299
				set->crtc->base.id, set->fb->base.id,
11300
				(int)set->num_connectors, set->x, set->y);
11301
	} else {
11302
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
11303
	}
11304
 
11305
	dev = set->crtc->dev;
11306
 
11307
	ret = -ENOMEM;
11308
	config = kzalloc(sizeof(*config), GFP_KERNEL);
11309
	if (!config)
11310
		goto out_config;
11311
 
11312
	ret = intel_set_config_save_state(dev, config);
11313
	if (ret)
11314
		goto out_config;
11315
 
11316
	save_set.crtc = set->crtc;
11317
	save_set.mode = &set->crtc->mode;
11318
	save_set.x = set->crtc->x;
11319
	save_set.y = set->crtc->y;
5060 serge 11320
	save_set.fb = set->crtc->primary->fb;
3031 serge 11321
 
11322
	/* Compute whether we need a full modeset, only an fb base update or no
11323
	 * change at all. In the future we might also check whether only the
11324
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
11325
	 * such cases. */
11326
	intel_set_config_compute_mode_changes(set, config);
11327
 
11328
	ret = intel_modeset_stage_output_state(dev, set, config);
11329
	if (ret)
11330
		goto fail;
11331
 
5354 serge 11332
	pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
11333
						   set->fb,
11334
						   &modeset_pipes,
11335
						   &prepare_pipes,
11336
						   &disable_pipes);
11337
	if (IS_ERR(pipe_config)) {
11338
		ret = PTR_ERR(pipe_config);
11339
		goto fail;
11340
	} else if (pipe_config) {
11341
		if (pipe_config->has_audio !=
11342
		    to_intel_crtc(set->crtc)->config.has_audio)
11343
			config->mode_changed = true;
11344
 
11345
		/*
11346
		 * Note we have an issue here with infoframes: current code
11347
		 * only updates them on the full mode set path per hw
11348
		 * requirements.  So here we should be checking for any
11349
		 * required changes and forcing a mode set.
11350
		 */
11351
	}
11352
 
11353
	/* set_mode will free it in the mode_changed case */
11354
	if (!config->mode_changed)
11355
		kfree(pipe_config);
11356
 
11357
	intel_update_pipe_size(to_intel_crtc(set->crtc));
11358
 
3031 serge 11359
	if (config->mode_changed) {
5354 serge 11360
		ret = intel_set_mode_pipes(set->crtc, set->mode,
11361
					   set->x, set->y, set->fb, pipe_config,
11362
					   modeset_pipes, prepare_pipes,
11363
					   disable_pipes);
3031 serge 11364
	} else if (config->fb_changed) {
5060 serge 11365
		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
3746 Serge 11366
 
5354 serge 11367
//       intel_crtc_wait_for_pending_flips(set->crtc);
5060 serge 11368
 
3031 serge 11369
		ret = intel_pipe_set_base(set->crtc,
11370
					  set->x, set->y, set->fb);
5060 serge 11371
 
4560 Serge 11372
		/*
5060 serge 11373
		 * We need to make sure the primary plane is re-enabled if it
11374
		 * has previously been turned off.
11375
		 */
11376
		if (!intel_crtc->primary_enabled && ret == 0) {
11377
			WARN_ON(!intel_crtc->active);
5354 serge 11378
			intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
5060 serge 11379
		}
11380
 
11381
		/*
4560 Serge 11382
		 * In the fastboot case this may be our only check of the
11383
		 * state after boot.  It would be better to only do it on
11384
		 * the first update, but we don't have a nice way of doing that
11385
		 * (and really, set_config isn't used much for high freq page
11386
		 * flipping, so increasing its cost here shouldn't be a big
11387
		 * deal).
11388
		 */
5060 serge 11389
		if (i915.fastboot && ret == 0)
4560 Serge 11390
			intel_modeset_check_state(set->crtc->dev);
3031 serge 11391
	}
11392
 
3746 Serge 11393
	if (ret) {
4104 Serge 11394
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
3746 Serge 11395
			  set->crtc->base.id, ret);
3031 serge 11396
fail:
11397
	intel_set_config_restore_state(dev, config);
11398
 
5060 serge 11399
		/*
11400
		 * HACK: if the pipe was on, but we didn't have a framebuffer,
11401
		 * force the pipe off to avoid oopsing in the modeset code
11402
		 * due to fb==NULL. This should only happen during boot since
11403
		 * we don't yet reconstruct the FB from the hardware state.
11404
		 */
11405
		if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
11406
			disable_crtc_nofb(to_intel_crtc(save_set.crtc));
11407
 
3031 serge 11408
	/* Try to restore the config */
11409
	if (config->mode_changed &&
3480 Serge 11410
	    intel_set_mode(save_set.crtc, save_set.mode,
3031 serge 11411
			    save_set.x, save_set.y, save_set.fb))
11412
		DRM_ERROR("failed to restore config after modeset failure\n");
3746 Serge 11413
	}
3031 serge 11414
 
11415
out_config:
11416
	intel_set_config_free(config);
11417
	return ret;
11418
}
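
/*
 * Note: this port leaves the .page_flip hook commented out below, so page
 * flips through this CRTC are not available in this build; only full
 * set_config calls and framebuffer base updates are handled.
 */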
11419
 
2330 Serge 11420
static const struct drm_crtc_funcs intel_crtc_funcs = {
11421
	.gamma_set = intel_crtc_gamma_set,
3031 serge 11422
	.set_config = intel_crtc_set_config,
2330 Serge 11423
	.destroy = intel_crtc_destroy,
11424
//	.page_flip = intel_crtc_page_flip,
11425
};
2327 Serge 11426
 
4104 Serge 11427
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
11428
				      struct intel_shared_dpll *pll,
11429
				      struct intel_dpll_hw_state *hw_state)
3031 serge 11430
{
4104 Serge 11431
	uint32_t val;
3031 serge 11432
 
5354 serge 11433
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
5060 serge 11434
		return false;
11435
 
4104 Serge 11436
	val = I915_READ(PCH_DPLL(pll->id));
11437
	hw_state->dpll = val;
11438
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
11439
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
11440
 
11441
	return val & DPLL_VCO_ENABLE;
11442
}
11443
 
11444
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
11445
				  struct intel_shared_dpll *pll)
11446
{
5354 serge 11447
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
11448
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
4104 Serge 11449
}
11450
 
11451
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
11452
				struct intel_shared_dpll *pll)
11453
{
11454
	/* PCH refclock must be enabled first */
4560 Serge 11455
	ibx_assert_pch_refclk_enabled(dev_priv);
4104 Serge 11456
 
5354 serge 11457
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
4104 Serge 11458
 
11459
	/* Wait for the clocks to stabilize. */
11460
	POSTING_READ(PCH_DPLL(pll->id));
11461
	udelay(150);
11462
 
11463
	/* The pixel multiplier can only be updated once the
11464
	 * DPLL is enabled and the clocks are stable.
11465
	 *
11466
	 * So write it again.
11467
	 */
5354 serge 11468
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
4104 Serge 11469
	POSTING_READ(PCH_DPLL(pll->id));
11470
	udelay(200);
11471
}
11472
 
11473
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
11474
				 struct intel_shared_dpll *pll)
11475
{
11476
	struct drm_device *dev = dev_priv->dev;
11477
	struct intel_crtc *crtc;
11478
 
11479
	/* Make sure no transcoder is still depending on us. */
5060 serge 11480
	for_each_intel_crtc(dev, crtc) {
4104 Serge 11481
		if (intel_crtc_to_shared_dpll(crtc) == pll)
11482
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
3031 serge 11483
	}
11484
 
4104 Serge 11485
	I915_WRITE(PCH_DPLL(pll->id), 0);
11486
	POSTING_READ(PCH_DPLL(pll->id));
11487
	udelay(200);
11488
}
11489
 
11490
static char *ibx_pch_dpll_names[] = {
11491
	"PCH DPLL A",
11492
	"PCH DPLL B",
11493
};
11494
 
11495
static void ibx_pch_dpll_init(struct drm_device *dev)
11496
{
11497
	struct drm_i915_private *dev_priv = dev->dev_private;
11498
	int i;
11499
 
11500
	dev_priv->num_shared_dpll = 2;
11501
 
11502
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11503
		dev_priv->shared_dplls[i].id = i;
11504
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
11505
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
11506
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
11507
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
11508
		dev_priv->shared_dplls[i].get_hw_state =
11509
			ibx_pch_dpll_get_hw_state;
3031 serge 11510
	}
11511
}
11512
 
4104 Serge 11513
static void intel_shared_dpll_init(struct drm_device *dev)
11514
{
11515
	struct drm_i915_private *dev_priv = dev->dev_private;
11516
 
5060 serge 11517
	if (HAS_DDI(dev))
11518
		intel_ddi_pll_init(dev);
11519
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4104 Serge 11520
		ibx_pch_dpll_init(dev);
11521
	else
11522
		dev_priv->num_shared_dpll = 0;
11523
 
11524
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
11525
}
11526
 
5060 serge 11527
static int
11528
intel_primary_plane_disable(struct drm_plane *plane)
11529
{
11530
	struct drm_device *dev = plane->dev;
11531
	struct intel_crtc *intel_crtc;
11532
 
11533
	if (!plane->fb)
11534
		return 0;
11535
 
11536
	BUG_ON(!plane->crtc);
11537
 
11538
	intel_crtc = to_intel_crtc(plane->crtc);
11539
 
11540
	/*
11541
	 * Even though we checked plane->fb above, it's still possible that
11542
	 * the primary plane has been implicitly disabled because the crtc
11543
	 * coordinates given weren't visible, or because we detected
11544
	 * that it was 100% covered by a sprite plane.  Or, the CRTC may be
11545
	 * off and we've set a fb, but haven't actually turned on the CRTC yet.
11546
	 * In either case, we need to unpin the FB and let the fb pointer get
11547
	 * updated, but otherwise we don't need to touch the hardware.
11548
	 */
11549
	if (!intel_crtc->primary_enabled)
11550
		goto disable_unpin;
11551
 
5354 serge 11552
//   intel_crtc_wait_for_pending_flips(plane->crtc);
11553
	intel_disable_primary_hw_plane(plane, plane->crtc);
11554
 
5060 serge 11555
disable_unpin:
11556
	mutex_lock(&dev->struct_mutex);
11557
	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
11558
			  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11559
	intel_unpin_fb_obj(intel_fb_obj(plane->fb));
11560
	mutex_unlock(&dev->struct_mutex);
11561
	plane->fb = NULL;
11562
 
11563
	return 0;
11564
}
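
/*
 * The primary plane update is split into a check/prepare/commit sequence:
 * intel_check_primary_plane() validates and clips the request,
 * intel_prepare_primary_plane() pins the new framebuffer, and
 * intel_commit_primary_plane() writes the new state to the hardware.
 * intel_primary_plane_setplane() below drives the three steps in order.
 */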
11565
 
11566
static int
5354 serge 11567
intel_check_primary_plane(struct drm_plane *plane,
11568
			  struct intel_plane_state *state)
5060 serge 11569
{
5354 serge 11570
	struct drm_crtc *crtc = state->crtc;
11571
	struct drm_framebuffer *fb = state->fb;
11572
	struct drm_rect *dest = &state->dst;
11573
	struct drm_rect *src = &state->src;
11574
	const struct drm_rect *clip = &state->clip;
11575
 
11576
	return drm_plane_helper_check_update(plane, crtc, fb,
11577
					     src, dest, clip,
11578
					     DRM_PLANE_HELPER_NO_SCALING,
11579
					     DRM_PLANE_HELPER_NO_SCALING,
11580
					     false, true, &state->visible);
11581
}
11582
 
11583
static int
11584
intel_prepare_primary_plane(struct drm_plane *plane,
11585
			    struct intel_plane_state *state)
11586
{
11587
	struct drm_crtc *crtc = state->crtc;
11588
	struct drm_framebuffer *fb = state->fb;
5060 serge 11589
	struct drm_device *dev = crtc->dev;
11590
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5354 serge 11591
	enum pipe pipe = intel_crtc->pipe;
5060 serge 11592
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11593
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11594
	int ret;
11595
 
11596
 
11597
 
5354 serge 11598
	if (old_obj != obj) {
5060 serge 11599
		mutex_lock(&dev->struct_mutex);
5354 serge 11600
		ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
11601
		if (ret == 0)
11602
			i915_gem_track_fb(old_obj, obj,
11603
					  INTEL_FRONTBUFFER_PRIMARY(pipe));
11604
		mutex_unlock(&dev->struct_mutex);
11605
		if (ret != 0) {
11606
			DRM_DEBUG_KMS("pin & fence failed\n");
11607
			return ret;
11608
		}
11609
	}
5060 serge 11610
 
5354 serge 11611
	return 0;
11612
}
11613
 
11614
static void
11615
intel_commit_primary_plane(struct drm_plane *plane,
11616
			   struct intel_plane_state *state)
11617
{
11618
	struct drm_crtc *crtc = state->crtc;
11619
	struct drm_framebuffer *fb = state->fb;
11620
	struct drm_device *dev = crtc->dev;
11621
	struct drm_i915_private *dev_priv = dev->dev_private;
11622
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11623
	enum pipe pipe = intel_crtc->pipe;
11624
	struct drm_framebuffer *old_fb = plane->fb;
11625
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11626
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11627
	struct intel_plane *intel_plane = to_intel_plane(plane);
11628
	struct drm_rect *src = &state->src;
11629
 
11630
	crtc->primary->fb = fb;
11631
	crtc->x = src->x1 >> 16;
11632
	crtc->y = src->y1 >> 16;
11633
 
11634
	intel_plane->crtc_x = state->orig_dst.x1;
11635
	intel_plane->crtc_y = state->orig_dst.y1;
11636
	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
11637
	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
11638
	intel_plane->src_x = state->orig_src.x1;
11639
	intel_plane->src_y = state->orig_src.y1;
11640
	intel_plane->src_w = drm_rect_width(&state->orig_src);
11641
	intel_plane->src_h = drm_rect_height(&state->orig_src);
11642
	intel_plane->obj = obj;
11643
 
11644
	if (intel_crtc->active) {
5060 serge 11645
		/*
5354 serge 11646
		 * FBC does not work on some platforms for rotated
11647
		 * planes, so disable it when rotation is not 0 and
11648
		 * update it when rotation is set back to 0.
11649
		 *
11650
		 * FIXME: This is redundant with the fbc update done in
11651
		 * the primary plane enable function except that that
11652
		 * one is done too late. We eventually need to unify
11653
		 * this.
5060 serge 11654
		 */
5354 serge 11655
		if (intel_crtc->primary_enabled &&
11656
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11657
		    dev_priv->fbc.plane == intel_crtc->plane &&
11658
		    intel_plane->rotation != BIT(DRM_ROTATE_0)) {
11659
			intel_disable_fbc(dev);
11660
		}
5060 serge 11661
 
5354 serge 11662
		if (state->visible) {
11663
			bool was_enabled = intel_crtc->primary_enabled;
5060 serge 11664
 
5354 serge 11665
			/* FIXME: kill this fastboot hack */
11666
			intel_update_pipe_size(intel_crtc);
5060 serge 11667
 
5354 serge 11668
			intel_crtc->primary_enabled = true;
5060 serge 11669
 
5354 serge 11670
			dev_priv->display.update_primary_plane(crtc, plane->fb,
11671
					crtc->x, crtc->y);
5060 serge 11672
 
11673
			/*
5354 serge 11674
			 * BDW signals flip done immediately if the plane
11675
			 * is disabled, even if the plane enable is already
11676
			 * armed to occur at the next vblank :(
11677
			 */
11678
			if (IS_BROADWELL(dev) && !was_enabled)
11679
				intel_wait_for_vblank(dev, intel_crtc->pipe);
11680
		} else {
11681
			/*
11682
			 * If clipping results in a non-visible primary plane,
11683
			 * we'll disable the primary plane.  Note that this is
11684
			 * a bit different than what happens if userspace
11685
			 * explicitly disables the plane by passing fb=0
5060 serge 11686
			 * because plane->fb still gets set and pinned.
11687
			 */
5354 serge 11688
			intel_disable_primary_hw_plane(plane, crtc);
11689
		}
11690
 
11691
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
11692
 
5060 serge 11693
		mutex_lock(&dev->struct_mutex);
5354 serge 11694
		intel_update_fbc(dev);
11695
		mutex_unlock(&dev->struct_mutex);
11696
	}
5060 serge 11697
 
5354 serge 11698
	if (old_fb && old_fb != fb) {
11699
		if (intel_crtc->active)
11700
			intel_wait_for_vblank(dev, intel_crtc->pipe);
11701
 
11702
		mutex_lock(&dev->struct_mutex);
11703
		intel_unpin_fb_obj(old_obj);
5060 serge 11704
		mutex_unlock(&dev->struct_mutex);
11705
	}
5354 serge 11706
}
5060 serge 11707
 
5354 serge 11708
static int
11709
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11710
			     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11711
			     unsigned int crtc_w, unsigned int crtc_h,
11712
			     uint32_t src_x, uint32_t src_y,
11713
			     uint32_t src_w, uint32_t src_h)
11714
{
11715
	struct intel_plane_state state;
11716
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11717
	int ret;
5060 serge 11718
 
5354 serge 11719
	state.crtc = crtc;
11720
	state.fb = fb;
5060 serge 11721
 
5354 serge 11722
	/* sample coordinates in 16.16 fixed point */
11723
	state.src.x1 = src_x;
11724
	state.src.x2 = src_x + src_w;
11725
	state.src.y1 = src_y;
11726
	state.src.y2 = src_y + src_h;
5060 serge 11727
 
5354 serge 11728
	/* integer pixels */
11729
	state.dst.x1 = crtc_x;
11730
	state.dst.x2 = crtc_x + crtc_w;
11731
	state.dst.y1 = crtc_y;
11732
	state.dst.y2 = crtc_y + crtc_h;
5060 serge 11733
 
5354 serge 11734
	state.clip.x1 = 0;
11735
	state.clip.y1 = 0;
11736
	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
11737
	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
5060 serge 11738
 
5354 serge 11739
	state.orig_src = state.src;
11740
	state.orig_dst = state.dst;
5060 serge 11741
 
5354 serge 11742
	ret = intel_check_primary_plane(plane, &state);
5060 serge 11743
	if (ret)
11744
		return ret;
11745
 
5354 serge 11746
	ret = intel_prepare_primary_plane(plane, &state);
11747
	if (ret)
11748
		return ret;
5060 serge 11749
 
5354 serge 11750
	intel_commit_primary_plane(plane, &state);
11751
 
5060 serge 11752
	return 0;
11753
}
11754
 
11755
/* Common destruction function for both primary and cursor planes */
11756
static void intel_plane_destroy(struct drm_plane *plane)
11757
{
11758
	struct intel_plane *intel_plane = to_intel_plane(plane);
11759
	drm_plane_cleanup(plane);
11760
	kfree(intel_plane);
11761
}
11762
 
11763
static const struct drm_plane_funcs intel_primary_plane_funcs = {
11764
	.update_plane = intel_primary_plane_setplane,
11765
	.disable_plane = intel_primary_plane_disable,
11766
	.destroy = intel_plane_destroy,
5354 serge 11767
	.set_property = intel_plane_set_property
5060 serge 11768
};
11769
 
11770
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11771
						    int pipe)
11772
{
11773
	struct intel_plane *primary;
11774
	const uint32_t *intel_primary_formats;
11775
	int num_formats;
11776
 
11777
	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
11778
	if (primary == NULL)
11779
		return NULL;
11780
 
11781
	primary->can_scale = false;
11782
	primary->max_downscale = 1;
11783
	primary->pipe = pipe;
11784
	primary->plane = pipe;
5354 serge 11785
	primary->rotation = BIT(DRM_ROTATE_0);
5060 serge 11786
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
11787
		primary->plane = !pipe;
11788
 
11789
	if (INTEL_INFO(dev)->gen <= 3) {
11790
		intel_primary_formats = intel_primary_formats_gen2;
11791
		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
11792
	} else {
11793
		intel_primary_formats = intel_primary_formats_gen4;
11794
		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
11795
	}
11796
 
11797
	drm_universal_plane_init(dev, &primary->base, 0,
11798
				 &intel_primary_plane_funcs,
11799
				 intel_primary_formats, num_formats,
11800
				 DRM_PLANE_TYPE_PRIMARY);
5354 serge 11801
 
11802
	if (INTEL_INFO(dev)->gen >= 4) {
11803
		if (!dev->mode_config.rotation_property)
11804
			dev->mode_config.rotation_property =
11805
				drm_mode_create_rotation_property(dev,
11806
							BIT(DRM_ROTATE_0) |
11807
							BIT(DRM_ROTATE_180));
11808
		if (dev->mode_config.rotation_property)
11809
			drm_object_attach_property(&primary->base.base,
11810
				dev->mode_config.rotation_property,
11811
				primary->rotation);
11812
	}
11813
 
5060 serge 11814
	return &primary->base;
11815
}
11816
 
11817
static int
11818
intel_cursor_plane_disable(struct drm_plane *plane)
11819
{
11820
	if (!plane->fb)
11821
		return 0;
11822
 
11823
	BUG_ON(!plane->crtc);
11824
 
11825
	return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
11826
}
11827
 
11828
static int
5354 serge 11829
intel_check_cursor_plane(struct drm_plane *plane,
11830
			 struct intel_plane_state *state)
5060 serge 11831
{
5354 serge 11832
	struct drm_crtc *crtc = state->crtc;
11833
	struct drm_device *dev = crtc->dev;
11834
	struct drm_framebuffer *fb = state->fb;
11835
	struct drm_rect *dest = &state->dst;
11836
	struct drm_rect *src = &state->src;
11837
	const struct drm_rect *clip = &state->clip;
11838
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11839
	int crtc_w, crtc_h;
11840
	unsigned stride;
5060 serge 11841
	int ret;
11842
 
11843
	ret = drm_plane_helper_check_update(plane, crtc, fb,
5354 serge 11844
					    src, dest, clip,
5060 serge 11845
					    DRM_PLANE_HELPER_NO_SCALING,
11846
					    DRM_PLANE_HELPER_NO_SCALING,
5354 serge 11847
					    true, true, &state->visible);
5060 serge 11848
	if (ret)
11849
		return ret;
11850
 
5354 serge 11851
 
11852
	/* if we want to turn off the cursor ignore width and height */
11853
	if (!obj)
11854
		return 0;
11855
 
11856
	/* Check for which cursor types we support */
11857
	crtc_w = drm_rect_width(&state->orig_dst);
11858
	crtc_h = drm_rect_height(&state->orig_dst);
11859
	if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
11860
		DRM_DEBUG("Cursor dimension not supported\n");
11861
		return -EINVAL;
11862
	}
11863
 
11864
	stride = roundup_pow_of_two(crtc_w) * 4;
11865
	if (obj->base.size < stride * crtc_h) {
11866
		DRM_DEBUG_KMS("buffer is too small\n");
11867
		return -ENOMEM;
11868
	}
11869
 
11870
	if (fb == crtc->cursor->fb)
11871
		return 0;
11872
 
11873
	/* we only need to pin inside GTT if cursor is non-phy */
11874
	mutex_lock(&dev->struct_mutex);
11875
	if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
11876
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
11877
		ret = -EINVAL;
11878
	}
11879
	mutex_unlock(&dev->struct_mutex);
11880
 
11881
	return ret;
11882
}
11883
 
11884
static int
11885
intel_commit_cursor_plane(struct drm_plane *plane,
11886
			  struct intel_plane_state *state)
11887
{
11888
	struct drm_crtc *crtc = state->crtc;
11889
	struct drm_framebuffer *fb = state->fb;
11890
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11891
	struct intel_plane *intel_plane = to_intel_plane(plane);
11892
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11893
	struct drm_i915_gem_object *obj = intel_fb->obj;
11894
	int crtc_w, crtc_h;
11895
 
11896
	crtc->cursor_x = state->orig_dst.x1;
11897
	crtc->cursor_y = state->orig_dst.y1;
11898
 
11899
	intel_plane->crtc_x = state->orig_dst.x1;
11900
	intel_plane->crtc_y = state->orig_dst.y1;
11901
	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
11902
	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
11903
	intel_plane->src_x = state->orig_src.x1;
11904
	intel_plane->src_y = state->orig_src.y1;
11905
	intel_plane->src_w = drm_rect_width(&state->orig_src);
11906
	intel_plane->src_h = drm_rect_height(&state->orig_src);
11907
	intel_plane->obj = obj;
11908
 
5060 serge 11909
	if (fb != crtc->cursor->fb) {
5354 serge 11910
		crtc_w = drm_rect_width(&state->orig_dst);
11911
		crtc_h = drm_rect_height(&state->orig_dst);
5060 serge 11912
		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
11913
	} else {
5354 serge 11914
		intel_crtc_update_cursor(crtc, state->visible);
11915
 
11916
 
5060 serge 11917
		return 0;
11918
	}
11919
}
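
/*
 * Cursor updates follow the same pattern as the primary plane, minus a
 * separate prepare step: intel_cursor_plane_update() below builds an
 * intel_plane_state from the core's arguments, validates it with
 * intel_check_cursor_plane() and hands it to intel_commit_cursor_plane().
 */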
5354 serge 11920
 
11921
static int
11922
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
11923
			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11924
			  unsigned int crtc_w, unsigned int crtc_h,
11925
			  uint32_t src_x, uint32_t src_y,
11926
			  uint32_t src_w, uint32_t src_h)
11927
{
11928
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11929
	struct intel_plane_state state;
11930
	int ret;
11931
 
11932
	state.crtc = crtc;
11933
	state.fb = fb;
11934
 
11935
	/* sample coordinates in 16.16 fixed point */
11936
	state.src.x1 = src_x;
11937
	state.src.x2 = src_x + src_w;
11938
	state.src.y1 = src_y;
11939
	state.src.y2 = src_y + src_h;
11940
 
11941
	/* integer pixels */
11942
	state.dst.x1 = crtc_x;
11943
	state.dst.x2 = crtc_x + crtc_w;
11944
	state.dst.y1 = crtc_y;
11945
	state.dst.y2 = crtc_y + crtc_h;
11946
 
11947
	state.clip.x1 = 0;
11948
	state.clip.y1 = 0;
11949
	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
11950
	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
11951
 
11952
	state.orig_src = state.src;
11953
	state.orig_dst = state.dst;
11954
 
11955
	ret = intel_check_cursor_plane(plane, &state);
11956
	if (ret)
11957
		return ret;
11958
 
11959
	return intel_commit_cursor_plane(plane, &state);
11960
}
11961
 
5060 serge 11962
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
11963
	.update_plane = intel_cursor_plane_update,
11964
	.disable_plane = intel_cursor_plane_disable,
11965
	.destroy = intel_plane_destroy,
5354 serge 11966
	.set_property = intel_plane_set_property,
5060 serge 11967
};
11968
 
11969
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
11970
						   int pipe)
11971
{
11972
	struct intel_plane *cursor;
11973
 
11974
	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
11975
	if (cursor == NULL)
11976
		return NULL;
11977
 
11978
	cursor->can_scale = false;
11979
	cursor->max_downscale = 1;
11980
	cursor->pipe = pipe;
11981
	cursor->plane = pipe;
5354 serge 11982
	cursor->rotation = BIT(DRM_ROTATE_0);
5060 serge 11983
 
11984
	drm_universal_plane_init(dev, &cursor->base, 0,
11985
				 &intel_cursor_plane_funcs,
11986
				 intel_cursor_formats,
11987
				 ARRAY_SIZE(intel_cursor_formats),
11988
				 DRM_PLANE_TYPE_CURSOR);
5354 serge 11989
 
11990
	if (INTEL_INFO(dev)->gen >= 4) {
11991
		if (!dev->mode_config.rotation_property)
11992
			dev->mode_config.rotation_property =
11993
				drm_mode_create_rotation_property(dev,
11994
							BIT(DRM_ROTATE_0) |
11995
							BIT(DRM_ROTATE_180));
11996
		if (dev->mode_config.rotation_property)
11997
			drm_object_attach_property(&cursor->base.base,
11998
				dev->mode_config.rotation_property,
11999
				cursor->rotation);
12000
	}
12001
 
5060 serge 12002
	return &cursor->base;
12003
}
12004
 
2330 Serge 12005
static void intel_crtc_init(struct drm_device *dev, int pipe)
12006
{
5060 serge 12007
	struct drm_i915_private *dev_priv = dev->dev_private;
2330 Serge 12008
	struct intel_crtc *intel_crtc;
5060 serge 12009
	struct drm_plane *primary = NULL;
12010
	struct drm_plane *cursor = NULL;
12011
	int i, ret;
2327 Serge 12012
 
4560 Serge 12013
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
2330 Serge 12014
	if (intel_crtc == NULL)
12015
		return;
2327 Serge 12016
 
5060 serge 12017
	primary = intel_primary_plane_create(dev, pipe);
12018
	if (!primary)
12019
		goto fail;
2327 Serge 12020
 
5060 serge 12021
	cursor = intel_cursor_plane_create(dev, pipe);
12022
	if (!cursor)
12023
		goto fail;
12024
 
12025
	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
12026
					cursor, &intel_crtc_funcs);
12027
	if (ret)
12028
		goto fail;
12029
 
2330 Serge 12030
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
12031
	for (i = 0; i < 256; i++) {
12032
		intel_crtc->lut_r[i] = i;
12033
		intel_crtc->lut_g[i] = i;
12034
		intel_crtc->lut_b[i] = i;
12035
	}
2327 Serge 12036
 
4560 Serge 12037
	/*
12038
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
5060 serge 12039
	 * are hooked to pipe B. Hence we want plane A feeding pipe B.
4560 Serge 12040
	 */
2330 Serge 12041
	intel_crtc->pipe = pipe;
12042
	intel_crtc->plane = pipe;
4560 Serge 12043
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
2330 Serge 12044
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
12045
		intel_crtc->plane = !pipe;
12046
	}
2327 Serge 12047
 
5060 serge 12048
	intel_crtc->cursor_base = ~0;
12049
	intel_crtc->cursor_cntl = ~0;
5354 serge 12050
	intel_crtc->cursor_size = ~0;
5060 serge 12051
 
2330 Serge 12052
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
12053
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
12054
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
12055
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
2327 Serge 12056
 
2330 Serge 12057
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
5060 serge 12058
 
12059
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
12060
	return;
12061
 
12062
fail:
12063
	if (primary)
12064
		drm_plane_cleanup(primary);
12065
	if (cursor)
12066
		drm_plane_cleanup(cursor);
12067
	kfree(intel_crtc);
2330 Serge 12068
}
2327 Serge 12069
 
4560 Serge 12070
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
12071
{
12072
	struct drm_encoder *encoder = connector->base.encoder;
5060 serge 12073
	struct drm_device *dev = connector->base.dev;
4560 Serge 12074
 
5060 serge 12075
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4560 Serge 12076
 
5354 serge 12077
	if (!encoder || WARN_ON(!encoder->crtc))
4560 Serge 12078
		return INVALID_PIPE;
12079
 
12080
	return to_intel_crtc(encoder->crtc)->pipe;
12081
}
12082
 
3031 serge 12083
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
12084
				struct drm_file *file)
12085
{
12086
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
5060 serge 12087
	struct drm_crtc *drmmode_crtc;
3031 serge 12088
	struct intel_crtc *crtc;
2327 Serge 12089
 
3482 Serge 12090
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
12091
		return -ENODEV;
12092
 
5060 serge 12093
	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
2327 Serge 12094
 
5060 serge 12095
	if (!drmmode_crtc) {
3031 serge 12096
		DRM_ERROR("no such CRTC id\n");
4560 Serge 12097
		return -ENOENT;
3031 serge 12098
	}
2327 Serge 12099
 
5060 serge 12100
	crtc = to_intel_crtc(drmmode_crtc);
3031 serge 12101
	pipe_from_crtc_id->pipe = crtc->pipe;
2327 Serge 12102
 
3031 serge 12103
	return 0;
12104
}
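
/*
 * Build a bitmask of all encoders that may share a crtc with the given
 * encoder; intel_setup_outputs() stores the result in
 * encoder->base.possible_clones for the DRM core.
 */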
2327 Serge 12105
 
3031 serge 12106
static int intel_encoder_clones(struct intel_encoder *encoder)
2330 Serge 12107
{
3031 serge 12108
	struct drm_device *dev = encoder->base.dev;
12109
	struct intel_encoder *source_encoder;
2330 Serge 12110
	int index_mask = 0;
12111
	int entry = 0;
2327 Serge 12112
 
5354 serge 12113
	for_each_intel_encoder(dev, source_encoder) {
5060 serge 12114
		if (encoders_cloneable(encoder, source_encoder))
2330 Serge 12115
			index_mask |= (1 << entry);
3031 serge 12116
 
2330 Serge 12117
		entry++;
12118
	}
2327 Serge 12119
 
2330 Serge 12120
	return index_mask;
12121
}
2327 Serge 12122
 
2330 Serge 12123
static bool has_edp_a(struct drm_device *dev)
12124
{
12125
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 12126
 
2330 Serge 12127
	if (!IS_MOBILE(dev))
12128
		return false;
2327 Serge 12129
 
2330 Serge 12130
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
12131
		return false;
2327 Serge 12132
 
5060 serge 12133
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
2330 Serge 12134
		return false;
2327 Serge 12135
 
2330 Serge 12136
	return true;
12137
}
2327 Serge 12138
 
4560 Serge 12139
const char *intel_output_name(int output)
12140
{
12141
	static const char *names[] = {
12142
		[INTEL_OUTPUT_UNUSED] = "Unused",
12143
		[INTEL_OUTPUT_ANALOG] = "Analog",
12144
		[INTEL_OUTPUT_DVO] = "DVO",
12145
		[INTEL_OUTPUT_SDVO] = "SDVO",
12146
		[INTEL_OUTPUT_LVDS] = "LVDS",
12147
		[INTEL_OUTPUT_TVOUT] = "TV",
12148
		[INTEL_OUTPUT_HDMI] = "HDMI",
12149
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
12150
		[INTEL_OUTPUT_EDP] = "eDP",
12151
		[INTEL_OUTPUT_DSI] = "DSI",
12152
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
12153
	};
12154
 
12155
	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
12156
		return "Invalid";
12157
 
12158
	return names[output];
12159
}
12160
 
5060 serge 12161
static bool intel_crt_present(struct drm_device *dev)
12162
{
12163
	struct drm_i915_private *dev_priv = dev->dev_private;
12164
 
5354 serge 12165
	if (INTEL_INFO(dev)->gen >= 9)
5060 serge 12166
		return false;
12167
 
5354 serge 12168
	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
12169
		return false;
12170
 
5060 serge 12171
	if (IS_CHERRYVIEW(dev))
12172
		return false;
12173
 
12174
	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
12175
		return false;
12176
 
12177
	return true;
12178
}
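
/*
 * Probe the outputs present on this platform and register an encoder for
 * each: DDI platforms use DDI_BUF_CTL_A and the SFUSE_STRAP register to
 * discover digital outputs, PCH and gmch platforms check the per-port
 * *_DETECTED strap bits, and VLV/CHV additionally consult the VBT to tell
 * eDP ports apart from regular DP.
 */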
12179
 
2330 Serge 12180
static void intel_setup_outputs(struct drm_device *dev)
12181
{
12182
	struct drm_i915_private *dev_priv = dev->dev_private;
12183
	struct intel_encoder *encoder;
12184
	bool dpd_is_edp = false;
2327 Serge 12185
 
4104 Serge 12186
	intel_lvds_init(dev);
2327 Serge 12187
 
5060 serge 12188
	if (intel_crt_present(dev))
2330 Serge 12189
		intel_crt_init(dev);
2327 Serge 12190
 
3480 Serge 12191
	if (HAS_DDI(dev)) {
2330 Serge 12192
		int found;
2327 Serge 12193
 
3031 serge 12194
		/* Haswell uses DDI functions to detect digital outputs */
12195
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
12196
		/* DDI A only supports eDP */
12197
		if (found)
12198
			intel_ddi_init(dev, PORT_A);
12199
 
12200
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
12201
		 * register */
12202
		found = I915_READ(SFUSE_STRAP);
12203
 
12204
		if (found & SFUSE_STRAP_DDIB_DETECTED)
12205
			intel_ddi_init(dev, PORT_B);
12206
		if (found & SFUSE_STRAP_DDIC_DETECTED)
12207
			intel_ddi_init(dev, PORT_C);
12208
		if (found & SFUSE_STRAP_DDID_DETECTED)
12209
			intel_ddi_init(dev, PORT_D);
12210
	} else if (HAS_PCH_SPLIT(dev)) {
12211
		int found;
4560 Serge 12212
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
3031 serge 12213
 
3243 Serge 12214
		if (has_edp_a(dev))
12215
			intel_dp_init(dev, DP_A, PORT_A);
12216
 
3746 Serge 12217
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
2330 Serge 12218
			/* PCH SDVOB multiplex with HDMIB */
3031 serge 12219
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
2330 Serge 12220
			if (!found)
3746 Serge 12221
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
2330 Serge 12222
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
3031 serge 12223
				intel_dp_init(dev, PCH_DP_B, PORT_B);
2330 Serge 12224
		}
2327 Serge 12225
 
3746 Serge 12226
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
12227
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
2327 Serge 12228
 
3746 Serge 12229
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
12230
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
2327 Serge 12231
 
2330 Serge 12232
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
3031 serge 12233
			intel_dp_init(dev, PCH_DP_C, PORT_C);
2327 Serge 12234
 
3243 Serge 12235
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
3031 serge 12236
			intel_dp_init(dev, PCH_DP_D, PORT_D);
12237
	} else if (IS_VALLEYVIEW(dev)) {
5354 serge 12238
		/*
12239
		 * The DP_DETECTED bit is the latched state of the DDC
12240
		 * SDA pin at boot. However since eDP doesn't require DDC
12241
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
12242
		 * eDP ports may have been muxed to an alternate function.
12243
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
12244
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
12245
		 * detect eDP ports.
12246
		 */
12247
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
4560 Serge 12248
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
12249
					PORT_B);
5354 serge 12250
		if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
12251
		    intel_dp_is_edp(dev, PORT_B))
4560 Serge 12252
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
12253
 
5354 serge 12254
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
4104 Serge 12255
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
12256
					PORT_C);
5354 serge 12257
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
12258
		    intel_dp_is_edp(dev, PORT_C))
4560 Serge 12259
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
3243 Serge 12260
 
5060 serge 12261
		if (IS_CHERRYVIEW(dev)) {
5354 serge 12262
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
5060 serge 12263
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
12264
						PORT_D);
5354 serge 12265
			/* eDP not supported on port D, so don't check VBT */
5060 serge 12266
			if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
12267
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
12268
		}
12269
 
4560 Serge 12270
		intel_dsi_init(dev);
2330 Serge 12271
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
12272
		bool found = false;
2327 Serge 12273
 
3746 Serge 12274
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
2330 Serge 12275
			DRM_DEBUG_KMS("probing SDVOB\n");
3746 Serge 12276
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
2330 Serge 12277
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
12278
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
3746 Serge 12279
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
2330 Serge 12280
			}
2327 Serge 12281
 
4104 Serge 12282
			if (!found && SUPPORTS_INTEGRATED_DP(dev))
3031 serge 12283
				intel_dp_init(dev, DP_B, PORT_B);
2330 Serge 12284
		}
2327 Serge 12285
 
2330 Serge 12286
		/* Before G4X SDVOC doesn't have its own detect register */
2327 Serge 12287
 
3746 Serge 12288
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
2330 Serge 12289
			DRM_DEBUG_KMS("probing SDVOC\n");
3746 Serge 12290
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
2330 Serge 12291
		}
2327 Serge 12292
 
3746 Serge 12293
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
2327 Serge 12294
 
2330 Serge 12295
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
12296
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
3746 Serge 12297
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
2330 Serge 12298
			}
4104 Serge 12299
			if (SUPPORTS_INTEGRATED_DP(dev))
3031 serge 12300
				intel_dp_init(dev, DP_C, PORT_C);
2330 Serge 12301
		}
2327 Serge 12302
 
2330 Serge 12303
		if (SUPPORTS_INTEGRATED_DP(dev) &&
4104 Serge 12304
		    (I915_READ(DP_D) & DP_DETECTED))
3031 serge 12305
			intel_dp_init(dev, DP_D, PORT_D);
2330 Serge 12306
	} else if (IS_GEN2(dev))
12307
		intel_dvo_init(dev);
2327 Serge 12308
 
12309
 
5354 serge 12310
	intel_psr_init(dev);
5060 serge 12311
 
5354 serge 12312
	for_each_intel_encoder(dev, encoder) {
2330 Serge 12313
		encoder->base.possible_crtcs = encoder->crtc_mask;
12314
		encoder->base.possible_clones =
3031 serge 12315
			intel_encoder_clones(encoder);
2330 Serge 12316
	}
2327 Serge 12317
 
3243 Serge 12318
	intel_init_pch_refclk(dev);
12319
 
12320
	drm_helper_move_panel_connectors_to_head(dev);
2330 Serge 12321
}
12322
 
12323
 
12324
 
2335 Serge 12325
static const struct drm_framebuffer_funcs intel_fb_funcs = {
12326
//	.destroy = intel_user_framebuffer_destroy,
12327
//	.create_handle = intel_user_framebuffer_create_handle,
12328
};
2327 Serge 12329
 
5060 serge 12330
static int intel_framebuffer_init(struct drm_device *dev,
2335 Serge 12331
			   struct intel_framebuffer *intel_fb,
2342 Serge 12332
			   struct drm_mode_fb_cmd2 *mode_cmd,
2335 Serge 12333
			   struct drm_i915_gem_object *obj)
12334
{
5060 serge 12335
	int aligned_height;
4104 Serge 12336
	int pitch_limit;
2335 Serge 12337
	int ret;
2327 Serge 12338
 
4560 Serge 12339
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
12340
 
3243 Serge 12341
	if (obj->tiling_mode == I915_TILING_Y) {
12342
		DRM_DEBUG("hardware does not support tiling Y\n");
2335 Serge 12343
		return -EINVAL;
3243 Serge 12344
	}
2327 Serge 12345
 
3243 Serge 12346
	if (mode_cmd->pitches[0] & 63) {
12347
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
12348
			  mode_cmd->pitches[0]);
12349
		return -EINVAL;
12350
	}
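
	/*
	 * Maximum supported fb pitch by generation, as enforced below:
	 * gen >= 5 (and not VLV) allows 32K; gen4 allows 16K tiled or 32K
	 * linear; gen3 allows 8K tiled or 16K linear; older parts allow 8K.
	 * As a worked example, a 1920-pixel-wide XRGB8888 framebuffer needs
	 * a pitch of at least 1920 * 4 = 7680 bytes, which fits every one
	 * of these limits.
	 */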
12351
 
4104 Serge 12352
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
12353
		pitch_limit = 32*1024;
12354
	} else if (INTEL_INFO(dev)->gen >= 4) {
12355
		if (obj->tiling_mode)
12356
			pitch_limit = 16*1024;
12357
		else
12358
			pitch_limit = 32*1024;
12359
	} else if (INTEL_INFO(dev)->gen >= 3) {
12360
		if (obj->tiling_mode)
12361
			pitch_limit = 8*1024;
12362
		else
12363
			pitch_limit = 16*1024;
12364
	} else
12365
		/* XXX DSPC is limited to 4k tiled */
12366
		pitch_limit = 8*1024;
12367
 
12368
	if (mode_cmd->pitches[0] > pitch_limit) {
12369
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
12370
			  obj->tiling_mode ? "tiled" : "linear",
12371
			  mode_cmd->pitches[0], pitch_limit);
3243 Serge 12372
		return -EINVAL;
12373
	}
12374
 
12375
	if (obj->tiling_mode != I915_TILING_NONE &&
12376
	    mode_cmd->pitches[0] != obj->stride) {
12377
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
12378
			  mode_cmd->pitches[0], obj->stride);
2335 Serge 12379
		return -EINVAL;
3243 Serge 12380
	}
2327 Serge 12381
 
3243 Serge 12382
	/* Reject formats not supported by any plane early. */
2342 Serge 12383
	switch (mode_cmd->pixel_format) {
3243 Serge 12384
	case DRM_FORMAT_C8:
2342 Serge 12385
	case DRM_FORMAT_RGB565:
12386
	case DRM_FORMAT_XRGB8888:
3243 Serge 12387
	case DRM_FORMAT_ARGB8888:
12388
		break;
12389
	case DRM_FORMAT_XRGB1555:
12390
	case DRM_FORMAT_ARGB1555:
12391
		if (INTEL_INFO(dev)->gen > 3) {
4104 Serge 12392
			DRM_DEBUG("unsupported pixel format: %s\n",
12393
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 12394
			return -EINVAL;
12395
		}
12396
		break;
3031 serge 12397
	case DRM_FORMAT_XBGR8888:
3243 Serge 12398
	case DRM_FORMAT_ABGR8888:
2342 Serge 12399
	case DRM_FORMAT_XRGB2101010:
12400
	case DRM_FORMAT_ARGB2101010:
3243 Serge 12401
	case DRM_FORMAT_XBGR2101010:
12402
	case DRM_FORMAT_ABGR2101010:
12403
		if (INTEL_INFO(dev)->gen < 4) {
4104 Serge 12404
			DRM_DEBUG("unsupported pixel format: %s\n",
12405
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 12406
			return -EINVAL;
12407
		}
2335 Serge 12408
		break;
2342 Serge 12409
	case DRM_FORMAT_YUYV:
12410
	case DRM_FORMAT_UYVY:
12411
	case DRM_FORMAT_YVYU:
12412
	case DRM_FORMAT_VYUY:
3243 Serge 12413
		if (INTEL_INFO(dev)->gen < 5) {
4104 Serge 12414
			DRM_DEBUG("unsupported pixel format: %s\n",
12415
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 12416
			return -EINVAL;
12417
		}
2342 Serge 12418
		break;
2335 Serge 12419
	default:
4104 Serge 12420
		DRM_DEBUG("unsupported pixel format: %s\n",
12421
			  drm_get_format_name(mode_cmd->pixel_format));
2335 Serge 12422
		return -EINVAL;
12423
	}
2327 Serge 12424
 
3243 Serge 12425
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
12426
	if (mode_cmd->offsets[0] != 0)
12427
		return -EINVAL;
12428
 
5060 serge 12429
	aligned_height = intel_align_height(dev, mode_cmd->height,
12430
					    obj->tiling_mode);
4560 Serge 12431
	/* FIXME drm helper for size checks (especially planar formats)? */
12432
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
12433
		return -EINVAL;
12434
 
3480 Serge 12435
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
12436
	intel_fb->obj = obj;
4560 Serge 12437
	intel_fb->obj->framebuffer_references++;
3480 Serge 12438
 
2335 Serge 12439
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
12440
	if (ret) {
12441
		DRM_ERROR("framebuffer init failed %d\n", ret);
12442
		return ret;
12443
	}
2327 Serge 12444
 
2335 Serge 12445
	return 0;
12446
}
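
/*
 * Minimal usage sketch, not part of the original driver: a caller is expected
 * to hold dev->struct_mutex and to pass an already validated mode_cmd together
 * with the backing GEM object. The helper name example_create_fb() is
 * hypothetical.
 */
static int example_create_fb(struct drm_device *dev,
			     struct drm_mode_fb_cmd2 *mode_cmd,
			     struct drm_i915_gem_object *obj,
			     struct intel_framebuffer **out)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (intel_fb == NULL)
		return -ENOMEM;

	mutex_lock(&dev->struct_mutex);
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		kfree(intel_fb);
		return ret;
	}

	*out = intel_fb;
	return 0;
}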
2327 Serge 12447
 
4560 Serge 12448
#ifndef CONFIG_DRM_I915_FBDEV
12449
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
12450
{
12451
}
12452
#endif
2327 Serge 12453
 
2360 Serge 12454
static const struct drm_mode_config_funcs intel_mode_funcs = {
4560 Serge 12455
	.fb_create = NULL,
12456
	.output_poll_changed = intel_fbdev_output_poll_changed,
2360 Serge 12457
};
2327 Serge 12458
 
3031 serge 12459
/* Set up chip specific display functions */
12460
static void intel_init_display(struct drm_device *dev)
12461
{
12462
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 12463
 
4104 Serge 12464
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
12465
		dev_priv->display.find_dpll = g4x_find_best_dpll;
5060 serge 12466
	else if (IS_CHERRYVIEW(dev))
12467
		dev_priv->display.find_dpll = chv_find_best_dpll;
4104 Serge 12468
	else if (IS_VALLEYVIEW(dev))
12469
		dev_priv->display.find_dpll = vlv_find_best_dpll;
12470
	else if (IS_PINEVIEW(dev))
12471
		dev_priv->display.find_dpll = pnv_find_best_dpll;
12472
	else
12473
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
12474
 
3480 Serge 12475
	if (HAS_DDI(dev)) {
3746 Serge 12476
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
5060 serge 12477
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
5354 serge 12478
		dev_priv->display.crtc_compute_clock =
12479
			haswell_crtc_compute_clock;
3243 Serge 12480
		dev_priv->display.crtc_enable = haswell_crtc_enable;
12481
		dev_priv->display.crtc_disable = haswell_crtc_disable;
5060 serge 12482
		dev_priv->display.off = ironlake_crtc_off;
5354 serge 12483
		if (INTEL_INFO(dev)->gen >= 9)
12484
			dev_priv->display.update_primary_plane =
12485
				skylake_update_primary_plane;
12486
		else
5060 serge 12487
			dev_priv->display.update_primary_plane =
12488
				ironlake_update_primary_plane;
3243 Serge 12489
	} else if (HAS_PCH_SPLIT(dev)) {
3746 Serge 12490
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
5060 serge 12491
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
5354 serge 12492
		dev_priv->display.crtc_compute_clock =
12493
			ironlake_crtc_compute_clock;
3031 serge 12494
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
12495
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
12496
		dev_priv->display.off = ironlake_crtc_off;
5060 serge 12497
		dev_priv->display.update_primary_plane =
12498
			ironlake_update_primary_plane;
4104 Serge 12499
	} else if (IS_VALLEYVIEW(dev)) {
12500
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5060 serge 12501
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
5354 serge 12502
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
4104 Serge 12503
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
12504
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12505
		dev_priv->display.off = i9xx_crtc_off;
5060 serge 12506
		dev_priv->display.update_primary_plane =
12507
			i9xx_update_primary_plane;
3031 serge 12508
	} else {
3746 Serge 12509
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5060 serge 12510
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
5354 serge 12511
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
3031 serge 12512
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
12513
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12514
		dev_priv->display.off = i9xx_crtc_off;
5060 serge 12515
		dev_priv->display.update_primary_plane =
12516
			i9xx_update_primary_plane;
3031 serge 12517
	}
2327 Serge 12518
 
3031 serge 12519
	/* Returns the core display clock speed */
12520
	if (IS_VALLEYVIEW(dev))
12521
		dev_priv->display.get_display_clock_speed =
12522
			valleyview_get_display_clock_speed;
12523
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
12524
		dev_priv->display.get_display_clock_speed =
12525
			i945_get_display_clock_speed;
12526
	else if (IS_I915G(dev))
12527
		dev_priv->display.get_display_clock_speed =
12528
			i915_get_display_clock_speed;
4104 Serge 12529
	else if (IS_I945GM(dev) || IS_845G(dev))
3031 serge 12530
		dev_priv->display.get_display_clock_speed =
12531
			i9xx_misc_get_display_clock_speed;
4104 Serge 12532
	else if (IS_PINEVIEW(dev))
12533
		dev_priv->display.get_display_clock_speed =
12534
			pnv_get_display_clock_speed;
3031 serge 12535
	else if (IS_I915GM(dev))
12536
		dev_priv->display.get_display_clock_speed =
12537
			i915gm_get_display_clock_speed;
12538
	else if (IS_I865G(dev))
12539
		dev_priv->display.get_display_clock_speed =
12540
			i865_get_display_clock_speed;
12541
	else if (IS_I85X(dev))
12542
		dev_priv->display.get_display_clock_speed =
12543
			i855_get_display_clock_speed;
12544
	else /* 852, 830 */
12545
		dev_priv->display.get_display_clock_speed =
12546
			i830_get_display_clock_speed;
2327 Serge 12547
 
3031 serge 12548
	if (IS_GEN5(dev)) {
12549
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12550
	} else if (IS_GEN6(dev)) {
12551
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12552
	} else if (IS_IVYBRIDGE(dev)) {
12553
		/* FIXME: detect B0+ stepping and use auto training */
12554
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
3243 Serge 12555
		dev_priv->display.modeset_global_resources =
12556
			ivb_modeset_global_resources;
5354 serge 12557
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3031 serge 12558
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
4560 Serge 12559
	} else if (IS_VALLEYVIEW(dev)) {
12560
		dev_priv->display.modeset_global_resources =
12561
			valleyview_modeset_global_resources;
3031 serge 12562
	}
2327 Serge 12563
 
3031 serge 12564
	/* Default just returns -ENODEV to indicate unsupported */
12565
//	dev_priv->display.queue_flip = intel_default_queue_flip;
2327 Serge 12566
 
12567
 
12568
 
12569
 
4560 Serge 12570
	intel_panel_init_backlight_funcs(dev);
5354 serge 12571
 
12572
	mutex_init(&dev_priv->pps_mutex);
3031 serge 12573
}
12574
 
12575
/*
12576
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
12577
 * resume, or other times.  This quirk makes sure that's the case for
12578
 * affected systems.
12579
 */
12580
static void quirk_pipea_force(struct drm_device *dev)
2330 Serge 12581
{
12582
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 12583
 
3031 serge 12584
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
12585
	DRM_INFO("applying pipe a force quirk\n");
12586
}
2327 Serge 12587
 
5354 serge 12588
static void quirk_pipeb_force(struct drm_device *dev)
12589
{
12590
	struct drm_i915_private *dev_priv = dev->dev_private;
12591
 
12592
	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
12593
	DRM_INFO("applying pipe b force quirk\n");
12594
}
12595
 
3031 serge 12596
/*
12597
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12598
 */
12599
static void quirk_ssc_force_disable(struct drm_device *dev)
12600
{
12601
	struct drm_i915_private *dev_priv = dev->dev_private;
12602
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
12603
	DRM_INFO("applying lvds SSC disable quirk\n");
2330 Serge 12604
}
2327 Serge 12605
 
3031 serge 12606
/*
12607
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
12608
 * brightness value
12609
 */
12610
static void quirk_invert_brightness(struct drm_device *dev)
2330 Serge 12611
{
12612
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12613
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
12614
	DRM_INFO("applying inverted panel brightness quirk\n");
12615
}
2327 Serge 12616
 
5060 serge 12617
/* Some VBT's incorrectly indicate no backlight is present */
12618
static void quirk_backlight_present(struct drm_device *dev)
12619
{
12620
	struct drm_i915_private *dev_priv = dev->dev_private;
12621
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
12622
	DRM_INFO("applying backlight present quirk\n");
12623
}
12624
 
3031 serge 12625
struct intel_quirk {
12626
	int device;
12627
	int subsystem_vendor;
12628
	int subsystem_device;
12629
	void (*hook)(struct drm_device *dev);
12630
};
2327 Serge 12631
 
3031 serge 12632
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
12633
struct intel_dmi_quirk {
12634
	void (*hook)(struct drm_device *dev);
12635
	const struct dmi_system_id (*dmi_id_list)[];
12636
};
2327 Serge 12637
 
3031 serge 12638
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
12639
{
12640
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
12641
	return 1;
2330 Serge 12642
}
2327 Serge 12643
 
3031 serge 12644
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
12645
	{
12646
		.dmi_id_list = &(const struct dmi_system_id[]) {
12647
			{
12648
				.callback = intel_dmi_reverse_brightness,
12649
				.ident = "NCR Corporation",
12650
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
12651
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
12652
				},
12653
			},
12654
			{ }  /* terminating entry */
12655
		},
12656
		.hook = quirk_invert_brightness,
12657
	},
12658
};
2327 Serge 12659
 
3031 serge 12660
static struct intel_quirk intel_quirks[] = {
12661
	/* HP Mini needs pipe A force quirk (LP: #322104) */
12662
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
2327 Serge 12663
 
3031 serge 12664
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
12665
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
2327 Serge 12666
 
3031 serge 12667
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12668
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
2327 Serge 12669
 
5367 serge 12670
	/* 830 needs to leave pipe A & dpll A up */
12671
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
12672
 
12673
	/* 830 needs to leave pipe B & dpll B up */
12674
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
12675
 
3031 serge 12676
	/* Lenovo U160 cannot use SSC on LVDS */
12677
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
2327 Serge 12678
 
3031 serge 12679
	/* Sony Vaio Y cannot use SSC on LVDS */
12680
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
2327 Serge 12681
 
3031 serge 12682
	/* Acer Aspire 5734Z must invert backlight brightness */
12683
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
3480 Serge 12684
 
12685
	/* Acer/eMachines G725 */
12686
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
12687
 
12688
	/* Acer/eMachines e725 */
12689
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
12690
 
12691
	/* Acer/Packard Bell NCL20 */
12692
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
12693
 
12694
	/* Acer Aspire 4736Z */
12695
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
5060 serge 12696
 
12697
	/* Acer Aspire 5336 */
12698
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
12699
 
12700
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
12701
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
12702
 
5097 serge 12703
	/* Acer C720 Chromebook (Core i3 4005U) */
12704
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
12705
 
5354 serge 12706
	/* Apple Macbook 2,1 (Core 2 T7400) */
12707
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
12708
 
5060 serge 12709
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
12710
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
12711
 
12712
	/* HP Chromebook 14 (Celeron 2955U) */
12713
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
3031 serge 12714
};
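
/*
 * Matching semantics for the table above: the PCI device id must match
 * exactly, while PCI_ANY_ID acts as a wildcard for the subsystem vendor and
 * subsystem device ids. DMI-based quirks from intel_dmi_quirks are applied
 * in a second pass.
 */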
2327 Serge 12715
 
3031 serge 12716
static void intel_init_quirks(struct drm_device *dev)
2330 Serge 12717
{
3031 serge 12718
	struct pci_dev *d = dev->pdev;
12719
	int i;
2327 Serge 12720
 
3031 serge 12721
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
12722
		struct intel_quirk *q = &intel_quirks[i];
2327 Serge 12723
 
3031 serge 12724
		if (d->device == q->device &&
12725
		    (d->subsystem_vendor == q->subsystem_vendor ||
12726
		     q->subsystem_vendor == PCI_ANY_ID) &&
12727
		    (d->subsystem_device == q->subsystem_device ||
12728
		     q->subsystem_device == PCI_ANY_ID))
12729
			q->hook(dev);
12730
	}
5097 serge 12731
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
12732
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
12733
			intel_dmi_quirks[i].hook(dev);
12734
	}
2330 Serge 12735
}
2327 Serge 12736
 
3031 serge 12737
/* Disable the VGA plane that we never use */
12738
static void i915_disable_vga(struct drm_device *dev)
2330 Serge 12739
{
12740
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12741
	u8 sr1;
3480 Serge 12742
	u32 vga_reg = i915_vgacntrl_reg(dev);
2327 Serge 12743
 
4560 Serge 12744
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
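	/* Program sequencer register SR01 and set bit 5 ("screen off") so the
	 * legacy VGA output is blanked before VGACNTRL is written below. */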
12745
	outb(SR01, VGA_SR_INDEX);
12746
	sr1 = inb(VGA_SR_DATA);
12747
	outb(sr1 | 1<<5, VGA_SR_DATA);
12748
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
3031 serge 12749
	udelay(300);
2327 Serge 12750
 
5354 serge 12751
	/*
12752
	 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
12753
	 * from S3 without preserving (some of?) the other bits.
12754
	 */
12755
	I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
3031 serge 12756
	POSTING_READ(vga_reg);
2330 Serge 12757
}
12758
 
3031 serge 12759
void intel_modeset_init_hw(struct drm_device *dev)
2342 Serge 12760
{
3031 serge 12761
	intel_prepare_ddi(dev);
2342 Serge 12762
 
5060 serge 12763
	if (IS_VALLEYVIEW(dev))
12764
		vlv_update_cdclk(dev);
12765
 
3031 serge 12766
	intel_init_clock_gating(dev);
12767
 
3482 Serge 12768
	intel_enable_gt_powersave(dev);
2342 Serge 12769
}
12770
 
3031 serge 12771
void intel_modeset_init(struct drm_device *dev)
2330 Serge 12772
{
3031 serge 12773
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 12774
	int sprite, ret;
12775
	enum pipe pipe;
12776
	struct intel_crtc *crtc;
2330 Serge 12777
 
3031 serge 12778
	drm_mode_config_init(dev);
2330 Serge 12779
 
3031 serge 12780
	dev->mode_config.min_width = 0;
12781
	dev->mode_config.min_height = 0;
2330 Serge 12782
 
3031 serge 12783
	dev->mode_config.preferred_depth = 24;
12784
	dev->mode_config.prefer_shadow = 1;
2330 Serge 12785
 
3031 serge 12786
	dev->mode_config.funcs = &intel_mode_funcs;
2330 Serge 12787
 
3031 serge 12788
	intel_init_quirks(dev);
2330 Serge 12789
 
3031 serge 12790
	intel_init_pm(dev);
2330 Serge 12791
 
3746 Serge 12792
	if (INTEL_INFO(dev)->num_pipes == 0)
12793
		return;
12794
 
3031 serge 12795
	intel_init_display(dev);
2330 Serge 12796
 
3031 serge 12797
	if (IS_GEN2(dev)) {
12798
		dev->mode_config.max_width = 2048;
12799
		dev->mode_config.max_height = 2048;
12800
	} else if (IS_GEN3(dev)) {
12801
		dev->mode_config.max_width = 4096;
12802
		dev->mode_config.max_height = 4096;
12803
	} else {
12804
		dev->mode_config.max_width = 8192;
12805
		dev->mode_config.max_height = 8192;
12806
	}
5060 serge 12807
 
12808
	if (IS_GEN2(dev)) {
12809
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12810
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12811
	} else {
12812
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
12813
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
12814
	}
12815
 
3480 Serge 12816
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
2330 Serge 12817
 
3031 serge 12818
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
3746 Serge 12819
		      INTEL_INFO(dev)->num_pipes,
12820
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
2330 Serge 12821
 
5354 serge 12822
	for_each_pipe(dev_priv, pipe) {
5060 serge 12823
		intel_crtc_init(dev, pipe);
12824
		for_each_sprite(pipe, sprite) {
12825
			ret = intel_plane_init(dev, pipe, sprite);
3031 serge 12826
			if (ret)
4104 Serge 12827
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
5060 serge 12828
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
3746 Serge 12829
		}
2330 Serge 12830
	}
12831
 
4560 Serge 12832
	intel_init_dpio(dev);
12833
 
4104 Serge 12834
	intel_shared_dpll_init(dev);
2330 Serge 12835
 
5354 serge 12836
	/* save the BIOS value before clobbering it */
12837
	dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
3031 serge 12838
	/* Just disable it once at startup */
12839
	i915_disable_vga(dev);
12840
	intel_setup_outputs(dev);
3480 Serge 12841
 
12842
	/* Just in case the BIOS is doing something questionable. */
12843
	intel_disable_fbc(dev);
2330 Serge 12844
 
5060 serge 12845
	drm_modeset_lock_all(dev);
12846
	intel_modeset_setup_hw_state(dev, false);
12847
	drm_modeset_unlock_all(dev);
12848
 
12849
	for_each_intel_crtc(dev, crtc) {
12850
		if (!crtc->active)
12851
			continue;
12852
 
12853
		/*
12854
		 * Note that reserving the BIOS fb up front prevents us
12855
		 * from stuffing other stolen allocations like the ring
12856
		 * on top.  This prevents some ugliness at boot time, and
12857
		 * can even allow for smooth boot transitions if the BIOS
12858
		 * fb is large enough for the active pipe configuration.
12859
		 */
12860
		if (dev_priv->display.get_plane_config) {
12861
			dev_priv->display.get_plane_config(crtc,
12862
							   &crtc->plane_config);
12863
			/*
12864
			 * If the fb is shared between multiple heads, we'll
12865
			 * just get the first one.
12866
			 */
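			/* Hard-code the framebuffer size to 16 MiB here instead
			 * of relying on the size filled in by get_plane_config(). */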
5367 serge 12867
			crtc->plane_config.size = 16*1024*1024;
5060 serge 12868
			intel_find_plane_obj(crtc, &crtc->plane_config);
12869
		}
12870
	}
2330 Serge 12871
}
12872
 
3031 serge 12873
static void intel_enable_pipe_a(struct drm_device *dev)
2330 Serge 12874
{
3031 serge 12875
	struct intel_connector *connector;
12876
	struct drm_connector *crt = NULL;
12877
	struct intel_load_detect_pipe load_detect_temp;
5060 serge 12878
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
2330 Serge 12879
 
3031 serge 12880
	/* We can't just switch on the pipe A, we need to set things up with a
12881
	 * proper mode and output configuration. As a gross hack, enable pipe A
12882
	 * by enabling the load detect pipe once. */
12883
	list_for_each_entry(connector,
12884
			    &dev->mode_config.connector_list,
12885
			    base.head) {
12886
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
12887
			crt = &connector->base;
12888
			break;
2330 Serge 12889
		}
12890
	}
12891
 
3031 serge 12892
	if (!crt)
12893
		return;
2330 Serge 12894
 
5060 serge 12895
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
3031 serge 12896
		intel_release_load_detect_pipe(crt, &load_detect_temp);
2327 Serge 12897
}
12898
 
3031 serge 12899
static bool
12900
intel_check_plane_mapping(struct intel_crtc *crtc)
2327 Serge 12901
{
3746 Serge 12902
	struct drm_device *dev = crtc->base.dev;
12903
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12904
	u32 reg, val;
2327 Serge 12905
 
3746 Serge 12906
	if (INTEL_INFO(dev)->num_pipes == 1)
3031 serge 12907
		return true;
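
	/* On gen2/3 the plane->pipe routing is programmable: read the other
	 * plane's control register and report a bad mapping if that plane is
	 * enabled and selects this crtc's pipe. */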
2327 Serge 12908
 
3031 serge 12909
	reg = DSPCNTR(!crtc->plane);
12910
	val = I915_READ(reg);
2327 Serge 12911
 
3031 serge 12912
	if ((val & DISPLAY_PLANE_ENABLE) &&
12913
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
12914
		return false;
2327 Serge 12915
 
3031 serge 12916
	return true;
2327 Serge 12917
}
12918
 
3031 serge 12919
static void intel_sanitize_crtc(struct intel_crtc *crtc)
2327 Serge 12920
{
3031 serge 12921
	struct drm_device *dev = crtc->base.dev;
2327 Serge 12922
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12923
	u32 reg;
2327 Serge 12924
 
3031 serge 12925
	/* Clear any frame start delays used for debugging left by the BIOS */
3746 Serge 12926
	reg = PIPECONF(crtc->config.cpu_transcoder);
3031 serge 12927
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
2327 Serge 12928
 
5060 serge 12929
	/* restore vblank interrupts to correct state */
5354 serge 12930
	if (crtc->active) {
12931
		update_scanline_offset(crtc);
5060 serge 12932
		drm_vblank_on(dev, crtc->pipe);
5354 serge 12933
	} else
5060 serge 12934
		drm_vblank_off(dev, crtc->pipe);
12935
 
3031 serge 12936
	/* We need to sanitize the plane -> pipe mapping first because this will
12937
	 * disable the crtc (and hence change the state) if it is wrong. Note
12938
	 * that gen4+ has a fixed plane -> pipe mapping.  */
12939
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
12940
		struct intel_connector *connector;
12941
		bool plane;
2327 Serge 12942
 
3031 serge 12943
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
12944
			      crtc->base.base.id);
2327 Serge 12945
 
3031 serge 12946
		/* Pipe has the wrong plane attached and the plane is active.
12947
		 * Temporarily change the plane mapping and disable everything
12948
		 * ...  */
12949
		plane = crtc->plane;
12950
		crtc->plane = !plane;
5060 serge 12951
		crtc->primary_enabled = true;
3031 serge 12952
		dev_priv->display.crtc_disable(&crtc->base);
12953
		crtc->plane = plane;
2342 Serge 12954
 
3031 serge 12955
		/* ... and break all links. */
12956
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12957
				    base.head) {
12958
			if (connector->encoder->base.crtc != &crtc->base)
12959
				continue;
2327 Serge 12960
 
5060 serge 12961
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12962
			connector->base.encoder = NULL;
3031 serge 12963
		}
5060 serge 12964
		/* multiple connectors may have the same encoder:
12965
		 *  handle them and break crtc link separately */
12966
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12967
				    base.head)
12968
			if (connector->encoder->base.crtc == &crtc->base) {
12969
				connector->encoder->base.crtc = NULL;
12970
				connector->encoder->connectors_active = false;
12971
			}
2327 Serge 12972
 
3031 serge 12973
		WARN_ON(crtc->active);
12974
		crtc->base.enabled = false;
12975
	}
2327 Serge 12976
 
3031 serge 12977
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
12978
	    crtc->pipe == PIPE_A && !crtc->active) {
12979
		/* BIOS forgot to enable pipe A, this mostly happens after
12980
		 * resume. Force-enable the pipe to fix this; in the update_dpms
12981
		 * call below we restore the pipe to the right state, but leave
12982
		 * the required bits on. */
12983
		intel_enable_pipe_a(dev);
12984
	}
2327 Serge 12985
 
3031 serge 12986
	/* Adjust the state of the output pipe according to whether we
12987
	 * have active connectors/encoders. */
12988
	intel_crtc_update_dpms(&crtc->base);
2327 Serge 12989
 
3031 serge 12990
	if (crtc->active != crtc->base.enabled) {
12991
		struct intel_encoder *encoder;
2327 Serge 12992
 
3031 serge 12993
		/* This can happen either due to bugs in the get_hw_state
12994
		 * functions or because the pipe is force-enabled due to the
12995
		 * pipe A quirk. */
12996
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
12997
			      crtc->base.base.id,
12998
			      crtc->base.enabled ? "enabled" : "disabled",
12999
			      crtc->active ? "enabled" : "disabled");
2327 Serge 13000
 
3031 serge 13001
		crtc->base.enabled = crtc->active;
2327 Serge 13002
 
3031 serge 13003
		/* Because we only establish the connector -> encoder ->
13004
		 * crtc links if something is active, this means the
13005
		 * crtc is now deactivated. Break the links. connector
13006
		 * -> encoder links are only established when things are
13007
		 *  actually up, hence no need to break them. */
13008
		WARN_ON(crtc->active);
2327 Serge 13009
 
3031 serge 13010
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
13011
			WARN_ON(encoder->connectors_active);
13012
			encoder->base.crtc = NULL;
13013
		}
13014
	}
5060 serge 13015
 
5354 serge 13016
	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
5060 serge 13017
		/*
13018
		 * We start out with underrun reporting disabled to avoid races.
13019
		 * For correct bookkeeping mark this on active crtcs.
13020
		 *
13021
		 * Also on gmch platforms we don't have any hardware bits to
13022
		 * disable the underrun reporting. Which means we need to start
13023
		 * out with underrun reporting disabled also on inactive pipes,
13024
		 * since otherwise we'll complain about the garbage we read when
13025
		 * e.g. coming up after runtime pm.
13026
		 *
13027
		 * No protection against concurrent access is required - at
13028
		 * worst a fifo underrun happens which also sets this to false.
13029
		 */
13030
		crtc->cpu_fifo_underrun_disabled = true;
13031
		crtc->pch_fifo_underrun_disabled = true;
13032
	}
2327 Serge 13033
}
13034
 
3031 serge 13035
static void intel_sanitize_encoder(struct intel_encoder *encoder)
2327 Serge 13036
{
3031 serge 13037
	struct intel_connector *connector;
13038
	struct drm_device *dev = encoder->base.dev;
2327 Serge 13039
 
3031 serge 13040
	/* We need to check both for a crtc link (meaning that the
13041
	 * encoder is active and trying to read from a pipe) and the
13042
	 * pipe itself being active. */
13043
	bool has_active_crtc = encoder->base.crtc &&
13044
		to_intel_crtc(encoder->base.crtc)->active;
2327 Serge 13045
 
3031 serge 13046
	if (encoder->connectors_active && !has_active_crtc) {
13047
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
13048
			      encoder->base.base.id,
5060 serge 13049
			      encoder->base.name);
2327 Serge 13050
 
3031 serge 13051
		/* Connector is active, but has no active pipe. This is
13052
		 * fallout from our resume register restoring. Disable
13053
		 * the encoder manually again. */
13054
		if (encoder->base.crtc) {
13055
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
13056
				      encoder->base.base.id,
5060 serge 13057
				      encoder->base.name);
3031 serge 13058
			encoder->disable(encoder);
5060 serge 13059
			if (encoder->post_disable)
13060
				encoder->post_disable(encoder);
3031 serge 13061
		}
5060 serge 13062
		encoder->base.crtc = NULL;
13063
		encoder->connectors_active = false;
2327 Serge 13064
 
3031 serge 13065
		/* Inconsistent output/port/pipe state happens presumably due to
13066
		 * a bug in one of the get_hw_state functions. Or someplace else
13067
		 * in our code, like the register restore mess on resume. Clamp
13068
		 * things to off as a safer default. */
13069
		list_for_each_entry(connector,
13070
				    &dev->mode_config.connector_list,
13071
				    base.head) {
13072
			if (connector->encoder != encoder)
13073
				continue;
5060 serge 13074
			connector->base.dpms = DRM_MODE_DPMS_OFF;
13075
			connector->base.encoder = NULL;
3031 serge 13076
		}
13077
	}
13078
	/* Enabled encoders without active connectors will be fixed in
13079
	 * the crtc fixup. */
2327 Serge 13080
}
13081
 
5060 serge 13082
void i915_redisable_vga_power_on(struct drm_device *dev)
3746 Serge 13083
{
13084
	struct drm_i915_private *dev_priv = dev->dev_private;
13085
	u32 vga_reg = i915_vgacntrl_reg(dev);
13086
 
5060 serge 13087
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
13088
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
13089
		i915_disable_vga(dev);
13090
	}
13091
}
13092
 
13093
void i915_redisable_vga(struct drm_device *dev)
13094
{
13095
	struct drm_i915_private *dev_priv = dev->dev_private;
13096
 
4104 Serge 13097
	/* This function can be called either from intel_modeset_setup_hw_state or
13098
	 * at a very early point in our resume sequence, where the power well
13099
	 * structures are not yet restored. Since this function is at a very
13100
	 * paranoid "someone might have enabled VGA while we were not looking"
13101
	 * level, just check if the power well is enabled instead of trying to
13102
	 * follow the "don't touch the power well if we don't need it" policy
13103
	 * the rest of the driver uses. */
5354 serge 13104
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
4104 Serge 13105
		return;
13106
 
5060 serge 13107
	i915_redisable_vga_power_on(dev);
3746 Serge 13108
}
13109
 
5060 serge 13110
static bool primary_get_hw_state(struct intel_crtc *crtc)
13111
{
13112
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
13113
 
13114
	if (!crtc->active)
13115
		return false;
13116
 
13117
	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
13118
}
13119
 
4104 Serge 13120
static void intel_modeset_readout_hw_state(struct drm_device *dev)
2332 Serge 13121
{
13122
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 13123
	enum pipe pipe;
13124
	struct intel_crtc *crtc;
13125
	struct intel_encoder *encoder;
13126
	struct intel_connector *connector;
4104 Serge 13127
	int i;
2327 Serge 13128
 
5060 serge 13129
	for_each_intel_crtc(dev, crtc) {
3746 Serge 13130
		memset(&crtc->config, 0, sizeof(crtc->config));
2327 Serge 13131
 
5060 serge 13132
		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
13133
 
3746 Serge 13134
		crtc->active = dev_priv->display.get_pipe_config(crtc,
13135
								 &crtc->config);
2327 Serge 13136
 
3031 serge 13137
		crtc->base.enabled = crtc->active;
5060 serge 13138
		crtc->primary_enabled = primary_get_hw_state(crtc);
2330 Serge 13139
 
3031 serge 13140
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
13141
			      crtc->base.base.id,
13142
			      crtc->active ? "enabled" : "disabled");
2339 Serge 13143
	}
2332 Serge 13144
 
4104 Serge 13145
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13146
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13147
 
5354 serge 13148
		pll->on = pll->get_hw_state(dev_priv, pll,
13149
					    &pll->config.hw_state);
4104 Serge 13150
		pll->active = 0;
5354 serge 13151
		pll->config.crtc_mask = 0;
5060 serge 13152
		for_each_intel_crtc(dev, crtc) {
5354 serge 13153
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
4104 Serge 13154
				pll->active++;
5354 serge 13155
				pll->config.crtc_mask |= 1 << crtc->pipe;
13156
			}
4104 Serge 13157
		}
13158
 
5354 serge 13159
		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
13160
			      pll->name, pll->config.crtc_mask, pll->on);
5060 serge 13161
 
5354 serge 13162
		if (pll->config.crtc_mask)
5060 serge 13163
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
4104 Serge 13164
	}
13165
 
5354 serge 13166
	for_each_intel_encoder(dev, encoder) {
3031 serge 13167
		pipe = 0;
2332 Serge 13168
 
3031 serge 13169
		if (encoder->get_hw_state(encoder, &pipe)) {
4104 Serge 13170
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
13171
			encoder->base.crtc = &crtc->base;
13172
			encoder->get_config(encoder, &crtc->config);
3031 serge 13173
		} else {
13174
			encoder->base.crtc = NULL;
13175
		}
2332 Serge 13176
 
3031 serge 13177
		encoder->connectors_active = false;
4560 Serge 13178
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
3031 serge 13179
			      encoder->base.base.id,
5060 serge 13180
			      encoder->base.name,
3031 serge 13181
			      encoder->base.crtc ? "enabled" : "disabled",
4560 Serge 13182
			      pipe_name(pipe));
3031 serge 13183
	}
2332 Serge 13184
 
3031 serge 13185
	list_for_each_entry(connector, &dev->mode_config.connector_list,
13186
			    base.head) {
13187
		if (connector->get_hw_state(connector)) {
13188
			connector->base.dpms = DRM_MODE_DPMS_ON;
13189
			connector->encoder->connectors_active = true;
13190
			connector->base.encoder = &connector->encoder->base;
13191
		} else {
13192
			connector->base.dpms = DRM_MODE_DPMS_OFF;
13193
			connector->base.encoder = NULL;
13194
		}
13195
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
13196
			      connector->base.base.id,
5060 serge 13197
			      connector->base.name,
3031 serge 13198
			      connector->base.encoder ? "enabled" : "disabled");
2332 Serge 13199
	}
4104 Serge 13200
}
2332 Serge 13201
 
4104 Serge 13202
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
13203
 * and i915 state tracking structures. */
13204
void intel_modeset_setup_hw_state(struct drm_device *dev,
13205
				  bool force_restore)
13206
{
13207
	struct drm_i915_private *dev_priv = dev->dev_private;
13208
	enum pipe pipe;
13209
	struct intel_crtc *crtc;
13210
	struct intel_encoder *encoder;
13211
	int i;
13212
 
13213
	intel_modeset_readout_hw_state(dev);
13214
 
13215
	/*
13216
	 * Now that we have the config, copy it to each CRTC struct
13217
	 * Note that this could go away if we move to using crtc_config
13218
	 * checking everywhere.
13219
	 */
5060 serge 13220
	for_each_intel_crtc(dev, crtc) {
13221
		if (crtc->active && i915.fastboot) {
13222
			intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
4104 Serge 13223
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
13224
				      crtc->base.base.id);
13225
			drm_mode_debug_printmodeline(&crtc->base.mode);
13226
		}
13227
	}
13228
 
3031 serge 13229
	/* HW state is read out, now we need to sanitize this mess. */
5354 serge 13230
	for_each_intel_encoder(dev, encoder) {
3031 serge 13231
		intel_sanitize_encoder(encoder);
2332 Serge 13232
	}
13233
 
5354 serge 13234
	for_each_pipe(dev_priv, pipe) {
3031 serge 13235
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
13236
		intel_sanitize_crtc(crtc);
4104 Serge 13237
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
2332 Serge 13238
	}
13239
 
4104 Serge 13240
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13241
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13242
 
13243
		if (!pll->on || pll->active)
13244
			continue;
13245
 
13246
		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
13247
 
13248
		pll->disable(dev_priv, pll);
13249
		pll->on = false;
13250
	}
13251
 
5354 serge 13252
	if (IS_GEN9(dev))
13253
		skl_wm_get_hw_state(dev);
13254
	else if (HAS_PCH_SPLIT(dev))
4560 Serge 13255
		ilk_wm_get_hw_state(dev);
13256
 
3243 Serge 13257
	if (force_restore) {
4560 Serge 13258
		i915_redisable_vga(dev);
13259
 
3746 Serge 13260
		/*
13261
		 * We need to use raw interfaces for restoring state to avoid
13262
		 * checking (bogus) intermediate states.
13263
		 */
5354 serge 13264
		for_each_pipe(dev_priv, pipe) {
3746 Serge 13265
			struct drm_crtc *crtc =
13266
				dev_priv->pipe_to_crtc_mapping[pipe];
13267
 
5354 serge 13268
			intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
5060 serge 13269
					 crtc->primary->fb);
3243 Serge 13270
		}
13271
	} else {
3031 serge 13272
		intel_modeset_update_staged_output_state(dev);
3243 Serge 13273
	}
2332 Serge 13274
 
3031 serge 13275
	intel_modeset_check_state(dev);
2332 Serge 13276
}
13277
 
3031 serge 13278
void intel_modeset_gem_init(struct drm_device *dev)
2330 Serge 13279
{
5354 serge 13280
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 13281
	struct drm_crtc *c;
13282
	struct drm_i915_gem_object *obj;
13283
 
13284
	mutex_lock(&dev->struct_mutex);
13285
	intel_init_gt_powersave(dev);
13286
	mutex_unlock(&dev->struct_mutex);
13287
 
5354 serge 13288
	/*
13289
	 * There may be no VBT; and if the BIOS enabled SSC we can
13290
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
13291
	 * BIOS isn't using it, don't assume it will work even if the VBT
13292
	 * indicates as much.
13293
	 */
13294
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
13295
		dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
13296
						DREF_SSC1_ENABLE);
13297
 
3031 serge 13298
	intel_modeset_init_hw(dev);
2330 Serge 13299
 
3031 serge 13300
//   intel_setup_overlay(dev);
2330 Serge 13301
 
5060 serge 13302
	/*
13303
	 * Make sure any fbs we allocated at startup are properly
13304
	 * pinned & fenced.  When we do the allocation it's too early
13305
	 * for this.
13306
	 */
13307
	mutex_lock(&dev->struct_mutex);
13308
	for_each_crtc(dev, c) {
13309
		obj = intel_fb_obj(c->primary->fb);
13310
		if (obj == NULL)
13311
			continue;
13312
 
5354 serge 13313
		if (intel_pin_and_fence_fb_obj(c->primary,
13314
					       c->primary->fb,
13315
					       NULL)) {
5060 serge 13316
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
13317
				  to_intel_crtc(c)->pipe);
13318
			drm_framebuffer_unreference(c->primary->fb);
13319
			c->primary->fb = NULL;
13320
		}
13321
	}
13322
	mutex_unlock(&dev->struct_mutex);
2330 Serge 13323
}
13324
 
5060 serge 13325
void intel_connector_unregister(struct intel_connector *intel_connector)
13326
{
13327
	struct drm_connector *connector = &intel_connector->base;
13328
 
13329
	intel_panel_destroy_backlight(connector);
13330
	drm_connector_unregister(connector);
13331
}
13332
 
3031 serge 13333
void intel_modeset_cleanup(struct drm_device *dev)
2327 Serge 13334
{
3031 serge 13335
#if 0
13336
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 13337
	struct drm_connector *connector;
2327 Serge 13338
 
5354 serge 13339
	intel_disable_gt_powersave(dev);
13340
 
13341
	intel_backlight_unregister(dev);
13342
 
4104 Serge 13343
	/*
13344
	 * Interrupts and polling as the first thing to avoid creating havoc.
5354 serge 13345
	 * Too much stuff here (turning off connectors, ...) would
4104 Serge 13346
	 * experience fancy races otherwise.
13347
	 */
5354 serge 13348
	intel_irq_uninstall(dev_priv);
5060 serge 13349
 
4104 Serge 13350
	/*
13351
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
13352
	 * poll handlers. Hence disable polling after hpd handling is shut down.
13353
	 */
4560 Serge 13354
	drm_kms_helper_poll_fini(dev);
4104 Serge 13355
 
3031 serge 13356
	mutex_lock(&dev->struct_mutex);
2327 Serge 13357
 
4560 Serge 13358
	intel_unregister_dsm_handler();
2327 Serge 13359
 
3031 serge 13360
	intel_disable_fbc(dev);
2342 Serge 13361
 
3031 serge 13362
	ironlake_teardown_rc6(dev);
2327 Serge 13363
 
3031 serge 13364
	mutex_unlock(&dev->struct_mutex);
2327 Serge 13365
 
4104 Serge 13366
	/* flush any delayed tasks or pending work */
13367
	flush_scheduled_work();
2327 Serge 13368
 
4560 Serge 13369
	/* destroy the backlight and sysfs files before encoders/connectors */
13370
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
5060 serge 13371
		struct intel_connector *intel_connector;
13372
 
13373
		intel_connector = to_intel_connector(connector);
13374
		intel_connector->unregister(intel_connector);
4560 Serge 13375
	}
2327 Serge 13376
 
3031 serge 13377
	drm_mode_config_cleanup(dev);
5060 serge 13378
 
13379
	intel_cleanup_overlay(dev);
13380
 
13381
	mutex_lock(&dev->struct_mutex);
13382
	intel_cleanup_gt_powersave(dev);
13383
	mutex_unlock(&dev->struct_mutex);
2327 Serge 13384
#endif
13385
}
13386
 
13387
/*
3031 serge 13388
 * Return which encoder is currently attached for connector.
2327 Serge 13389
 */
3031 serge 13390
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
2327 Serge 13391
{
3031 serge 13392
	return &intel_attached_encoder(connector)->base;
13393
}
2327 Serge 13394
 
3031 serge 13395
void intel_connector_attach_encoder(struct intel_connector *connector,
13396
				    struct intel_encoder *encoder)
13397
{
13398
	connector->encoder = encoder;
13399
	drm_mode_connector_attach_encoder(&connector->base,
13400
					  &encoder->base);
2327 Serge 13401
}
13402
 
13403
/*
3031 serge 13404
 * set vga decode state - true == enable VGA decode
2327 Serge 13405
 */
3031 serge 13406
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
2327 Serge 13407
{
2330 Serge 13408
	struct drm_i915_private *dev_priv = dev->dev_private;
4539 Serge 13409
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
3031 serge 13410
	u16 gmch_ctrl;
2327 Serge 13411
 
5060 serge 13412
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
13413
		DRM_ERROR("failed to read control word\n");
13414
		return -EIO;
13415
	}
13416
 
13417
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
13418
		return 0;
13419
 
3031 serge 13420
	if (state)
13421
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
2330 Serge 13422
	else
3031 serge 13423
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
5060 serge 13424
 
13425
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
13426
		DRM_ERROR("failed to write control word\n");
13427
		return -EIO;
13428
	}
13429
 
3031 serge 13430
	return 0;
2330 Serge 13431
}
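
/*
 * Minimal sketch, assuming a hypothetical caller: toggle legacy VGA decode
 * and log a failure. Only symbols already used in this file are relied on.
 */
static int example_toggle_vga_decode(struct drm_device *dev, bool enable)
{
	int ret;

	ret = intel_modeset_vga_set_state(dev, enable);
	if (ret)
		DRM_ERROR("failed to %s VGA decode: %d\n",
			  enable ? "enable" : "disable", ret);

	return ret;
}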
13432
 
3031 serge 13433
#ifdef CONFIG_DEBUG_FS
2327 Serge 13434
 
3031 serge 13435
struct intel_display_error_state {
4104 Serge 13436
 
13437
	u32 power_well_driver;
13438
 
13439
	int num_transcoders;
13440
 
3031 serge 13441
	struct intel_cursor_error_state {
13442
		u32 control;
13443
		u32 position;
13444
		u32 base;
13445
		u32 size;
13446
	} cursor[I915_MAX_PIPES];
2327 Serge 13447
 
3031 serge 13448
	struct intel_pipe_error_state {
4560 Serge 13449
		bool power_domain_on;
3031 serge 13450
		u32 source;
5060 serge 13451
		u32 stat;
3031 serge 13452
	} pipe[I915_MAX_PIPES];
2327 Serge 13453
 
3031 serge 13454
	struct intel_plane_error_state {
13455
		u32 control;
13456
		u32 stride;
13457
		u32 size;
13458
		u32 pos;
13459
		u32 addr;
13460
		u32 surface;
13461
		u32 tile_offset;
13462
	} plane[I915_MAX_PIPES];
4104 Serge 13463
 
13464
	struct intel_transcoder_error_state {
4560 Serge 13465
		bool power_domain_on;
4104 Serge 13466
		enum transcoder cpu_transcoder;
13467
 
13468
		u32 conf;
13469
 
13470
		u32 htotal;
13471
		u32 hblank;
13472
		u32 hsync;
13473
		u32 vtotal;
13474
		u32 vblank;
13475
		u32 vsync;
13476
	} transcoder[4];
3031 serge 13477
};
2327 Serge 13478
 
3031 serge 13479
struct intel_display_error_state *
13480
intel_display_capture_error_state(struct drm_device *dev)
13481
{
5060 serge 13482
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 13483
	struct intel_display_error_state *error;
4104 Serge 13484
	int transcoders[] = {
13485
		TRANSCODER_A,
13486
		TRANSCODER_B,
13487
		TRANSCODER_C,
13488
		TRANSCODER_EDP,
13489
	};
3031 serge 13490
	int i;
2327 Serge 13491
 
4104 Serge 13492
	if (INTEL_INFO(dev)->num_pipes == 0)
13493
		return NULL;
13494
 
4560 Serge 13495
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
3031 serge 13496
	if (error == NULL)
13497
		return NULL;
2327 Serge 13498
 
4560 Serge 13499
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4104 Serge 13500
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
13501
 
5354 serge 13502
	for_each_pipe(dev_priv, i) {
4560 Serge 13503
		error->pipe[i].power_domain_on =
5354 serge 13504
			__intel_display_power_is_enabled(dev_priv,
5060 serge 13505
						       POWER_DOMAIN_PIPE(i));
4560 Serge 13506
		if (!error->pipe[i].power_domain_on)
13507
			continue;
13508
 
3031 serge 13509
		error->cursor[i].control = I915_READ(CURCNTR(i));
13510
		error->cursor[i].position = I915_READ(CURPOS(i));
13511
		error->cursor[i].base = I915_READ(CURBASE(i));
2327 Serge 13512
 
3031 serge 13513
		error->plane[i].control = I915_READ(DSPCNTR(i));
13514
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
3746 Serge 13515
		if (INTEL_INFO(dev)->gen <= 3) {
3031 serge 13516
			error->plane[i].size = I915_READ(DSPSIZE(i));
13517
			error->plane[i].pos = I915_READ(DSPPOS(i));
3746 Serge 13518
		}
13519
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3031 serge 13520
			error->plane[i].addr = I915_READ(DSPADDR(i));
13521
		if (INTEL_INFO(dev)->gen >= 4) {
13522
			error->plane[i].surface = I915_READ(DSPSURF(i));
13523
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
13524
		}
2327 Serge 13525
 
3031 serge 13526
		error->pipe[i].source = I915_READ(PIPESRC(i));
5060 serge 13527
 
13528
		if (HAS_GMCH_DISPLAY(dev))
13529
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
3031 serge 13530
	}
2327 Serge 13531
 
4104 Serge 13532
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
13533
	if (HAS_DDI(dev_priv->dev))
13534
		error->num_transcoders++; /* Account for eDP. */
13535
 
13536
	for (i = 0; i < error->num_transcoders; i++) {
13537
		enum transcoder cpu_transcoder = transcoders[i];
13538
 
4560 Serge 13539
		error->transcoder[i].power_domain_on =
5354 serge 13540
			__intel_display_power_is_enabled(dev_priv,
4560 Serge 13541
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13542
		if (!error->transcoder[i].power_domain_on)
13543
			continue;
13544
 
4104 Serge 13545
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
13546
 
13547
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
13548
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
13549
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
13550
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
13551
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
13552
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
13553
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
13554
	}
13555
 
3031 serge 13556
	return error;
2330 Serge 13557
}
2327 Serge 13558
 
4104 Serge 13559
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13560
 
3031 serge 13561
void
4104 Serge 13562
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
3031 serge 13563
				struct drm_device *dev,
13564
				struct intel_display_error_state *error)
2332 Serge 13565
{
5354 serge 13566
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 13567
	int i;
2330 Serge 13568
 
4104 Serge 13569
	if (!error)
13570
		return;
13571
 
13572
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
4560 Serge 13573
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4104 Serge 13574
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
13575
			   error->power_well_driver);
5354 serge 13576
	for_each_pipe(dev_priv, i) {
4104 Serge 13577
		err_printf(m, "Pipe [%d]:\n", i);
4560 Serge 13578
		err_printf(m, "  Power: %s\n",
13579
			   error->pipe[i].power_domain_on ? "on" : "off");
4104 Serge 13580
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
5060 serge 13581
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
2332 Serge 13582
 
4104 Serge 13583
		err_printf(m, "Plane [%d]:\n", i);
13584
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
13585
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
3746 Serge 13586
		if (INTEL_INFO(dev)->gen <= 3) {
4104 Serge 13587
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
13588
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
3746 Serge 13589
		}
13590
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
4104 Serge 13591
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
3031 serge 13592
		if (INTEL_INFO(dev)->gen >= 4) {
4104 Serge 13593
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
13594
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
3031 serge 13595
		}
2332 Serge 13596
 
4104 Serge 13597
		err_printf(m, "Cursor [%d]:\n", i);
13598
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
13599
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
13600
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
3031 serge 13601
	}
4104 Serge 13602
 
13603
	for (i = 0; i < error->num_transcoders; i++) {
4560 Serge 13604
		err_printf(m, "CPU transcoder: %c\n",
4104 Serge 13605
			   transcoder_name(error->transcoder[i].cpu_transcoder));
4560 Serge 13606
		err_printf(m, "  Power: %s\n",
13607
			   error->transcoder[i].power_domain_on ? "on" : "off");
4104 Serge 13608
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
13609
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
13610
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
13611
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
13612
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
13613
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
13614
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
13615
	}
2327 Serge 13616
}
3031 serge 13617
#endif
5354 serge 13618
 
13619
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
13620
{
13621
	struct intel_crtc *crtc;
13622
 
13623
	for_each_intel_crtc(dev, crtc) {
13624
		struct intel_unpin_work *work;
13625
 
13626
		spin_lock_irq(&dev->event_lock);
13627
 
13628
		work = crtc->unpin_work;
13629
 
13630
		if (work && work->event &&
13631
		    work->event->base.file_priv == file) {
13632
			kfree(work->event);
13633
			work->event = NULL;
13634
		}
13635
 
13636
		spin_unlock_irq(&dev->event_lock);
13637
	}
13638
}