Subversion Repositories Kolibri OS


Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt 
25
 */
26
 
5097 serge 27
#include 
2327 Serge 28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
5354 serge 33
#include 
2342 Serge 34
#include 
3031 serge 35
#include 
2327 Serge 36
#include "intel_drv.h"
3031 serge 37
#include 
2327 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
3031 serge 40
#include 
41
#include 
5060 serge 42
#include 
43
#include 
44
#include 
2327 Serge 45
 
5060 serge 46
/* Primary plane formats supported by all gen */
47
#define COMMON_PRIMARY_FORMATS \
48
	DRM_FORMAT_C8, \
49
	DRM_FORMAT_RGB565, \
50
	DRM_FORMAT_XRGB8888, \
51
	DRM_FORMAT_ARGB8888
52
 
53
/* Primary plane formats for gen <= 3 */
54
static const uint32_t intel_primary_formats_gen2[] = {
55
	COMMON_PRIMARY_FORMATS,
56
	DRM_FORMAT_XRGB1555,
57
	DRM_FORMAT_ARGB1555,
58
};
59
 
60
/* Primary plane formats for gen >= 4 */
61
static const uint32_t intel_primary_formats_gen4[] = {
62
	COMMON_PRIMARY_FORMATS, \
63
	DRM_FORMAT_XBGR8888,
64
	DRM_FORMAT_ABGR8888,
65
	DRM_FORMAT_XRGB2101010,
66
	DRM_FORMAT_ARGB2101010,
67
	DRM_FORMAT_XBGR2101010,
68
	DRM_FORMAT_ABGR2101010,
69
};
70
 
71
/* Cursor formats */
72
static const uint32_t intel_cursor_formats[] = {
73
	DRM_FORMAT_ARGB8888,
74
};
75
 
76
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
2327 Serge 77
 
4104 Serge 78
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
79
				struct intel_crtc_config *pipe_config);
4560 Serge 80
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
4104 Serge 81
				    struct intel_crtc_config *pipe_config);
2327 Serge 82
 
4104 Serge 83
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
84
			  int x, int y, struct drm_framebuffer *old_fb);
5060 serge 85
static int intel_framebuffer_init(struct drm_device *dev,
86
				  struct intel_framebuffer *ifb,
87
				  struct drm_mode_fb_cmd2 *mode_cmd,
88
				  struct drm_i915_gem_object *obj);
89
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
90
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
91
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5354 serge 92
					 struct intel_link_m_n *m_n,
93
					 struct intel_link_m_n *m2_n2);
5060 serge 94
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
95
static void haswell_set_pipeconf(struct drm_crtc *crtc);
96
static void intel_set_pipe_csc(struct drm_crtc *crtc);
5354 serge 97
static void vlv_prepare_pll(struct intel_crtc *crtc,
98
			    const struct intel_crtc_config *pipe_config);
99
static void chv_prepare_pll(struct intel_crtc *crtc,
100
			    const struct intel_crtc_config *pipe_config);
4104 Serge 101
 
5060 serge 102
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
103
{
104
	if (!connector->mst_port)
105
		return connector->encoder;
106
	else
107
		return &connector->mst_port->mst_encoders[pipe]->base;
108
}
4104 Serge 109
 
2327 Serge 110
typedef struct {
111
    int min, max;
112
} intel_range_t;
113
 
114
typedef struct {
115
    int dot_limit;
116
    int p2_slow, p2_fast;
117
} intel_p2_t;
118
 
119
typedef struct intel_limit intel_limit_t;
120
struct intel_limit {
121
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
122
    intel_p2_t      p2;
123
};
124
 
3243 Serge 125
int
126
intel_pch_rawclk(struct drm_device *dev)
127
{
128
	struct drm_i915_private *dev_priv = dev->dev_private;
129
 
130
	WARN_ON(!HAS_PCH_SPLIT(dev));
131
 
132
	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
133
}
134
 
2327 Serge 135
static inline u32 /* units of 100MHz */
136
intel_fdi_link_freq(struct drm_device *dev)
137
{
138
	if (IS_GEN5(dev)) {
139
		struct drm_i915_private *dev_priv = dev->dev_private;
140
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
141
	} else
142
		return 27;
143
}
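/*
 * Worked example (illustrative, not part of the original source): the
 * value returned above is in units of 100 MHz, so the fixed fallback of
 * 27 corresponds to a 2.7 GHz FDI link clock, and on GEN5 the BIOS
 * feedback divider read above, plus 2, is interpreted in the same units.
 */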
144
 
4104 Serge 145
static const intel_limit_t intel_limits_i8xx_dac = {
146
	.dot = { .min = 25000, .max = 350000 },
4560 Serge 147
	.vco = { .min = 908000, .max = 1512000 },
148
	.n = { .min = 2, .max = 16 },
4104 Serge 149
	.m = { .min = 96, .max = 140 },
150
	.m1 = { .min = 18, .max = 26 },
151
	.m2 = { .min = 6, .max = 16 },
152
	.p = { .min = 4, .max = 128 },
153
	.p1 = { .min = 2, .max = 33 },
154
	.p2 = { .dot_limit = 165000,
155
		.p2_slow = 4, .p2_fast = 2 },
156
};
157
 
2327 Serge 158
static const intel_limit_t intel_limits_i8xx_dvo = {
159
        .dot = { .min = 25000, .max = 350000 },
4560 Serge 160
	.vco = { .min = 908000, .max = 1512000 },
161
	.n = { .min = 2, .max = 16 },
2327 Serge 162
        .m = { .min = 96, .max = 140 },
163
        .m1 = { .min = 18, .max = 26 },
164
        .m2 = { .min = 6, .max = 16 },
165
        .p = { .min = 4, .max = 128 },
166
        .p1 = { .min = 2, .max = 33 },
167
	.p2 = { .dot_limit = 165000,
4104 Serge 168
		.p2_slow = 4, .p2_fast = 4 },
2327 Serge 169
};
170
 
171
static const intel_limit_t intel_limits_i8xx_lvds = {
172
        .dot = { .min = 25000, .max = 350000 },
4560 Serge 173
	.vco = { .min = 908000, .max = 1512000 },
174
	.n = { .min = 2, .max = 16 },
2327 Serge 175
        .m = { .min = 96, .max = 140 },
176
        .m1 = { .min = 18, .max = 26 },
177
        .m2 = { .min = 6, .max = 16 },
178
        .p = { .min = 4, .max = 128 },
179
        .p1 = { .min = 1, .max = 6 },
180
	.p2 = { .dot_limit = 165000,
181
		.p2_slow = 14, .p2_fast = 7 },
182
};
183
 
184
static const intel_limit_t intel_limits_i9xx_sdvo = {
185
        .dot = { .min = 20000, .max = 400000 },
186
        .vco = { .min = 1400000, .max = 2800000 },
187
        .n = { .min = 1, .max = 6 },
188
        .m = { .min = 70, .max = 120 },
3480 Serge 189
	.m1 = { .min = 8, .max = 18 },
190
	.m2 = { .min = 3, .max = 7 },
2327 Serge 191
        .p = { .min = 5, .max = 80 },
192
        .p1 = { .min = 1, .max = 8 },
193
	.p2 = { .dot_limit = 200000,
194
		.p2_slow = 10, .p2_fast = 5 },
195
};
196
 
197
static const intel_limit_t intel_limits_i9xx_lvds = {
198
        .dot = { .min = 20000, .max = 400000 },
199
        .vco = { .min = 1400000, .max = 2800000 },
200
        .n = { .min = 1, .max = 6 },
201
        .m = { .min = 70, .max = 120 },
3480 Serge 202
	.m1 = { .min = 8, .max = 18 },
203
	.m2 = { .min = 3, .max = 7 },
2327 Serge 204
        .p = { .min = 7, .max = 98 },
205
        .p1 = { .min = 1, .max = 8 },
206
	.p2 = { .dot_limit = 112000,
207
		.p2_slow = 14, .p2_fast = 7 },
208
};
209
 
210
 
211
static const intel_limit_t intel_limits_g4x_sdvo = {
212
	.dot = { .min = 25000, .max = 270000 },
213
	.vco = { .min = 1750000, .max = 3500000},
214
	.n = { .min = 1, .max = 4 },
215
	.m = { .min = 104, .max = 138 },
216
	.m1 = { .min = 17, .max = 23 },
217
	.m2 = { .min = 5, .max = 11 },
218
	.p = { .min = 10, .max = 30 },
219
	.p1 = { .min = 1, .max = 3},
220
	.p2 = { .dot_limit = 270000,
221
		.p2_slow = 10,
222
		.p2_fast = 10
223
	},
224
};
225
 
226
static const intel_limit_t intel_limits_g4x_hdmi = {
227
	.dot = { .min = 22000, .max = 400000 },
228
	.vco = { .min = 1750000, .max = 3500000},
229
	.n = { .min = 1, .max = 4 },
230
	.m = { .min = 104, .max = 138 },
231
	.m1 = { .min = 16, .max = 23 },
232
	.m2 = { .min = 5, .max = 11 },
233
	.p = { .min = 5, .max = 80 },
234
	.p1 = { .min = 1, .max = 8},
235
	.p2 = { .dot_limit = 165000,
236
		.p2_slow = 10, .p2_fast = 5 },
237
};
238
 
239
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
240
	.dot = { .min = 20000, .max = 115000 },
241
	.vco = { .min = 1750000, .max = 3500000 },
242
	.n = { .min = 1, .max = 3 },
243
	.m = { .min = 104, .max = 138 },
244
	.m1 = { .min = 17, .max = 23 },
245
	.m2 = { .min = 5, .max = 11 },
246
	.p = { .min = 28, .max = 112 },
247
	.p1 = { .min = 2, .max = 8 },
248
	.p2 = { .dot_limit = 0,
249
		.p2_slow = 14, .p2_fast = 14
250
	},
251
};
252
 
253
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
254
	.dot = { .min = 80000, .max = 224000 },
255
	.vco = { .min = 1750000, .max = 3500000 },
256
	.n = { .min = 1, .max = 3 },
257
	.m = { .min = 104, .max = 138 },
258
	.m1 = { .min = 17, .max = 23 },
259
	.m2 = { .min = 5, .max = 11 },
260
	.p = { .min = 14, .max = 42 },
261
	.p1 = { .min = 2, .max = 6 },
262
	.p2 = { .dot_limit = 0,
263
		.p2_slow = 7, .p2_fast = 7
264
	},
265
};
266
 
267
static const intel_limit_t intel_limits_pineview_sdvo = {
268
        .dot = { .min = 20000, .max = 400000},
269
        .vco = { .min = 1700000, .max = 3500000 },
270
	/* Pineview's Ncounter is a ring counter */
271
        .n = { .min = 3, .max = 6 },
272
        .m = { .min = 2, .max = 256 },
273
	/* Pineview only has one combined m divider, which we treat as m2. */
274
        .m1 = { .min = 0, .max = 0 },
275
        .m2 = { .min = 0, .max = 254 },
276
        .p = { .min = 5, .max = 80 },
277
        .p1 = { .min = 1, .max = 8 },
278
	.p2 = { .dot_limit = 200000,
279
		.p2_slow = 10, .p2_fast = 5 },
280
};
281
 
282
static const intel_limit_t intel_limits_pineview_lvds = {
283
        .dot = { .min = 20000, .max = 400000 },
284
        .vco = { .min = 1700000, .max = 3500000 },
285
        .n = { .min = 3, .max = 6 },
286
        .m = { .min = 2, .max = 256 },
287
        .m1 = { .min = 0, .max = 0 },
288
        .m2 = { .min = 0, .max = 254 },
289
        .p = { .min = 7, .max = 112 },
290
        .p1 = { .min = 1, .max = 8 },
291
	.p2 = { .dot_limit = 112000,
292
		.p2_slow = 14, .p2_fast = 14 },
293
};
294
 
295
/* Ironlake / Sandybridge
296
 *
297
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
298
 * the range value for them is (actual_value - 2).
299
 */
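/*
 * Example of the +2 convention above (illustrative): an entry such as
 * .n = { .min = 1, .max = 5 } holds register-style values, so the actual
 * divider range is 3..7 once the hardware offset of 2 is added back
 * (see e.g. i9xx_clock() further down, which divides by clock->n + 2).
 */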
300
static const intel_limit_t intel_limits_ironlake_dac = {
301
	.dot = { .min = 25000, .max = 350000 },
302
	.vco = { .min = 1760000, .max = 3510000 },
303
	.n = { .min = 1, .max = 5 },
304
	.m = { .min = 79, .max = 127 },
305
	.m1 = { .min = 12, .max = 22 },
306
	.m2 = { .min = 5, .max = 9 },
307
	.p = { .min = 5, .max = 80 },
308
	.p1 = { .min = 1, .max = 8 },
309
	.p2 = { .dot_limit = 225000,
310
		.p2_slow = 10, .p2_fast = 5 },
311
};
312
 
313
static const intel_limit_t intel_limits_ironlake_single_lvds = {
314
	.dot = { .min = 25000, .max = 350000 },
315
	.vco = { .min = 1760000, .max = 3510000 },
316
	.n = { .min = 1, .max = 3 },
317
	.m = { .min = 79, .max = 118 },
318
	.m1 = { .min = 12, .max = 22 },
319
	.m2 = { .min = 5, .max = 9 },
320
	.p = { .min = 28, .max = 112 },
321
	.p1 = { .min = 2, .max = 8 },
322
	.p2 = { .dot_limit = 225000,
323
		.p2_slow = 14, .p2_fast = 14 },
324
};
325
 
326
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
327
	.dot = { .min = 25000, .max = 350000 },
328
	.vco = { .min = 1760000, .max = 3510000 },
329
	.n = { .min = 1, .max = 3 },
330
	.m = { .min = 79, .max = 127 },
331
	.m1 = { .min = 12, .max = 22 },
332
	.m2 = { .min = 5, .max = 9 },
333
	.p = { .min = 14, .max = 56 },
334
	.p1 = { .min = 2, .max = 8 },
335
	.p2 = { .dot_limit = 225000,
336
		.p2_slow = 7, .p2_fast = 7 },
337
};
338
 
339
/* LVDS 100 MHz refclk limits. */
340
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
341
	.dot = { .min = 25000, .max = 350000 },
342
	.vco = { .min = 1760000, .max = 3510000 },
343
	.n = { .min = 1, .max = 2 },
344
	.m = { .min = 79, .max = 126 },
345
	.m1 = { .min = 12, .max = 22 },
346
	.m2 = { .min = 5, .max = 9 },
347
	.p = { .min = 28, .max = 112 },
2342 Serge 348
	.p1 = { .min = 2, .max = 8 },
2327 Serge 349
	.p2 = { .dot_limit = 225000,
350
		.p2_slow = 14, .p2_fast = 14 },
351
};
352
 
353
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
354
	.dot = { .min = 25000, .max = 350000 },
355
	.vco = { .min = 1760000, .max = 3510000 },
356
	.n = { .min = 1, .max = 3 },
357
	.m = { .min = 79, .max = 126 },
358
	.m1 = { .min = 12, .max = 22 },
359
	.m2 = { .min = 5, .max = 9 },
360
	.p = { .min = 14, .max = 42 },
2342 Serge 361
	.p1 = { .min = 2, .max = 6 },
2327 Serge 362
	.p2 = { .dot_limit = 225000,
363
		.p2_slow = 7, .p2_fast = 7 },
364
};
365
 
4560 Serge 366
static const intel_limit_t intel_limits_vlv = {
367
	 /*
368
	  * These are the data rate limits (measured in fast clocks)
369
	  * since those are the strictest limits we have. The fast
370
	  * clock and actual rate limits are more relaxed, so checking
371
	  * them would make no difference.
372
	  */
373
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
3031 serge 374
	.vco = { .min = 4000000, .max = 6000000 },
375
	.n = { .min = 1, .max = 7 },
376
	.m1 = { .min = 2, .max = 3 },
377
	.m2 = { .min = 11, .max = 156 },
378
	.p1 = { .min = 2, .max = 3 },
4560 Serge 379
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
3031 serge 380
};
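/*
 * Illustrative numbers for the comment above: the .dot limits are in
 * "fast clock" units, i.e. 5x the pixel clock, which is why
 * vlv_find_best_dpll() and chv_find_best_dpll() multiply their target
 * by 5. A 148500 kHz pixel clock is therefore compared against these
 * limits as 742500.
 */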
381
 
5060 serge 382
static const intel_limit_t intel_limits_chv = {
383
	/*
384
	 * These are the data rate limits (measured in fast clocks)
385
	 * since those are the strictest limits we have.  The fast
386
	 * clock and actual rate limits are more relaxed, so checking
387
	 * them would make no difference.
388
	 */
389
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
390
	.vco = { .min = 4860000, .max = 6700000 },
391
	.n = { .min = 1, .max = 1 },
392
	.m1 = { .min = 2, .max = 2 },
393
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
394
	.p1 = { .min = 2, .max = 4 },
395
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
396
};
397
 
4560 Serge 398
static void vlv_clock(int refclk, intel_clock_t *clock)
399
{
400
	clock->m = clock->m1 * clock->m2;
401
	clock->p = clock->p1 * clock->p2;
402
	if (WARN_ON(clock->n == 0 || clock->p == 0))
403
		return;
404
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
405
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
406
}
3031 serge 407
 
4560 Serge 408
/**
409
 * Returns whether any output on the specified pipe is of the specified type
410
 */
5354 serge 411
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
4560 Serge 412
{
5354 serge 413
	struct drm_device *dev = crtc->base.dev;
4560 Serge 414
	struct intel_encoder *encoder;
415
 
5354 serge 416
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4560 Serge 417
		if (encoder->type == type)
418
			return true;
419
 
420
	return false;
421
}
422
 
5354 serge 423
/**
424
 * Returns whether any output on the specified pipe will have the specified
425
 * type after a staged modeset is complete, i.e., the same as
426
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
427
 * encoder->crtc.
428
 */
429
static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
430
{
431
	struct drm_device *dev = crtc->base.dev;
432
	struct intel_encoder *encoder;
433
 
434
	for_each_intel_encoder(dev, encoder)
435
		if (encoder->new_crtc == crtc && encoder->type == type)
436
			return true;
437
 
438
	return false;
439
}
440
 
441
static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
2327 Serge 442
						int refclk)
443
{
5354 serge 444
	struct drm_device *dev = crtc->base.dev;
2327 Serge 445
	const intel_limit_t *limit;
446
 
5354 serge 447
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 448
		if (intel_is_dual_link_lvds(dev)) {
2327 Serge 449
			if (refclk == 100000)
450
				limit = &intel_limits_ironlake_dual_lvds_100m;
451
			else
452
				limit = &intel_limits_ironlake_dual_lvds;
453
		} else {
454
			if (refclk == 100000)
455
				limit = &intel_limits_ironlake_single_lvds_100m;
456
			else
457
				limit = &intel_limits_ironlake_single_lvds;
458
		}
4104 Serge 459
	} else
2327 Serge 460
		limit = &intel_limits_ironlake_dac;
461
 
462
	return limit;
463
}
464
 
5354 serge 465
static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
2327 Serge 466
{
5354 serge 467
	struct drm_device *dev = crtc->base.dev;
2327 Serge 468
	const intel_limit_t *limit;
469
 
5354 serge 470
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 471
		if (intel_is_dual_link_lvds(dev))
2327 Serge 472
			limit = &intel_limits_g4x_dual_channel_lvds;
473
		else
474
			limit = &intel_limits_g4x_single_channel_lvds;
5354 serge 475
	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
476
		   intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
2327 Serge 477
		limit = &intel_limits_g4x_hdmi;
5354 serge 478
	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
2327 Serge 479
		limit = &intel_limits_g4x_sdvo;
480
	} else /* The option is for other outputs */
481
		limit = &intel_limits_i9xx_sdvo;
482
 
483
	return limit;
484
}
485
 
5354 serge 486
static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
2327 Serge 487
{
5354 serge 488
	struct drm_device *dev = crtc->base.dev;
2327 Serge 489
	const intel_limit_t *limit;
490
 
491
	if (HAS_PCH_SPLIT(dev))
492
		limit = intel_ironlake_limit(crtc, refclk);
493
	else if (IS_G4X(dev)) {
494
		limit = intel_g4x_limit(crtc);
495
	} else if (IS_PINEVIEW(dev)) {
5354 serge 496
		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
2327 Serge 497
			limit = &intel_limits_pineview_lvds;
498
		else
499
			limit = &intel_limits_pineview_sdvo;
5060 serge 500
	} else if (IS_CHERRYVIEW(dev)) {
501
		limit = &intel_limits_chv;
3031 serge 502
	} else if (IS_VALLEYVIEW(dev)) {
4560 Serge 503
		limit = &intel_limits_vlv;
2327 Serge 504
	} else if (!IS_GEN2(dev)) {
5354 serge 505
		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
2327 Serge 506
			limit = &intel_limits_i9xx_lvds;
507
		else
508
			limit = &intel_limits_i9xx_sdvo;
509
	} else {
5354 serge 510
		if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
2327 Serge 511
			limit = &intel_limits_i8xx_lvds;
5354 serge 512
		else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
4104 Serge 513
			limit = &intel_limits_i8xx_dvo;
2327 Serge 514
		else
4104 Serge 515
			limit = &intel_limits_i8xx_dac;
2327 Serge 516
	}
517
	return limit;
518
}
519
 
520
/* m1 is reserved as 0 in Pineview, n is a ring counter */
521
static void pineview_clock(int refclk, intel_clock_t *clock)
522
{
523
	clock->m = clock->m2 + 2;
524
	clock->p = clock->p1 * clock->p2;
4560 Serge 525
	if (WARN_ON(clock->n == 0 || clock->p == 0))
526
		return;
527
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
528
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
2327 Serge 529
}
530
 
4104 Serge 531
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
2327 Serge 532
{
4104 Serge 533
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
534
}
535
 
536
static void i9xx_clock(int refclk, intel_clock_t *clock)
537
{
538
	clock->m = i9xx_dpll_compute_m(clock);
2327 Serge 539
	clock->p = clock->p1 * clock->p2;
4560 Serge 540
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
541
		return;
542
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
543
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
2327 Serge 544
}
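/*
 * Worked example for the i9xx formulas above (values chosen purely for
 * illustration): with refclk = 96000 kHz, m1 = 12, m2 = 5, n = 2,
 * p1 = 2, p2 = 5:
 *   m   = 5 * (12 + 2) + (5 + 2) = 77
 *   vco = 96000 * 77 / (2 + 2)   = 1848000 kHz
 *   p   = 2 * 5                  = 10
 *   dot = 1848000 / 10           = 184800 kHz (~184.8 MHz)
 */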
545
 
5060 serge 546
static void chv_clock(int refclk, intel_clock_t *clock)
547
{
548
	clock->m = clock->m1 * clock->m2;
549
	clock->p = clock->p1 * clock->p2;
550
	if (WARN_ON(clock->n == 0 || clock->p == 0))
551
		return;
552
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
553
			clock->n << 22);
554
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
555
}
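/*
 * Note on the shift above (reading of the code, not an original comment):
 * on CHV clock->m2 is a fixed-point value with 22 fractional bits, which
 * is why intel_limits_chv uses .m2 = { .min = 24 << 22, ... } and the VCO
 * here is computed against clock->n << 22.
 */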
556
 
2327 Serge 557
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
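/*
 * Note (added for clarity): this macro returns false from the *calling*
 * function - in practice intel_PLL_is_valid() below - so each failed
 * range check bails out immediately; the DRM_DEBUG message is compiled
 * out here.
 */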
558
/**
559
 * Returns whether the given set of divisors are valid for a given refclk with
560
 * the given connectors.
561
 */
562
 
563
static bool intel_PLL_is_valid(struct drm_device *dev,
564
			       const intel_limit_t *limit,
565
			       const intel_clock_t *clock)
566
{
4560 Serge 567
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
568
		INTELPllInvalid("n out of range\n");
2327 Serge 569
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
2342 Serge 570
		INTELPllInvalid("p1 out of range\n");
2327 Serge 571
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
2342 Serge 572
		INTELPllInvalid("m2 out of range\n");
2327 Serge 573
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
2342 Serge 574
		INTELPllInvalid("m1 out of range\n");
4560 Serge 575
 
576
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
577
		if (clock->m1 <= clock->m2)
2342 Serge 578
		INTELPllInvalid("m1 <= m2\n");
4560 Serge 579
 
580
	if (!IS_VALLEYVIEW(dev)) {
581
		if (clock->p < limit->p.min || limit->p.max < clock->p)
582
			INTELPllInvalid("p out of range\n");
2327 Serge 583
		if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
2342 Serge 584
			INTELPllInvalid("m out of range\n");
4560 Serge 585
	}
586
 
2327 Serge 587
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
2342 Serge 588
		INTELPllInvalid("vco out of range\n");
2327 Serge 589
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
590
	 * connector, etc., rather than just a single range.
591
	 */
592
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
2342 Serge 593
		INTELPllInvalid("dot out of range\n");
2327 Serge 594
 
595
	return true;
596
}
597
 
598
static bool
5354 serge 599
i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
3031 serge 600
		    int target, int refclk, intel_clock_t *match_clock,
601
		    intel_clock_t *best_clock)
2327 Serge 602
{
5354 serge 603
	struct drm_device *dev = crtc->base.dev;
2327 Serge 604
	intel_clock_t clock;
605
	int err = target;
606
 
5354 serge 607
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
2327 Serge 608
		/*
3480 Serge 609
		 * For LVDS just rely on its current settings for dual-channel.
610
		 * We haven't figured out how to reliably set up different
611
		 * single/dual channel state, if we even can.
2327 Serge 612
		 */
3480 Serge 613
		if (intel_is_dual_link_lvds(dev))
2327 Serge 614
			clock.p2 = limit->p2.p2_fast;
615
		else
616
			clock.p2 = limit->p2.p2_slow;
617
	} else {
618
		if (target < limit->p2.dot_limit)
619
			clock.p2 = limit->p2.p2_slow;
620
		else
621
			clock.p2 = limit->p2.p2_fast;
622
	}
623
 
2342 Serge 624
	memset(best_clock, 0, sizeof(*best_clock));
2327 Serge 625
 
626
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
627
	     clock.m1++) {
628
		for (clock.m2 = limit->m2.min;
629
		     clock.m2 <= limit->m2.max; clock.m2++) {
4104 Serge 630
			if (clock.m2 >= clock.m1)
2327 Serge 631
				break;
632
			for (clock.n = limit->n.min;
633
			     clock.n <= limit->n.max; clock.n++) {
634
				for (clock.p1 = limit->p1.min;
635
					clock.p1 <= limit->p1.max; clock.p1++) {
636
					int this_err;
637
 
4104 Serge 638
					i9xx_clock(refclk, &clock);
2327 Serge 639
					if (!intel_PLL_is_valid(dev, limit,
640
								&clock))
641
						continue;
3031 serge 642
					if (match_clock &&
643
					    clock.p != match_clock->p)
644
						continue;
2327 Serge 645
 
646
					this_err = abs(clock.dot - target);
647
					if (this_err < err) {
648
						*best_clock = clock;
649
						err = this_err;
650
					}
651
				}
652
			}
653
		}
654
	}
655
 
656
	return (err != target);
657
}
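/*
 * Summary (descriptive comment added for clarity): the search above is a
 * brute-force scan over m1 > m2, n and p1, with p2 fixed by the dot-clock
 * limit (or by the LVDS channel mode), keeping the candidate whose dot
 * clock is closest to the target; it returns true iff at least one valid
 * candidate lowered err below its initial value of target.
 */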
658
 
659
static bool
5354 serge 660
pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
4104 Serge 661
		   int target, int refclk, intel_clock_t *match_clock,
662
		   intel_clock_t *best_clock)
663
{
5354 serge 664
	struct drm_device *dev = crtc->base.dev;
4104 Serge 665
	intel_clock_t clock;
666
	int err = target;
667
 
5354 serge 668
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
4104 Serge 669
		/*
670
		 * For LVDS just rely on its current settings for dual-channel.
671
		 * We haven't figured out how to reliably set up different
672
		 * single/dual channel state, if we even can.
673
		 */
674
		if (intel_is_dual_link_lvds(dev))
675
			clock.p2 = limit->p2.p2_fast;
676
		else
677
			clock.p2 = limit->p2.p2_slow;
678
	} else {
679
		if (target < limit->p2.dot_limit)
680
			clock.p2 = limit->p2.p2_slow;
681
		else
682
			clock.p2 = limit->p2.p2_fast;
683
	}
684
 
685
	memset(best_clock, 0, sizeof(*best_clock));
686
 
687
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
688
	     clock.m1++) {
689
		for (clock.m2 = limit->m2.min;
690
		     clock.m2 <= limit->m2.max; clock.m2++) {
691
			for (clock.n = limit->n.min;
692
			     clock.n <= limit->n.max; clock.n++) {
693
				for (clock.p1 = limit->p1.min;
694
					clock.p1 <= limit->p1.max; clock.p1++) {
695
					int this_err;
696
 
697
					pineview_clock(refclk, &clock);
698
					if (!intel_PLL_is_valid(dev, limit,
699
								&clock))
700
						continue;
701
					if (match_clock &&
702
					    clock.p != match_clock->p)
703
						continue;
704
 
705
					this_err = abs(clock.dot - target);
706
					if (this_err < err) {
707
						*best_clock = clock;
708
						err = this_err;
709
					}
710
				}
711
			}
712
		}
713
	}
714
 
715
	return (err != target);
716
}
717
 
718
static bool
5354 serge 719
g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
3031 serge 720
			int target, int refclk, intel_clock_t *match_clock,
721
			intel_clock_t *best_clock)
2327 Serge 722
{
5354 serge 723
	struct drm_device *dev = crtc->base.dev;
2327 Serge 724
	intel_clock_t clock;
725
	int max_n;
726
	bool found;
727
	/* approximately equals target * 0.00585 */
728
	int err_most = (target >> 8) + (target >> 9);
729
	found = false;
730
 
5354 serge 731
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 732
		if (intel_is_dual_link_lvds(dev))
2327 Serge 733
			clock.p2 = limit->p2.p2_fast;
734
		else
735
			clock.p2 = limit->p2.p2_slow;
736
	} else {
737
		if (target < limit->p2.dot_limit)
738
			clock.p2 = limit->p2.p2_slow;
739
		else
740
			clock.p2 = limit->p2.p2_fast;
741
	}
742
 
743
	memset(best_clock, 0, sizeof(*best_clock));
744
	max_n = limit->n.max;
745
	/* based on hardware requirement, prefer smaller n to precision */
746
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
747
		/* based on hardware requirement, prefer larger m1,m2 */
748
		for (clock.m1 = limit->m1.max;
749
		     clock.m1 >= limit->m1.min; clock.m1--) {
750
			for (clock.m2 = limit->m2.max;
751
			     clock.m2 >= limit->m2.min; clock.m2--) {
752
				for (clock.p1 = limit->p1.max;
753
				     clock.p1 >= limit->p1.min; clock.p1--) {
754
					int this_err;
755
 
4104 Serge 756
					i9xx_clock(refclk, &clock);
2327 Serge 757
					if (!intel_PLL_is_valid(dev, limit,
758
								&clock))
759
						continue;
760
 
761
					this_err = abs(clock.dot - target);
762
					if (this_err < err_most) {
763
						*best_clock = clock;
764
						err_most = this_err;
765
						max_n = clock.n;
766
						found = true;
767
					}
768
				}
769
			}
770
		}
771
	}
772
	return found;
773
}
774
 
775
static bool
5354 serge 776
vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
3031 serge 777
			int target, int refclk, intel_clock_t *match_clock,
778
			intel_clock_t *best_clock)
779
{
5354 serge 780
	struct drm_device *dev = crtc->base.dev;
4560 Serge 781
	intel_clock_t clock;
782
	unsigned int bestppm = 1000000;
783
	/* min update 19.2 MHz */
784
	int max_n = min(limit->n.max, refclk / 19200);
785
	bool found = false;
2327 Serge 786
 
4560 Serge 787
	target *= 5; /* fast clock */
3031 serge 788
 
4560 Serge 789
	memset(best_clock, 0, sizeof(*best_clock));
790
 
3031 serge 791
	/* based on hardware requirement, prefer smaller n to precision */
4560 Serge 792
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
793
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
794
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
795
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
796
				clock.p = clock.p1 * clock.p2;
3031 serge 797
				/* based on hardware requirement, prefer bigger m1,m2 values */
4560 Serge 798
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
799
					unsigned int ppm, diff;
800
 
801
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
802
								     refclk * clock.m1);
803
 
804
					vlv_clock(refclk, &clock);
805
 
806
					if (!intel_PLL_is_valid(dev, limit,
807
								&clock))
808
						continue;
809
 
810
					diff = abs(clock.dot - target);
811
					ppm = div_u64(1000000ULL * diff, target);
812
 
813
					if (ppm < 100 && clock.p > best_clock->p) {
3031 serge 814
						bestppm = 0;
4560 Serge 815
						*best_clock = clock;
816
						found = true;
3031 serge 817
					}
4560 Serge 818
 
819
					if (bestppm >= 10 && ppm < bestppm - 10) {
820
						bestppm = ppm;
821
						*best_clock = clock;
822
						found = true;
3031 serge 823
					}
824
				}
825
			}
826
		}
827
	}
828
 
4560 Serge 829
	return found;
3031 serge 830
}
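/*
 * Summary (descriptive comment added for clarity): for each n/p1/p2/m1
 * candidate the loop above solves m2 directly from the target, then keeps
 * the result either when the error is below 100 ppm and p is larger than
 * the current best, or when it beats the current best ppm figure by more
 * than 10 (and the best is still at least 10).
 */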
831
 
5060 serge 832
static bool
5354 serge 833
chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
5060 serge 834
		   int target, int refclk, intel_clock_t *match_clock,
835
		   intel_clock_t *best_clock)
836
{
5354 serge 837
	struct drm_device *dev = crtc->base.dev;
5060 serge 838
	intel_clock_t clock;
839
	uint64_t m2;
840
	int found = false;
841
 
842
	memset(best_clock, 0, sizeof(*best_clock));
843
 
844
	/*
845
	 * Based on the hardware doc, n is always set to 1 and m1 is always
846
	 * set to 2.  If we need to support a 200 MHz refclk, we will have to
847
	 * revisit this because n may no longer be 1.
848
	 */
849
	clock.n = 1, clock.m1 = 2;
850
	target *= 5;	/* fast clock */
851
 
852
	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
853
		for (clock.p2 = limit->p2.p2_fast;
854
				clock.p2 >= limit->p2.p2_slow;
855
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
856
 
857
			clock.p = clock.p1 * clock.p2;
858
 
859
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
860
					clock.n) << 22, refclk * clock.m1);
861
 
862
			if (m2 > INT_MAX/clock.m1)
863
				continue;
864
 
865
			clock.m2 = m2;
866
 
867
			chv_clock(refclk, &clock);
868
 
869
			if (!intel_PLL_is_valid(dev, limit, &clock))
870
				continue;
871
 
872
			/* based on hardware requirement, prefer bigger p
873
			 */
874
			if (clock.p > best_clock->p) {
875
				*best_clock = clock;
876
				found = true;
877
			}
878
		}
879
	}
880
 
881
	return found;
882
}
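/*
 * Summary (descriptive comment added for clarity): with n and m1 pinned
 * to 1 and 2 per the comment above, the loop only walks p1/p2, derives m2
 * in 22-bit fixed point straight from the target, and prefers the
 * candidate with the biggest total post divider p.
 */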
883
 
4560 Serge 884
bool intel_crtc_active(struct drm_crtc *crtc)
885
{
886
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
887
 
888
	/* Be paranoid as we can arrive here with only partial
889
	 * state retrieved from the hardware during setup.
890
	 *
891
	 * We can ditch the adjusted_mode.crtc_clock check as soon
892
	 * as Haswell has gained clock readout/fastboot support.
893
	 *
5060 serge 894
	 * We can ditch the crtc->primary->fb check as soon as we can
4560 Serge 895
	 * properly reconstruct framebuffers.
896
	 */
5060 serge 897
	return intel_crtc->active && crtc->primary->fb &&
4560 Serge 898
		intel_crtc->config.adjusted_mode.crtc_clock;
899
}
900
 
3243 Serge 901
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
902
					     enum pipe pipe)
903
{
904
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
905
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
906
 
3746 Serge 907
	return intel_crtc->config.cpu_transcoder;
3243 Serge 908
}
909
 
4560 Serge 910
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
911
{
912
	struct drm_i915_private *dev_priv = dev->dev_private;
913
	u32 reg = PIPEDSL(pipe);
914
	u32 line1, line2;
915
	u32 line_mask;
916
 
917
	if (IS_GEN2(dev))
918
		line_mask = DSL_LINEMASK_GEN2;
919
	else
920
		line_mask = DSL_LINEMASK_GEN3;
921
 
922
	line1 = I915_READ(reg) & line_mask;
923
	mdelay(5);
924
	line2 = I915_READ(reg) & line_mask;
925
 
926
	return line1 == line2;
927
}
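/*
 * Descriptive note (added): PIPEDSL reports the current display scanline;
 * it is sampled twice, 5 ms apart, and the pipe is treated as stopped
 * only if the masked value has not moved between the two reads.
 */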
928
 
2327 Serge 929
/*
930
 * intel_wait_for_pipe_off - wait for pipe to turn off
5354 serge 931
 * @crtc: crtc whose pipe to wait for
2327 Serge 932
 *
933
 * After disabling a pipe, we can't wait for vblank in the usual way,
934
 * spinning on the vblank interrupt status bit, since we won't actually
935
 * see an interrupt when the pipe is disabled.
936
 *
937
 * On Gen4 and above:
938
 *   wait for the pipe register state bit to turn off
939
 *
940
 * Otherwise:
941
 *   wait for the display line value to settle (it usually
942
 *   ends up stopping at the start of the next frame).
943
 *
944
 */
5354 serge 945
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
2327 Serge 946
{
5354 serge 947
	struct drm_device *dev = crtc->base.dev;
2327 Serge 948
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 949
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
950
	enum pipe pipe = crtc->pipe;
2327 Serge 951
 
952
	if (INTEL_INFO(dev)->gen >= 4) {
3243 Serge 953
		int reg = PIPECONF(cpu_transcoder);
2327 Serge 954
 
955
		/* Wait for the Pipe State to go off */
956
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
957
			     100))
3031 serge 958
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 959
	} else {
960
		/* Wait for the display line to settle */
4560 Serge 961
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
3031 serge 962
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 963
	}
964
}
965
 
3480 Serge 966
/*
967
 * ibx_digital_port_connected - is the specified port connected?
968
 * @dev_priv: i915 private structure
969
 * @port: the port to test
970
 *
971
 * Returns true if @port is connected, false otherwise.
972
 */
973
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
974
				struct intel_digital_port *port)
975
{
976
	u32 bit;
977
 
978
	if (HAS_PCH_IBX(dev_priv->dev)) {
5060 serge 979
		switch (port->port) {
3480 Serge 980
		case PORT_B:
981
			bit = SDE_PORTB_HOTPLUG;
982
			break;
983
		case PORT_C:
984
			bit = SDE_PORTC_HOTPLUG;
985
			break;
986
		case PORT_D:
987
			bit = SDE_PORTD_HOTPLUG;
988
			break;
989
		default:
990
			return true;
991
		}
992
	} else {
5060 serge 993
		switch (port->port) {
3480 Serge 994
		case PORT_B:
995
			bit = SDE_PORTB_HOTPLUG_CPT;
996
			break;
997
		case PORT_C:
998
			bit = SDE_PORTC_HOTPLUG_CPT;
999
			break;
1000
		case PORT_D:
1001
			bit = SDE_PORTD_HOTPLUG_CPT;
1002
			break;
1003
		default:
1004
			return true;
1005
		}
1006
	}
1007
 
1008
	return I915_READ(SDEISR) & bit;
1009
}
1010
 
2327 Serge 1011
static const char *state_string(bool enabled)
1012
{
1013
	return enabled ? "on" : "off";
1014
}
1015
 
1016
/* Only for pre-ILK configs */
4104 Serge 1017
void assert_pll(struct drm_i915_private *dev_priv,
2327 Serge 1018
		       enum pipe pipe, bool state)
1019
{
1020
	int reg;
1021
	u32 val;
1022
	bool cur_state;
1023
 
1024
	reg = DPLL(pipe);
1025
	val = I915_READ(reg);
1026
	cur_state = !!(val & DPLL_VCO_ENABLE);
1027
	WARN(cur_state != state,
1028
	     "PLL state assertion failure (expected %s, current %s)\n",
1029
	     state_string(state), state_string(cur_state));
1030
}
1031
 
4560 Serge 1032
/* XXX: the dsi pll is shared between MIPI DSI ports */
1033
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1034
{
1035
	u32 val;
1036
	bool cur_state;
1037
 
1038
	mutex_lock(&dev_priv->dpio_lock);
1039
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1040
	mutex_unlock(&dev_priv->dpio_lock);
1041
 
1042
	cur_state = val & DSI_PLL_VCO_EN;
1043
	WARN(cur_state != state,
1044
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1045
	     state_string(state), state_string(cur_state));
1046
}
1047
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1048
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1049
 
4104 Serge 1050
struct intel_shared_dpll *
1051
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1052
{
1053
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1054
 
1055
	if (crtc->config.shared_dpll < 0)
1056
		return NULL;
1057
 
1058
	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
1059
}
1060
 
2327 Serge 1061
/* For ILK+ */
4104 Serge 1062
void assert_shared_dpll(struct drm_i915_private *dev_priv,
1063
			       struct intel_shared_dpll *pll,
3031 serge 1064
			   bool state)
2327 Serge 1065
{
1066
	bool cur_state;
4104 Serge 1067
	struct intel_dpll_hw_state hw_state;
2327 Serge 1068
 
3031 serge 1069
	if (WARN (!pll,
4104 Serge 1070
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
3031 serge 1071
		return;
2342 Serge 1072
 
4104 Serge 1073
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
3031 serge 1074
	WARN(cur_state != state,
4104 Serge 1075
	     "%s assertion failure (expected %s, current %s)\n",
1076
	     pll->name, state_string(state), state_string(cur_state));
2327 Serge 1077
}
1078
 
1079
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1080
			  enum pipe pipe, bool state)
1081
{
1082
	int reg;
1083
	u32 val;
1084
	bool cur_state;
3243 Serge 1085
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1086
								      pipe);
2327 Serge 1087
 
3480 Serge 1088
	if (HAS_DDI(dev_priv->dev)) {
1089
		/* DDI does not have a specific FDI_TX register */
3243 Serge 1090
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
3031 serge 1091
		val = I915_READ(reg);
3243 Serge 1092
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
3031 serge 1093
	} else {
2327 Serge 1094
		reg = FDI_TX_CTL(pipe);
1095
		val = I915_READ(reg);
1096
		cur_state = !!(val & FDI_TX_ENABLE);
3031 serge 1097
	}
2327 Serge 1098
	WARN(cur_state != state,
1099
	     "FDI TX state assertion failure (expected %s, current %s)\n",
1100
	     state_string(state), state_string(cur_state));
1101
}
1102
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1103
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1104
 
1105
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1106
			  enum pipe pipe, bool state)
1107
{
1108
	int reg;
1109
	u32 val;
1110
	bool cur_state;
1111
 
1112
	reg = FDI_RX_CTL(pipe);
1113
	val = I915_READ(reg);
1114
	cur_state = !!(val & FDI_RX_ENABLE);
1115
	WARN(cur_state != state,
1116
	     "FDI RX state assertion failure (expected %s, current %s)\n",
1117
	     state_string(state), state_string(cur_state));
1118
}
1119
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1120
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1121
 
1122
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1123
				      enum pipe pipe)
1124
{
1125
	int reg;
1126
	u32 val;
1127
 
1128
	/* ILK FDI PLL is always enabled */
5060 serge 1129
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
2327 Serge 1130
		return;
1131
 
3031 serge 1132
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
3480 Serge 1133
	if (HAS_DDI(dev_priv->dev))
3031 serge 1134
		return;
1135
 
2327 Serge 1136
	reg = FDI_TX_CTL(pipe);
1137
	val = I915_READ(reg);
1138
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1139
}
1140
 
4104 Serge 1141
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1142
		       enum pipe pipe, bool state)
2327 Serge 1143
{
1144
	int reg;
1145
	u32 val;
4104 Serge 1146
	bool cur_state;
2327 Serge 1147
 
1148
	reg = FDI_RX_CTL(pipe);
1149
	val = I915_READ(reg);
4104 Serge 1150
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1151
	WARN(cur_state != state,
1152
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1153
	     state_string(state), state_string(cur_state));
2327 Serge 1154
}
1155
 
5354 serge 1156
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
2327 Serge 1157
				  enum pipe pipe)
1158
{
5354 serge 1159
	struct drm_device *dev = dev_priv->dev;
1160
	int pp_reg;
2327 Serge 1161
	u32 val;
1162
	enum pipe panel_pipe = PIPE_A;
1163
	bool locked = true;
1164
 
5354 serge 1165
	if (WARN_ON(HAS_DDI(dev)))
1166
		return;
1167
 
1168
	if (HAS_PCH_SPLIT(dev)) {
1169
		u32 port_sel;
1170
 
2327 Serge 1171
		pp_reg = PCH_PP_CONTROL;
5354 serge 1172
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1173
 
1174
		if (port_sel == PANEL_PORT_SELECT_LVDS &&
1175
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1176
			panel_pipe = PIPE_B;
1177
		/* XXX: else fix for eDP */
1178
	} else if (IS_VALLEYVIEW(dev)) {
1179
		/* presumably write lock depends on pipe, not port select */
1180
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1181
		panel_pipe = pipe;
2327 Serge 1182
	} else {
1183
		pp_reg = PP_CONTROL;
5354 serge 1184
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1185
			panel_pipe = PIPE_B;
2327 Serge 1186
	}
1187
 
1188
	val = I915_READ(pp_reg);
1189
	if (!(val & PANEL_POWER_ON) ||
5354 serge 1190
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
2327 Serge 1191
		locked = false;
1192
 
1193
	WARN(panel_pipe == pipe && locked,
1194
	     "panel assertion failure, pipe %c regs locked\n",
1195
	     pipe_name(pipe));
1196
}
1197
 
4560 Serge 1198
static void assert_cursor(struct drm_i915_private *dev_priv,
1199
			  enum pipe pipe, bool state)
1200
{
1201
	struct drm_device *dev = dev_priv->dev;
1202
	bool cur_state;
1203
 
5060 serge 1204
	if (IS_845G(dev) || IS_I865G(dev))
4560 Serge 1205
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1206
	else
1207
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1208
 
1209
	WARN(cur_state != state,
1210
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1211
	     pipe_name(pipe), state_string(state), state_string(cur_state));
1212
}
1213
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1214
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1215
 
2342 Serge 1216
void assert_pipe(struct drm_i915_private *dev_priv,
2327 Serge 1217
			enum pipe pipe, bool state)
1218
{
1219
	int reg;
1220
	u32 val;
1221
	bool cur_state;
3243 Serge 1222
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1223
								      pipe);
2327 Serge 1224
 
5354 serge 1225
	/* if we need the pipe quirk it must be always on */
1226
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1227
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
3031 serge 1228
		state = true;
1229
 
5354 serge 1230
	if (!intel_display_power_is_enabled(dev_priv,
4104 Serge 1231
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
3480 Serge 1232
		cur_state = false;
1233
	} else {
3243 Serge 1234
		reg = PIPECONF(cpu_transcoder);
2327 Serge 1235
		val = I915_READ(reg);
1236
		cur_state = !!(val & PIPECONF_ENABLE);
3480 Serge 1237
	}
1238
 
2327 Serge 1239
	WARN(cur_state != state,
1240
	     "pipe %c assertion failure (expected %s, current %s)\n",
1241
	     pipe_name(pipe), state_string(state), state_string(cur_state));
1242
}
1243
 
3031 serge 1244
static void assert_plane(struct drm_i915_private *dev_priv,
1245
			 enum plane plane, bool state)
2327 Serge 1246
{
1247
	int reg;
1248
	u32 val;
3031 serge 1249
	bool cur_state;
2327 Serge 1250
 
1251
	reg = DSPCNTR(plane);
1252
	val = I915_READ(reg);
3031 serge 1253
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1254
	WARN(cur_state != state,
1255
	     "plane %c assertion failure (expected %s, current %s)\n",
1256
	     plane_name(plane), state_string(state), state_string(cur_state));
2327 Serge 1257
}
1258
 
3031 serge 1259
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1260
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1261
 
2327 Serge 1262
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1263
				   enum pipe pipe)
1264
{
4104 Serge 1265
	struct drm_device *dev = dev_priv->dev;
2327 Serge 1266
	int reg, i;
1267
	u32 val;
1268
	int cur_pipe;
1269
 
4104 Serge 1270
	/* Primary planes are fixed to pipes on gen4+ */
1271
	if (INTEL_INFO(dev)->gen >= 4) {
3031 serge 1272
		reg = DSPCNTR(pipe);
1273
		val = I915_READ(reg);
5060 serge 1274
		WARN(val & DISPLAY_PLANE_ENABLE,
3031 serge 1275
		     "plane %c assertion failure, should be disabled but not\n",
1276
		     plane_name(pipe));
2327 Serge 1277
		return;
3031 serge 1278
	}
2327 Serge 1279
 
1280
	/* Need to check both planes against the pipe */
5354 serge 1281
	for_each_pipe(dev_priv, i) {
2327 Serge 1282
		reg = DSPCNTR(i);
1283
		val = I915_READ(reg);
1284
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1285
			DISPPLANE_SEL_PIPE_SHIFT;
1286
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1287
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1288
		     plane_name(i), pipe_name(pipe));
1289
	}
1290
}
1291
 
3746 Serge 1292
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1293
				    enum pipe pipe)
1294
{
4104 Serge 1295
	struct drm_device *dev = dev_priv->dev;
5060 serge 1296
	int reg, sprite;
3746 Serge 1297
	u32 val;
1298
 
5354 serge 1299
	if (INTEL_INFO(dev)->gen >= 9) {
5060 serge 1300
		for_each_sprite(pipe, sprite) {
5354 serge 1301
			val = I915_READ(PLANE_CTL(pipe, sprite));
1302
			WARN(val & PLANE_CTL_ENABLE,
1303
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
1304
			     sprite, pipe_name(pipe));
1305
		}
1306
	} else if (IS_VALLEYVIEW(dev)) {
1307
		for_each_sprite(pipe, sprite) {
5060 serge 1308
			reg = SPCNTR(pipe, sprite);
3746 Serge 1309
			val = I915_READ(reg);
5060 serge 1310
			WARN(val & SP_ENABLE,
4104 Serge 1311
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
5060 serge 1312
			     sprite_name(pipe, sprite), pipe_name(pipe));
4104 Serge 1313
		}
1314
	} else if (INTEL_INFO(dev)->gen >= 7) {
1315
		reg = SPRCTL(pipe);
1316
		val = I915_READ(reg);
5060 serge 1317
		WARN(val & SPRITE_ENABLE,
4104 Serge 1318
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1319
		     plane_name(pipe), pipe_name(pipe));
1320
	} else if (INTEL_INFO(dev)->gen >= 5) {
1321
		reg = DVSCNTR(pipe);
1322
		val = I915_READ(reg);
5060 serge 1323
		WARN(val & DVS_ENABLE,
4104 Serge 1324
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1325
		     plane_name(pipe), pipe_name(pipe));
3746 Serge 1326
	}
1327
}
1328
 
5354 serge 1329
static void assert_vblank_disabled(struct drm_crtc *crtc)
1330
{
1331
	if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1332
		drm_crtc_vblank_put(crtc);
1333
}
1334
 
4560 Serge 1335
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
2327 Serge 1336
{
1337
	u32 val;
1338
	bool enabled;
1339
 
4560 Serge 1340
	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
3031 serge 1341
 
2327 Serge 1342
	val = I915_READ(PCH_DREF_CONTROL);
1343
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1344
			    DREF_SUPERSPREAD_SOURCE_MASK));
1345
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1346
}
1347
 
4104 Serge 1348
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
2327 Serge 1349
				       enum pipe pipe)
1350
{
1351
	int reg;
1352
	u32 val;
1353
	bool enabled;
1354
 
4104 Serge 1355
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1356
	val = I915_READ(reg);
1357
	enabled = !!(val & TRANS_ENABLE);
1358
	WARN(enabled,
1359
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1360
	     pipe_name(pipe));
1361
}
1362
 
1363
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1364
			    enum pipe pipe, u32 port_sel, u32 val)
1365
{
1366
	if ((val & DP_PORT_EN) == 0)
1367
		return false;
1368
 
1369
	if (HAS_PCH_CPT(dev_priv->dev)) {
1370
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1371
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1372
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1373
			return false;
5060 serge 1374
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1375
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1376
			return false;
2327 Serge 1377
	} else {
1378
		if ((val & DP_PIPE_MASK) != (pipe << 30))
1379
			return false;
1380
	}
1381
	return true;
1382
}
1383
 
1384
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1385
			      enum pipe pipe, u32 val)
1386
{
3746 Serge 1387
	if ((val & SDVO_ENABLE) == 0)
2327 Serge 1388
		return false;
1389
 
1390
	if (HAS_PCH_CPT(dev_priv->dev)) {
3746 Serge 1391
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
2327 Serge 1392
			return false;
5060 serge 1393
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1394
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1395
			return false;
2327 Serge 1396
	} else {
3746 Serge 1397
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
2327 Serge 1398
			return false;
1399
	}
1400
	return true;
1401
}
1402
 
1403
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1404
			      enum pipe pipe, u32 val)
1405
{
1406
	if ((val & LVDS_PORT_EN) == 0)
1407
		return false;
1408
 
1409
	if (HAS_PCH_CPT(dev_priv->dev)) {
1410
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1411
			return false;
1412
	} else {
1413
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1414
			return false;
1415
	}
1416
	return true;
1417
}
1418
 
1419
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1420
			      enum pipe pipe, u32 val)
1421
{
1422
	if ((val & ADPA_DAC_ENABLE) == 0)
1423
		return false;
1424
	if (HAS_PCH_CPT(dev_priv->dev)) {
1425
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1426
			return false;
1427
	} else {
1428
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1429
			return false;
1430
	}
1431
	return true;
1432
}
1433
 
1434
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1435
				   enum pipe pipe, int reg, u32 port_sel)
1436
{
1437
	u32 val = I915_READ(reg);
1438
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1439
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1440
	     reg, pipe_name(pipe));
3031 serge 1441
 
1442
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1443
	     && (val & DP_PIPEB_SELECT),
1444
	     "IBX PCH dp port still using transcoder B\n");
2327 Serge 1445
}
1446
 
1447
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1448
				     enum pipe pipe, int reg)
1449
{
1450
	u32 val = I915_READ(reg);
3031 serge 1451
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1452
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
2327 Serge 1453
	     reg, pipe_name(pipe));
3031 serge 1454
 
3746 Serge 1455
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
3031 serge 1456
	     && (val & SDVO_PIPE_B_SELECT),
1457
	     "IBX PCH hdmi port still using transcoder B\n");
2327 Serge 1458
}
1459
 
1460
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1461
				      enum pipe pipe)
1462
{
1463
	int reg;
1464
	u32 val;
1465
 
1466
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1467
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1468
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1469
 
1470
	reg = PCH_ADPA;
1471
	val = I915_READ(reg);
3031 serge 1472
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1473
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1474
	     pipe_name(pipe));
1475
 
1476
	reg = PCH_LVDS;
1477
	val = I915_READ(reg);
3031 serge 1478
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1479
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1480
	     pipe_name(pipe));
1481
 
3746 Serge 1482
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1483
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1484
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
2327 Serge 1485
}
1486
 
4560 Serge 1487
static void intel_init_dpio(struct drm_device *dev)
1488
{
1489
	struct drm_i915_private *dev_priv = dev->dev_private;
1490
 
1491
	if (!IS_VALLEYVIEW(dev))
1492
		return;
1493
 
5060 serge 1494
	/*
1495
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1496
	 * CHV x1 PHY (DP/HDMI D)
1497
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1498
	 */
1499
	if (IS_CHERRYVIEW(dev)) {
1500
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1501
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1502
	} else {
4560 Serge 1503
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
5060 serge 1504
	}
4560 Serge 1505
}
1506
 
5354 serge 1507
static void vlv_enable_pll(struct intel_crtc *crtc,
1508
			   const struct intel_crtc_config *pipe_config)
4560 Serge 1509
{
4104 Serge 1510
	struct drm_device *dev = crtc->base.dev;
1511
	struct drm_i915_private *dev_priv = dev->dev_private;
1512
	int reg = DPLL(crtc->pipe);
5354 serge 1513
	u32 dpll = pipe_config->dpll_hw_state.dpll;
2327 Serge 1514
 
4104 Serge 1515
	assert_pipe_disabled(dev_priv, crtc->pipe);
1516
 
2327 Serge 1517
    /* No really, not for ILK+ */
4104 Serge 1518
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
2327 Serge 1519
 
1520
    /* PLL is protected by panel, make sure we can write it */
5354 serge 1521
	if (IS_MOBILE(dev_priv->dev))
4104 Serge 1522
		assert_panel_unlocked(dev_priv, crtc->pipe);
2327 Serge 1523
 
4104 Serge 1524
	I915_WRITE(reg, dpll);
1525
	POSTING_READ(reg);
1526
	udelay(150);
2327 Serge 1527
 
4104 Serge 1528
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1529
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1530
 
5354 serge 1531
	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
4104 Serge 1532
	POSTING_READ(DPLL_MD(crtc->pipe));
1533
 
1534
	/* We do this three times for luck */
1535
	I915_WRITE(reg, dpll);
1536
	POSTING_READ(reg);
1537
	udelay(150); /* wait for warmup */
1538
	I915_WRITE(reg, dpll);
1539
	POSTING_READ(reg);
1540
	udelay(150); /* wait for warmup */
1541
	I915_WRITE(reg, dpll);
1542
	POSTING_READ(reg);
1543
	udelay(150); /* wait for warmup */
1544
}
1545
 
5354 serge 1546
static void chv_enable_pll(struct intel_crtc *crtc,
1547
			   const struct intel_crtc_config *pipe_config)
5060 serge 1548
{
1549
	struct drm_device *dev = crtc->base.dev;
1550
	struct drm_i915_private *dev_priv = dev->dev_private;
1551
	int pipe = crtc->pipe;
1552
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1553
	u32 tmp;
1554
 
1555
	assert_pipe_disabled(dev_priv, crtc->pipe);
1556
 
1557
	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1558
 
1559
	mutex_lock(&dev_priv->dpio_lock);
1560
 
1561
	/* Enable back the 10bit clock to display controller */
1562
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1563
	tmp |= DPIO_DCLKP_EN;
1564
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1565
 
1566
	/*
1567
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1568
	 */
1569
	udelay(1);
1570
 
1571
	/* Enable PLL */
5354 serge 1572
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
5060 serge 1573
 
1574
	/* Check PLL is locked */
1575
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1576
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1577
 
1578
	/* not sure when this should be written */
5354 serge 1579
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
5060 serge 1580
	POSTING_READ(DPLL_MD(pipe));
1581
 
1582
	mutex_unlock(&dev_priv->dpio_lock);
1583
}
1584
 
5354 serge 1585
static int intel_num_dvo_pipes(struct drm_device *dev)
1586
{
1587
	struct intel_crtc *crtc;
1588
	int count = 0;
1589
 
1590
	for_each_intel_crtc(dev, crtc)
1591
		count += crtc->active &&
1592
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1593
 
1594
	return count;
1595
}
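/*
 * Descriptive note (added): this counts the active crtcs that drive a DVO
 * output; i9xx_enable_pll() and i9xx_disable_pll() use the count to decide
 * when the DPLL_DVO_2X_MODE bit has to be set or cleared on both PLLs.
 */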
1596
 
4104 Serge 1597
static void i9xx_enable_pll(struct intel_crtc *crtc)
1598
{
1599
	struct drm_device *dev = crtc->base.dev;
1600
	struct drm_i915_private *dev_priv = dev->dev_private;
1601
	int reg = DPLL(crtc->pipe);
1602
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1603
 
1604
	assert_pipe_disabled(dev_priv, crtc->pipe);
1605
 
1606
	/* No really, not for ILK+ */
5060 serge 1607
	BUG_ON(INTEL_INFO(dev)->gen >= 5);
4104 Serge 1608
 
1609
	/* PLL is protected by panel, make sure we can write it */
1610
	if (IS_MOBILE(dev) && !IS_I830(dev))
1611
		assert_panel_unlocked(dev_priv, crtc->pipe);
1612
 
5354 serge 1613
	/* Enable DVO 2x clock on both PLLs if necessary */
1614
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1615
		/*
1616
		 * It appears to be important that we don't enable this
1617
		 * for the current pipe before otherwise configuring the
1618
		 * PLL. No idea how this should be handled if multiple
1619
		 * DVO outputs are enabled simultaneously.
1620
		 */
1621
		dpll |= DPLL_DVO_2X_MODE;
1622
		I915_WRITE(DPLL(!crtc->pipe),
1623
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1624
	}
4104 Serge 1625
 
1626
	/* Wait for the clocks to stabilize. */
1627
	POSTING_READ(reg);
1628
	udelay(150);
1629
 
1630
	if (INTEL_INFO(dev)->gen >= 4) {
1631
		I915_WRITE(DPLL_MD(crtc->pipe),
1632
			   crtc->config.dpll_hw_state.dpll_md);
1633
	} else {
1634
		/* The pixel multiplier can only be updated once the
1635
		 * DPLL is enabled and the clocks are stable.
1636
		 *
1637
		 * So write it again.
1638
		 */
1639
		I915_WRITE(reg, dpll);
1640
	}
1641
 
2327 Serge 1642
    /* We do this three times for luck */
4104 Serge 1643
	I915_WRITE(reg, dpll);
2327 Serge 1644
    POSTING_READ(reg);
1645
    udelay(150); /* wait for warmup */
4104 Serge 1646
	I915_WRITE(reg, dpll);
2327 Serge 1647
    POSTING_READ(reg);
1648
    udelay(150); /* wait for warmup */
4104 Serge 1649
	I915_WRITE(reg, dpll);
2327 Serge 1650
    POSTING_READ(reg);
1651
    udelay(150); /* wait for warmup */
1652
}
1653
 
1654
/**
4104 Serge 1655
 * i9xx_disable_pll - disable a PLL
2327 Serge 1656
 * @crtc: crtc whose PLL is to be disabled
1657
 *
1658
 * Disable the PLL for @crtc's pipe, making sure the pipe is off
1659
 * first.
1660
 *
1661
 * Note!  This is for pre-ILK only.
1662
 */
5354 serge 1663
static void i9xx_disable_pll(struct intel_crtc *crtc)
2327 Serge 1664
{
5354 serge 1665
	struct drm_device *dev = crtc->base.dev;
1666
	struct drm_i915_private *dev_priv = dev->dev_private;
1667
	enum pipe pipe = crtc->pipe;
1668
 
1669
	/* Disable DVO 2x clock on both PLLs if necessary */
1670
	if (IS_I830(dev) &&
1671
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1672
	    intel_num_dvo_pipes(dev) == 1) {
1673
		I915_WRITE(DPLL(PIPE_B),
1674
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1675
		I915_WRITE(DPLL(PIPE_A),
1676
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1677
	}
1678
 
1679
	/* Don't disable pipe or pipe PLLs if needed */
1680
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1681
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2327 Serge 1682
		return;
1683
 
1684
	/* Make sure the pipe isn't still relying on us */
1685
	assert_pipe_disabled(dev_priv, pipe);
1686
 
4104 Serge 1687
	I915_WRITE(DPLL(pipe), 0);
1688
	POSTING_READ(DPLL(pipe));
2327 Serge 1689
}
1690
 
4539 Serge 1691
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1692
{
1693
	u32 val = 0;
1694
 
1695
	/* Make sure the pipe isn't still relying on us */
1696
	assert_pipe_disabled(dev_priv, pipe);
1697
 
4560 Serge 1698
	/*
1699
	 * Leave integrated clock source and reference clock enabled for pipe B.
1700
	 * The latter is needed for VGA hotplug / manual detection.
1701
	 */
4539 Serge 1702
	if (pipe == PIPE_B)
4560 Serge 1703
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
4539 Serge 1704
	I915_WRITE(DPLL(pipe), val);
1705
	POSTING_READ(DPLL(pipe));
5060 serge 1706
 
4539 Serge 1707
}
1708
 
5060 serge 1709
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1710
{
1711
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1712
	u32 val;
1713
 
1714
	/* Make sure the pipe isn't still relying on us */
1715
	assert_pipe_disabled(dev_priv, pipe);
1716
 
1717
	/* Set PLL en = 0 */
5354 serge 1718
	val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
5060 serge 1719
	if (pipe != PIPE_A)
1720
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1721
	I915_WRITE(DPLL(pipe), val);
1722
	POSTING_READ(DPLL(pipe));
1723
 
1724
	mutex_lock(&dev_priv->dpio_lock);
1725
 
1726
	/* Disable 10bit clock to display controller */
1727
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1728
	val &= ~DPIO_DCLKP_EN;
1729
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1730
 
1731
	/* disable left/right clock distribution */
1732
	if (pipe != PIPE_B) {
1733
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1734
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1735
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1736
	} else {
1737
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1738
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1739
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1740
	}
1741
 
1742
	mutex_unlock(&dev_priv->dpio_lock);
1743
}
1744
 
4560 Serge 1745
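/*
 * Poll the per-port "ready" flag after enabling a VLV/CHV DDI port: ports B
 * and C are reported in DPLL(0), port D in DPIO_PHY_STATUS, with a 1000ms
 * timeout before the WARN below fires.
 */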
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1746
		struct intel_digital_port *dport)
3031 serge 1747
{
4104 Serge 1748
	u32 port_mask;
5060 serge 1749
	int dpll_reg;
3031 serge 1750
 
4560 Serge 1751
	switch (dport->port) {
1752
	case PORT_B:
4104 Serge 1753
		port_mask = DPLL_PORTB_READY_MASK;
5060 serge 1754
		dpll_reg = DPLL(0);
4560 Serge 1755
		break;
1756
	case PORT_C:
4104 Serge 1757
		port_mask = DPLL_PORTC_READY_MASK;
5060 serge 1758
		dpll_reg = DPLL(0);
4560 Serge 1759
		break;
5060 serge 1760
	case PORT_D:
1761
		port_mask = DPLL_PORTD_READY_MASK;
1762
		dpll_reg = DPIO_PHY_STATUS;
1763
		break;
4560 Serge 1764
	default:
1765
		BUG();
1766
	}
3243 Serge 1767
 
5060 serge 1768
	if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
4104 Serge 1769
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
5060 serge 1770
		     port_name(dport->port), I915_READ(dpll_reg));
3031 serge 1771
}
1772
 
5060 serge 1773
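/*
 * Shared DPLL handling: intel_prepare_shared_dpll() runs the pll's
 * ->mode_set hook only while the DPLL has no active users, and the
 * enable/disable helpers that follow treat pll->active as a reference
 * count, bracketing the actual hardware on/off transition with a
 * POWER_DOMAIN_PLLS power well reference.
 */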
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1774
{
1775
	struct drm_device *dev = crtc->base.dev;
1776
	struct drm_i915_private *dev_priv = dev->dev_private;
1777
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1778
 
1779
	if (WARN_ON(pll == NULL))
1780
		return;
1781
 
5354 serge 1782
	WARN_ON(!pll->config.crtc_mask);
5060 serge 1783
	if (pll->active == 0) {
1784
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1785
		WARN_ON(pll->on);
1786
		assert_shared_dpll_disabled(dev_priv, pll);
1787
 
1788
		pll->mode_set(dev_priv, pll);
1789
	}
1790
}
1791
 
2327 Serge 1792
/**
5060 serge 1793
 * intel_enable_shared_dpll - enable a crtc's shared DPLL
2327 Serge 1794
 * @crtc: crtc whose shared DPLL is to be enabled
1795
 *
1796
 * Take a reference on @crtc's shared DPLL and enable it if this is the
1797
 * first active user.  On PCH platforms the PLL needs to be enabled before
1798
 * the PCH transcoder, since it drives the transcoder clock.
1799
 */
5060 serge 1800
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1801
{
5060 serge 1802
	struct drm_device *dev = crtc->base.dev;
1803
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 1804
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1805
 
4104 Serge 1806
	if (WARN_ON(pll == NULL))
2342 Serge 1807
		return;
1808
 
5354 serge 1809
	if (WARN_ON(pll->config.crtc_mask == 0))
3031 serge 1810
		return;
2327 Serge 1811
 
5354 serge 1812
	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
4104 Serge 1813
		      pll->name, pll->active, pll->on,
1814
		      crtc->base.base.id);
3031 serge 1815
 
4104 Serge 1816
	if (pll->active++) {
1817
		WARN_ON(!pll->on);
1818
		assert_shared_dpll_enabled(dev_priv, pll);
3031 serge 1819
		return;
1820
	}
4104 Serge 1821
	WARN_ON(pll->on);
3031 serge 1822
 
5060 serge 1823
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1824
 
4104 Serge 1825
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1826
	pll->enable(dev_priv, pll);
3031 serge 1827
	pll->on = true;
2327 Serge 1828
}
1829
 
5354 serge 1830
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1831
{
5060 serge 1832
	struct drm_device *dev = crtc->base.dev;
1833
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 1834
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1835
 
1836
	/* PCH only available on ILK+ */
5060 serge 1837
	BUG_ON(INTEL_INFO(dev)->gen < 5);
4104 Serge 1838
	if (WARN_ON(pll == NULL))
3031 serge 1839
	       return;
2327 Serge 1840
 
5354 serge 1841
	if (WARN_ON(pll->config.crtc_mask == 0))
3031 serge 1842
		return;
2327 Serge 1843
 
4104 Serge 1844
	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1845
		      pll->name, pll->active, pll->on,
1846
		      crtc->base.base.id);
2342 Serge 1847
 
3031 serge 1848
	if (WARN_ON(pll->active == 0)) {
4104 Serge 1849
		assert_shared_dpll_disabled(dev_priv, pll);
3031 serge 1850
		return;
1851
	}
2342 Serge 1852
 
4104 Serge 1853
	assert_shared_dpll_enabled(dev_priv, pll);
1854
	WARN_ON(!pll->on);
1855
	if (--pll->active)
2342 Serge 1856
		return;
1857
 
4104 Serge 1858
	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1859
	pll->disable(dev_priv, pll);
3031 serge 1860
	pll->on = false;
5060 serge 1861
 
1862
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2327 Serge 1863
}
1864
 
3243 Serge 1865
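/*
 * Enabling the PCH transcoder requires the shared DPLL and the FDI TX/RX
 * links to be up already.  CPT first needs the TRANS_CHICKEN2 timing
 * override workaround, IBX additionally inherits the BPC bits from the CPU
 * PIPECONF, the interlace mode is carried over, and the enable is followed
 * by a poll of up to 100ms for TRANS_STATE_ENABLE.
 */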
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1866
				    enum pipe pipe)
1867
{
3243 Serge 1868
	struct drm_device *dev = dev_priv->dev;
3031 serge 1869
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4104 Serge 1870
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3243 Serge 1871
	uint32_t reg, val, pipeconf_val;
2327 Serge 1872
 
1873
	/* PCH only available on ILK+ */
5354 serge 1874
	BUG_ON(!HAS_PCH_SPLIT(dev));
2327 Serge 1875
 
1876
	/* Make sure PCH DPLL is enabled */
4104 Serge 1877
	assert_shared_dpll_enabled(dev_priv,
1878
				   intel_crtc_to_shared_dpll(intel_crtc));
2327 Serge 1879
 
1880
	/* FDI must be feeding us bits for PCH ports */
1881
	assert_fdi_tx_enabled(dev_priv, pipe);
1882
	assert_fdi_rx_enabled(dev_priv, pipe);
1883
 
3243 Serge 1884
	if (HAS_PCH_CPT(dev)) {
1885
		/* Workaround: Set the timing override bit before enabling the
1886
		 * pch transcoder. */
1887
		reg = TRANS_CHICKEN2(pipe);
1888
		val = I915_READ(reg);
1889
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1890
		I915_WRITE(reg, val);
3031 serge 1891
	}
3243 Serge 1892
 
4104 Serge 1893
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1894
	val = I915_READ(reg);
3031 serge 1895
	pipeconf_val = I915_READ(PIPECONF(pipe));
2327 Serge 1896
 
1897
	if (HAS_PCH_IBX(dev_priv->dev)) {
1898
		/*
1899
		 * make the BPC in transcoder be consistent with
1900
		 * that in pipeconf reg.
1901
		 */
3480 Serge 1902
		val &= ~PIPECONF_BPC_MASK;
1903
		val |= pipeconf_val & PIPECONF_BPC_MASK;
2327 Serge 1904
	}
3031 serge 1905
 
1906
	val &= ~TRANS_INTERLACE_MASK;
1907
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1908
		if (HAS_PCH_IBX(dev_priv->dev) &&
5354 serge 1909
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
3031 serge 1910
			val |= TRANS_LEGACY_INTERLACED_ILK;
1911
		else
1912
			val |= TRANS_INTERLACED;
1913
	else
1914
		val |= TRANS_PROGRESSIVE;
1915
 
2327 Serge 1916
	I915_WRITE(reg, val | TRANS_ENABLE);
1917
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
4104 Serge 1918
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2327 Serge 1919
}
1920
 
3243 Serge 1921
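/*
 * The LPT PCH exposes a single transcoder: the helper below always checks
 * FDI RX on transcoder A, sets the timing override chicken bit, and
 * programs LPT_TRANSCONF with the interlace mode taken from the CPU
 * transcoder's PIPECONF.
 */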
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1922
				      enum transcoder cpu_transcoder)
1923
{
1924
	u32 val, pipeconf_val;
1925
 
1926
	/* PCH only available on ILK+ */
5354 serge 1927
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
3243 Serge 1928
 
1929
	/* FDI must be feeding us bits for PCH ports */
3480 Serge 1930
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
3243 Serge 1931
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1932
 
1933
	/* Workaround: set timing override bit. */
1934
	val = I915_READ(_TRANSA_CHICKEN2);
1935
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1936
	I915_WRITE(_TRANSA_CHICKEN2, val);
1937
 
1938
	val = TRANS_ENABLE;
1939
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1940
 
1941
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1942
	    PIPECONF_INTERLACED_ILK)
1943
		val |= TRANS_INTERLACED;
1944
	else
1945
		val |= TRANS_PROGRESSIVE;
1946
 
4104 Serge 1947
	I915_WRITE(LPT_TRANSCONF, val);
1948
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
3243 Serge 1949
		DRM_ERROR("Failed to enable PCH transcoder\n");
1950
}
1951
 
1952
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1953
				     enum pipe pipe)
1954
{
3243 Serge 1955
	struct drm_device *dev = dev_priv->dev;
1956
	uint32_t reg, val;
2327 Serge 1957
 
1958
	/* FDI relies on the transcoder */
1959
	assert_fdi_tx_disabled(dev_priv, pipe);
1960
	assert_fdi_rx_disabled(dev_priv, pipe);
1961
 
1962
	/* Ports must be off as well */
1963
	assert_pch_ports_disabled(dev_priv, pipe);
1964
 
4104 Serge 1965
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1966
	val = I915_READ(reg);
1967
	val &= ~TRANS_ENABLE;
1968
	I915_WRITE(reg, val);
1969
	/* wait for PCH transcoder off, transcoder state */
1970
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4104 Serge 1971
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
3243 Serge 1972
 
1973
	if (!HAS_PCH_IBX(dev)) {
1974
		/* Workaround: Clear the timing override chicken bit again. */
1975
		reg = TRANS_CHICKEN2(pipe);
1976
		val = I915_READ(reg);
1977
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1978
		I915_WRITE(reg, val);
1979
	}
2327 Serge 1980
}
1981
 
3243 Serge 1982
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1983
{
1984
	u32 val;
1985
 
4104 Serge 1986
	val = I915_READ(LPT_TRANSCONF);
3243 Serge 1987
	val &= ~TRANS_ENABLE;
4104 Serge 1988
	I915_WRITE(LPT_TRANSCONF, val);
3243 Serge 1989
	/* wait for PCH transcoder off, transcoder state */
4104 Serge 1990
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
3243 Serge 1991
		DRM_ERROR("Failed to disable PCH transcoder\n");
1992
 
1993
	/* Workaround: clear timing override bit. */
1994
	val = I915_READ(_TRANSA_CHICKEN2);
1995
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1996
	I915_WRITE(_TRANSA_CHICKEN2, val);
1997
}
1998
 
2327 Serge 1999
/**
2000
 * intel_enable_pipe - enable a pipe, asserting requirements
5060 serge 2001
 * @crtc: crtc responsible for the pipe
2327 Serge 2002
 *
5060 serge 2003
 * Enable @crtc's pipe, making sure that various hardware specific requirements
2327 Serge 2004
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2005
 */
5060 serge 2006
static void intel_enable_pipe(struct intel_crtc *crtc)
2327 Serge 2007
{
5060 serge 2008
	struct drm_device *dev = crtc->base.dev;
2009
	struct drm_i915_private *dev_priv = dev->dev_private;
2010
	enum pipe pipe = crtc->pipe;
3243 Serge 2011
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2012
								      pipe);
3480 Serge 2013
	enum pipe pch_transcoder;
2327 Serge 2014
	int reg;
2015
	u32 val;
2016
 
4104 Serge 2017
	assert_planes_disabled(dev_priv, pipe);
4560 Serge 2018
	assert_cursor_disabled(dev_priv, pipe);
4104 Serge 2019
	assert_sprites_disabled(dev_priv, pipe);
2020
 
3480 Serge 2021
	if (HAS_PCH_LPT(dev_priv->dev))
3243 Serge 2022
		pch_transcoder = TRANSCODER_A;
2023
	else
2024
		pch_transcoder = pipe;
2025
 
2327 Serge 2026
	/*
2027
	 * A pipe without a PLL won't actually be able to drive bits from
2028
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2029
	 * need the check.
2030
	 */
2031
	if (!HAS_PCH_SPLIT(dev_priv->dev))
5354 serge 2032
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
4560 Serge 2033
			assert_dsi_pll_enabled(dev_priv);
2034
		else
2327 Serge 2035
			assert_pll_enabled(dev_priv, pipe);
2036
	else {
5060 serge 2037
		if (crtc->config.has_pch_encoder) {
2327 Serge 2038
			/* if driving the PCH, we need FDI enabled */
3243 Serge 2039
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
3480 Serge 2040
			assert_fdi_tx_pll_enabled(dev_priv,
2041
						  (enum pipe) cpu_transcoder);
2327 Serge 2042
		}
2043
		/* FIXME: assert CPU port conditions for SNB+ */
2044
	}
2045
 
3243 Serge 2046
	reg = PIPECONF(cpu_transcoder);
2327 Serge 2047
	val = I915_READ(reg);
5060 serge 2048
	if (val & PIPECONF_ENABLE) {
5354 serge 2049
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2050
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2327 Serge 2051
		return;
5060 serge 2052
	}
2327 Serge 2053
 
2054
	I915_WRITE(reg, val | PIPECONF_ENABLE);
5060 serge 2055
	POSTING_READ(reg);
2327 Serge 2056
}
2057
 
2058
/**
2059
 * intel_disable_pipe - disable a pipe, asserting requirements
5354 serge 2060
 * @crtc: crtc whose pipe is to be disabled
2327 Serge 2061
 *
5354 serge 2062
 * Disable the pipe of @crtc, making sure that various hardware
2063
 * specific requirements are met, if applicable, e.g. plane
2064
 * disabled, panel fitter off, etc.
2327 Serge 2065
 *
2066
 * Will wait until the pipe has shut down before returning.
2067
 */
5354 serge 2068
static void intel_disable_pipe(struct intel_crtc *crtc)
2327 Serge 2069
{
5354 serge 2070
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2071
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
2072
	enum pipe pipe = crtc->pipe;
2327 Serge 2073
	int reg;
2074
	u32 val;
2075
 
3031 serge 2076
    /*
2327 Serge 2077
	 * Make sure planes won't keep trying to pump pixels to us,
2078
	 * or we might hang the display.
2079
	 */
2080
	assert_planes_disabled(dev_priv, pipe);
4560 Serge 2081
	assert_cursor_disabled(dev_priv, pipe);
3746 Serge 2082
	assert_sprites_disabled(dev_priv, pipe);
2327 Serge 2083
 
3243 Serge 2084
	reg = PIPECONF(cpu_transcoder);
2327 Serge 2085
	val = I915_READ(reg);
2086
	if ((val & PIPECONF_ENABLE) == 0)
2087
		return;
2088
 
5354 serge 2089
	/*
2090
	 * Double wide has implications for planes
2091
	 * so best keep it disabled when not needed.
2092
	 */
2093
	if (crtc->config.double_wide)
2094
		val &= ~PIPECONF_DOUBLE_WIDE;
2095
 
2096
	/* Don't disable pipe or pipe PLLs if needed */
2097
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2098
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2099
		val &= ~PIPECONF_ENABLE;
2100
 
2101
	I915_WRITE(reg, val);
2102
	if ((val & PIPECONF_ENABLE) == 0)
2103
		intel_wait_for_pipe_off(crtc);
2327 Serge 2104
}
2105
 
2106
/*
2107
 * Plane regs are double buffered, going from enabled->disabled needs a
2108
 * trigger in order to latch.  The display address reg provides this.
2109
 */
4560 Serge 2110
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2327 Serge 2111
				      enum plane plane)
2112
{
5060 serge 2113
	struct drm_device *dev = dev_priv->dev;
2114
	u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
4560 Serge 2115
 
2116
	I915_WRITE(reg, I915_READ(reg));
2117
	POSTING_READ(reg);
2327 Serge 2118
}
2119
 
2120
/**
5060 serge 2121
 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
5354 serge 2122
 * @plane:  plane to be enabled
2123
 * @crtc: crtc for the plane
2327 Serge 2124
 *
5354 serge 2125
 * Enable @plane on @crtc, making sure that the pipe is running first.
2327 Serge 2126
 */
5354 serge 2127
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
2128
					  struct drm_crtc *crtc)
2327 Serge 2129
{
5354 serge 2130
	struct drm_device *dev = plane->dev;
2131
	struct drm_i915_private *dev_priv = dev->dev_private;
2132
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 2133
 
2134
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
5354 serge 2135
	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2327 Serge 2136
 
5060 serge 2137
	if (intel_crtc->primary_enabled)
2138
		return;
4560 Serge 2139
 
2140
	intel_crtc->primary_enabled = true;
2141
 
5354 serge 2142
	dev_priv->display.update_primary_plane(crtc, plane->fb,
2143
					       crtc->x, crtc->y);
2327 Serge 2144
 
5354 serge 2145
	/*
2146
	 * BDW signals flip done immediately if the plane
2147
	 * is disabled, even if the plane enable is already
2148
	 * armed to occur at the next vblank :(
2149
	 */
2150
	if (IS_BROADWELL(dev))
2151
		intel_wait_for_vblank(dev, intel_crtc->pipe);
2327 Serge 2152
}
2153
 
2154
/**
5060 serge 2155
 * intel_disable_primary_hw_plane - disable the primary hardware plane
5354 serge 2156
 * @plane: plane to be disabled
2157
 * @crtc: crtc for the plane
2327 Serge 2158
 *
5354 serge 2159
 * Disable @plane on @crtc, making sure that the pipe is running first.
2327 Serge 2160
 */
5354 serge 2161
static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2162
					   struct drm_crtc *crtc)
2327 Serge 2163
{
5354 serge 2164
	struct drm_device *dev = plane->dev;
2165
	struct drm_i915_private *dev_priv = dev->dev_private;
2166
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 2167
 
5354 serge 2168
	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2169
 
5060 serge 2170
	if (!intel_crtc->primary_enabled)
2171
		return;
4560 Serge 2172
 
2173
	intel_crtc->primary_enabled = false;
2174
 
5354 serge 2175
	dev_priv->display.update_primary_plane(crtc, plane->fb,
2176
					       crtc->x, crtc->y);
2327 Serge 2177
}
2178
 
3746 Serge 2179
static bool need_vtd_wa(struct drm_device *dev)
2180
{
2181
#ifdef CONFIG_INTEL_IOMMU
2182
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2183
		return true;
2184
#endif
2185
	return false;
2186
}
2187
 
5060 serge 2188
static int intel_align_height(struct drm_device *dev, int height, bool tiled)
2189
{
2190
	int tile_height;
2191
 
2192
	tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
2193
	return ALIGN(height, tile_height);
2194
}
2195
 
2335 Serge 2196
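/*
 * Pin a framebuffer object into the GGTT for scanout.  The required
 * alignment follows the switch below: 256KiB on gen9+ (and when the VT-d
 * workaround applies), 128KiB for linear buffers on Broadwater/Crestline,
 * 4KiB for linear on gen4+, 64KiB otherwise; pre-gen9 X-tiled buffers rely
 * on pin() to satisfy the fence alignment.  The pin and fence setup run
 * with interruptible waits disabled and under a runtime PM reference.
 */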
int
5354 serge 2197
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2198
			   struct drm_framebuffer *fb,
5060 serge 2199
			   struct intel_engine_cs *pipelined)
2335 Serge 2200
{
5354 serge 2201
	struct drm_device *dev = fb->dev;
2335 Serge 2202
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 2203
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2335 Serge 2204
	u32 alignment;
2205
	int ret;
2327 Serge 2206
 
5060 serge 2207
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2208
 
2335 Serge 2209
	switch (obj->tiling_mode) {
2210
	case I915_TILING_NONE:
5354 serge 2211
		if (INTEL_INFO(dev)->gen >= 9)
2212
			alignment = 256 * 1024;
2213
		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2335 Serge 2214
			alignment = 128 * 1024;
2215
		else if (INTEL_INFO(dev)->gen >= 4)
2216
			alignment = 4 * 1024;
2217
		else
2218
			alignment = 64 * 1024;
2219
		break;
2220
	case I915_TILING_X:
5354 serge 2221
		if (INTEL_INFO(dev)->gen >= 9)
2222
			alignment = 256 * 1024;
2223
		else {
2335 Serge 2224
			/* pin() will align the object as required by fence */
2225
			alignment = 0;
5354 serge 2226
		}
2335 Serge 2227
		break;
2228
	case I915_TILING_Y:
4560 Serge 2229
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
2335 Serge 2230
		return -EINVAL;
2231
	default:
2232
		BUG();
2233
	}
2327 Serge 2234
 
3746 Serge 2235
	/* Note that the w/a also requires 64 PTE of padding following the
2236
	 * bo. We currently fill all unused PTE with the shadow page and so
2237
	 * we should always have valid PTE following the scanout preventing
2238
	 * the VT-d warning.
2239
	 */
2240
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2241
		alignment = 256 * 1024;
2242
 
5097 serge 2243
	/*
2244
	 * Global gtt pte registers are special registers which actually forward
2245
	 * writes to a chunk of system memory. Which means that there is no risk
2246
	 * that the register values disappear as soon as we call
2247
	 * intel_runtime_pm_put(), so it is correct to wrap only the
2248
	 * pin/unpin/fence and not more.
2249
	 */
2250
	intel_runtime_pm_get(dev_priv);
2251
 
2335 Serge 2252
	dev_priv->mm.interruptible = false;
2253
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2254
	if (ret)
2255
		goto err_interruptible;
2327 Serge 2256
 
2335 Serge 2257
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2258
	 * fence, whereas 965+ only requires a fence if using
2259
	 * framebuffer compression.  For simplicity, we always install
2260
	 * a fence as the cost is not that onerous.
2261
	 */
3480 Serge 2262
	ret = i915_gem_object_get_fence(obj);
2263
	if (ret)
2264
		goto err_unpin;
2327 Serge 2265
 
3480 Serge 2266
	i915_gem_object_pin_fence(obj);
2267
 
2335 Serge 2268
	dev_priv->mm.interruptible = true;
5097 serge 2269
	intel_runtime_pm_put(dev_priv);
2335 Serge 2270
	return 0;
2327 Serge 2271
 
2335 Serge 2272
err_unpin:
4104 Serge 2273
	i915_gem_object_unpin_from_display_plane(obj);
2335 Serge 2274
err_interruptible:
2275
	dev_priv->mm.interruptible = true;
5097 serge 2276
	intel_runtime_pm_put(dev_priv);
2335 Serge 2277
	return ret;
2278
}
2327 Serge 2279
 
3031 serge 2280
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2281
{
5060 serge 2282
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2283
 
2284
	i915_gem_object_unpin_fence(obj);
2285
//	i915_gem_object_unpin_from_display_plane(obj);
3031 serge 2286
}
2287
 
2288
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2289
 * is assumed to be a power-of-two. */
3480 Serge 2290
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2291
					     unsigned int tiling_mode,
2292
					     unsigned int cpp,
3031 serge 2293
							unsigned int pitch)
2294
{
3480 Serge 2295
	if (tiling_mode != I915_TILING_NONE) {
2296
		unsigned int tile_rows, tiles;
3031 serge 2297
 
2298
		tile_rows = *y / 8;
2299
		*y %= 8;
2300
 
3480 Serge 2301
		tiles = *x / (512/cpp);
2302
		*x %= 512/cpp;
2303
 
3031 serge 2304
		return tile_rows * pitch * 8 + tiles * 4096;
3480 Serge 2305
	} else {
2306
		unsigned int offset;
2307
 
2308
		offset = *y * pitch + *x * cpp;
2309
		*y = 0;
2310
		*x = (offset & 4095) / cpp;
2311
		return offset & -4096;
2312
	}
3031 serge 2313
}
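/*
 * Worked example for the X-tiled branch of the helper above (illustrative
 * values only):
 *
 *   int x = 100, y = 50;
 *   unsigned long off =
 *       intel_gen4_compute_page_offset(&x, &y, I915_TILING_X, 4, 8192);
 *
 * yields tile_rows = 50 / 8 = 6 and tiles = 100 / 128 = 0, so off is
 * 6 * 8192 * 8 + 0 * 4096 = 393216 while x stays 100 and y becomes 2,
 * i.e. the coordinates relative to the base tile.
 */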
2314
 
5060 serge 2315
int intel_format_to_fourcc(int format)
2327 Serge 2316
{
5060 serge 2317
	switch (format) {
2318
	case DISPPLANE_8BPP:
2319
		return DRM_FORMAT_C8;
2320
	case DISPPLANE_BGRX555:
2321
		return DRM_FORMAT_XRGB1555;
2322
	case DISPPLANE_BGRX565:
2323
		return DRM_FORMAT_RGB565;
2324
	default:
2325
	case DISPPLANE_BGRX888:
2326
		return DRM_FORMAT_XRGB8888;
2327
	case DISPPLANE_RGBX888:
2328
		return DRM_FORMAT_XBGR8888;
2329
	case DISPPLANE_BGRX101010:
2330
		return DRM_FORMAT_XRGB2101010;
2331
	case DISPPLANE_RGBX101010:
2332
		return DRM_FORMAT_XBGR2101010;
2333
	}
2334
}
2335
 
2336
static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
2337
				  struct intel_plane_config *plane_config)
2338
{
2339
	struct drm_device *dev = crtc->base.dev;
2340
	struct drm_i915_gem_object *obj = NULL;
2341
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2342
	u32 base = plane_config->base;
2343
 
2344
	if (plane_config->size == 0)
2345
		return false;
2346
 
2347
	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2348
							     plane_config->size);
2349
	if (!obj)
2350
		return false;
2351
 
2352
    main_fb_obj = obj;
2353
 
2354
	if (plane_config->tiled) {
2355
		obj->tiling_mode = I915_TILING_X;
2356
		obj->stride = crtc->base.primary->fb->pitches[0];
2357
	}
2358
 
2359
	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2360
	mode_cmd.width = crtc->base.primary->fb->width;
2361
	mode_cmd.height = crtc->base.primary->fb->height;
2362
	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2363
 
2364
	mutex_lock(&dev->struct_mutex);
2365
 
2366
	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2367
				   &mode_cmd, obj)) {
2368
		DRM_DEBUG_KMS("intel fb init failed\n");
2369
		goto out_unref_obj;
2370
	}
2371
 
2372
	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
2373
	mutex_unlock(&dev->struct_mutex);
2374
 
2375
	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2376
	return true;
2377
 
2378
out_unref_obj:
2379
	drm_gem_object_unreference(&obj->base);
2380
	mutex_unlock(&dev->struct_mutex);
2381
	return false;
2382
}
2383
 
2384
static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2385
				 struct intel_plane_config *plane_config)
2386
{
2387
	struct drm_device *dev = intel_crtc->base.dev;
5354 serge 2388
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2389
	struct drm_crtc *c;
2390
	struct intel_crtc *i;
2391
	struct drm_i915_gem_object *obj;
2392
 
2393
	if (!intel_crtc->base.primary->fb)
2394
		return;
2395
 
2396
	if (intel_alloc_plane_obj(intel_crtc, plane_config))
2397
		return;
2398
 
2399
	kfree(intel_crtc->base.primary->fb);
2400
	intel_crtc->base.primary->fb = NULL;
2401
 
2402
	/*
2403
	 * Failed to alloc the obj, check to see if we should share
2404
	 * an fb with another CRTC instead
2405
	 */
2406
	for_each_crtc(dev, c) {
2407
		i = to_intel_crtc(c);
2408
 
2409
		if (c == &intel_crtc->base)
2410
			continue;
2411
 
2412
		if (!i->active)
2413
			continue;
2414
 
2415
		obj = intel_fb_obj(c->primary->fb);
2416
		if (obj == NULL)
2417
			continue;
2418
 
2419
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
5354 serge 2420
			if (obj->tiling_mode != I915_TILING_NONE)
2421
				dev_priv->preserve_bios_swizzle = true;
2422
 
5060 serge 2423
			drm_framebuffer_reference(c->primary->fb);
2424
			intel_crtc->base.primary->fb = c->primary->fb;
2425
			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2426
			break;
2427
		}
2428
	}
2429
}
2430
 
2431
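/*
 * Program the pre-ILK/VLV/CHV primary plane: when the primary is disabled
 * only a zero DSPCNTR and surface/base address are written, otherwise
 * DSPCNTR is built from the fb pixel format, the stride and (on gen4+) the
 * tile or linear offsets are set up, and the final DSPSURF/DSPADDR write
 * latches the double-buffered update.
 */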
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2432
				     struct drm_framebuffer *fb,
2433
				     int x, int y)
2434
{
2327 Serge 2435
    struct drm_device *dev = crtc->dev;
2436
    struct drm_i915_private *dev_priv = dev->dev_private;
2437
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5354 serge 2438
	struct drm_i915_gem_object *obj;
2327 Serge 2439
    int plane = intel_crtc->plane;
3031 serge 2440
	unsigned long linear_offset;
2327 Serge 2441
    u32 dspcntr;
5354 serge 2442
	u32 reg = DSPCNTR(plane);
2443
	int pixel_size;
2327 Serge 2444
 
5354 serge 2445
	if (!intel_crtc->primary_enabled) {
2446
		I915_WRITE(reg, 0);
2447
		if (INTEL_INFO(dev)->gen >= 4)
2448
			I915_WRITE(DSPSURF(plane), 0);
2449
		else
2450
			I915_WRITE(DSPADDR(plane), 0);
2451
		POSTING_READ(reg);
2452
		return;
2453
	}
2454
 
2455
	obj = intel_fb_obj(fb);
2456
	if (WARN_ON(obj == NULL))
2457
		return;
2458
 
2459
	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2460
 
2461
	dspcntr = DISPPLANE_GAMMA_ENABLE;
2462
 
2463
	dspcntr |= DISPLAY_PLANE_ENABLE;
2464
 
2465
	if (INTEL_INFO(dev)->gen < 4) {
2466
		if (intel_crtc->pipe == PIPE_B)
2467
			dspcntr |= DISPPLANE_SEL_PIPE_B;
2468
 
2469
		/* pipesrc and dspsize control the size that is scaled from,
2470
		 * which should always be the user's requested size.
2471
		 */
2472
		I915_WRITE(DSPSIZE(plane),
2473
			   ((intel_crtc->config.pipe_src_h - 1) << 16) |
2474
			   (intel_crtc->config.pipe_src_w - 1));
2475
		I915_WRITE(DSPPOS(plane), 0);
2476
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2477
		I915_WRITE(PRIMSIZE(plane),
2478
			   ((intel_crtc->config.pipe_src_h - 1) << 16) |
2479
			   (intel_crtc->config.pipe_src_w - 1));
2480
		I915_WRITE(PRIMPOS(plane), 0);
2481
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
2482
	}
2483
 
3243 Serge 2484
	switch (fb->pixel_format) {
2485
	case DRM_FORMAT_C8:
2327 Serge 2486
        dspcntr |= DISPPLANE_8BPP;
2487
        break;
3243 Serge 2488
	case DRM_FORMAT_XRGB1555:
2489
	case DRM_FORMAT_ARGB1555:
2490
		dspcntr |= DISPPLANE_BGRX555;
2491
		break;
2492
	case DRM_FORMAT_RGB565:
2493
		dspcntr |= DISPPLANE_BGRX565;
2494
		break;
2495
	case DRM_FORMAT_XRGB8888:
2496
	case DRM_FORMAT_ARGB8888:
2497
		dspcntr |= DISPPLANE_BGRX888;
2498
		break;
2499
	case DRM_FORMAT_XBGR8888:
2500
	case DRM_FORMAT_ABGR8888:
2501
		dspcntr |= DISPPLANE_RGBX888;
2502
		break;
2503
	case DRM_FORMAT_XRGB2101010:
2504
	case DRM_FORMAT_ARGB2101010:
2505
		dspcntr |= DISPPLANE_BGRX101010;
2327 Serge 2506
        break;
3243 Serge 2507
	case DRM_FORMAT_XBGR2101010:
2508
	case DRM_FORMAT_ABGR2101010:
2509
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2510
        break;
2511
    default:
3746 Serge 2512
		BUG();
2327 Serge 2513
    }
3243 Serge 2514
 
5354 serge 2515
	if (INTEL_INFO(dev)->gen >= 4 &&
2516
	    obj->tiling_mode != I915_TILING_NONE)
2327 Serge 2517
            dspcntr |= DISPPLANE_TILED;
2518
 
4104 Serge 2519
	if (IS_G4X(dev))
2520
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2521
 
5354 serge 2522
	linear_offset = y * fb->pitches[0] + x * pixel_size;
2327 Serge 2523
 
3031 serge 2524
	if (INTEL_INFO(dev)->gen >= 4) {
2525
		intel_crtc->dspaddr_offset =
3480 Serge 2526
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
5354 serge 2527
						       pixel_size,
3031 serge 2528
							   fb->pitches[0]);
2529
		linear_offset -= intel_crtc->dspaddr_offset;
2530
	} else {
2531
		intel_crtc->dspaddr_offset = linear_offset;
2532
	}
2533
 
5354 serge 2534
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2535
		dspcntr |= DISPPLANE_ROTATE_180;
2536
 
2537
		x += (intel_crtc->config.pipe_src_w - 1);
2538
		y += (intel_crtc->config.pipe_src_h - 1);
2539
 
2540
		/* Find the last pixel of the last line of the display
2541
		 * data and add it to linear_offset. */
2542
		linear_offset +=
2543
			(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
2544
			(intel_crtc->config.pipe_src_w - 1) * pixel_size;
2545
	}
2546
 
2547
	I915_WRITE(reg, dspcntr);
2548
 
4104 Serge 2549
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2550
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2551
		      fb->pitches[0]);
2342 Serge 2552
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2327 Serge 2553
    if (INTEL_INFO(dev)->gen >= 4) {
4560 Serge 2554
		I915_WRITE(DSPSURF(plane),
4104 Serge 2555
				     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2327 Serge 2556
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2557
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2327 Serge 2558
    } else
4104 Serge 2559
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2327 Serge 2560
    POSTING_READ(reg);
2561
}
2562
 
5060 serge 2563
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2564
					 struct drm_framebuffer *fb,
2565
					 int x, int y)
2327 Serge 2566
{
2567
    struct drm_device *dev = crtc->dev;
2568
    struct drm_i915_private *dev_priv = dev->dev_private;
2569
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5354 serge 2570
	struct drm_i915_gem_object *obj;
2327 Serge 2571
    int plane = intel_crtc->plane;
3031 serge 2572
	unsigned long linear_offset;
2327 Serge 2573
    u32 dspcntr;
5354 serge 2574
	u32 reg = DSPCNTR(plane);
2575
	int pixel_size;
2327 Serge 2576
 
5354 serge 2577
	if (!intel_crtc->primary_enabled) {
2578
		I915_WRITE(reg, 0);
2579
		I915_WRITE(DSPSURF(plane), 0);
2580
		POSTING_READ(reg);
2581
		return;
2582
	}
2583
 
2584
	obj = intel_fb_obj(fb);
2585
	if (WARN_ON(obj == NULL))
2586
		return;
2587
 
2588
	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2589
 
2590
	dspcntr = DISPPLANE_GAMMA_ENABLE;
2591
 
2592
	dspcntr |= DISPLAY_PLANE_ENABLE;
2593
 
2594
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2595
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2596
 
3243 Serge 2597
	switch (fb->pixel_format) {
2598
	case DRM_FORMAT_C8:
2327 Serge 2599
        dspcntr |= DISPPLANE_8BPP;
2600
        break;
3243 Serge 2601
	case DRM_FORMAT_RGB565:
2602
		dspcntr |= DISPPLANE_BGRX565;
2327 Serge 2603
        break;
3243 Serge 2604
	case DRM_FORMAT_XRGB8888:
2605
	case DRM_FORMAT_ARGB8888:
2606
		dspcntr |= DISPPLANE_BGRX888;
2607
		break;
2608
	case DRM_FORMAT_XBGR8888:
2609
	case DRM_FORMAT_ABGR8888:
2610
		dspcntr |= DISPPLANE_RGBX888;
2611
		break;
2612
	case DRM_FORMAT_XRGB2101010:
2613
	case DRM_FORMAT_ARGB2101010:
2614
		dspcntr |= DISPPLANE_BGRX101010;
2615
		break;
2616
	case DRM_FORMAT_XBGR2101010:
2617
	case DRM_FORMAT_ABGR2101010:
2618
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2619
        break;
2620
    default:
3746 Serge 2621
		BUG();
2327 Serge 2622
    }
2623
 
3480 Serge 2624
	if (obj->tiling_mode != I915_TILING_NONE)
2625
		dspcntr |= DISPPLANE_TILED;
2327 Serge 2626
 
5354 serge 2627
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2327 Serge 2628
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2629
 
5354 serge 2630
	linear_offset = y * fb->pitches[0] + x * pixel_size;
3031 serge 2631
	intel_crtc->dspaddr_offset =
3480 Serge 2632
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
5354 serge 2633
					       pixel_size,
3031 serge 2634
						   fb->pitches[0]);
2635
	linear_offset -= intel_crtc->dspaddr_offset;
5354 serge 2636
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2637
		dspcntr |= DISPPLANE_ROTATE_180;
2327 Serge 2638
 
5354 serge 2639
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2640
			x += (intel_crtc->config.pipe_src_w - 1);
2641
			y += (intel_crtc->config.pipe_src_h - 1);
2642
 
2643
			/* Find the last pixel of the last line of the display
2644
			 * data and add it to linear_offset. */
2645
			linear_offset +=
2646
				(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
2647
				(intel_crtc->config.pipe_src_w - 1) * pixel_size;
2648
		}
2649
	}
2650
 
2651
	I915_WRITE(reg, dspcntr);
2652
 
4104 Serge 2653
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2654
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2655
		      fb->pitches[0]);
2342 Serge 2656
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
4560 Serge 2657
	I915_WRITE(DSPSURF(plane),
4104 Serge 2658
			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
4560 Serge 2659
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3243 Serge 2660
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2661
	} else {
2330 Serge 2662
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2663
		I915_WRITE(DSPLINOFF(plane), linear_offset);
3243 Serge 2664
	}
2330 Serge 2665
	POSTING_READ(reg);
2327 Serge 2666
}
2667
 
5354 serge 2668
static void skylake_update_primary_plane(struct drm_crtc *crtc,
2669
					 struct drm_framebuffer *fb,
2670
					 int x, int y)
2671
{
2672
	struct drm_device *dev = crtc->dev;
2673
	struct drm_i915_private *dev_priv = dev->dev_private;
2674
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2675
	struct intel_framebuffer *intel_fb;
2676
	struct drm_i915_gem_object *obj;
2677
	int pipe = intel_crtc->pipe;
2678
	u32 plane_ctl, stride;
2679
 
2680
	if (!intel_crtc->primary_enabled) {
2681
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
2682
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
2683
		POSTING_READ(PLANE_CTL(pipe, 0));
2684
		return;
2685
	}
2686
 
2687
	plane_ctl = PLANE_CTL_ENABLE |
2688
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
2689
		    PLANE_CTL_PIPE_CSC_ENABLE;
2690
 
2691
	switch (fb->pixel_format) {
2692
	case DRM_FORMAT_RGB565:
2693
		plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
2694
		break;
2695
	case DRM_FORMAT_XRGB8888:
2696
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2697
		break;
2698
	case DRM_FORMAT_XBGR8888:
2699
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
2700
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2701
		break;
2702
	case DRM_FORMAT_XRGB2101010:
2703
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2704
		break;
2705
	case DRM_FORMAT_XBGR2101010:
2706
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
2707
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2708
		break;
2709
	default:
2710
		BUG();
2711
	}
2712
 
2713
	intel_fb = to_intel_framebuffer(fb);
2714
	obj = intel_fb->obj;
2715
 
2716
	/*
2717
	 * The stride is either expressed as a multiple of 64 bytes chunks for
2718
	 * linear buffers or in number of tiles for tiled buffers.
2719
	 */
2720
	switch (obj->tiling_mode) {
2721
	case I915_TILING_NONE:
2722
		stride = fb->pitches[0] >> 6;
2723
		break;
2724
	case I915_TILING_X:
2725
		plane_ctl |= PLANE_CTL_TILED_X;
2726
		stride = fb->pitches[0] >> 9;
2727
		break;
2728
	default:
2729
		BUG();
2730
	}
2731
 
2732
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2733
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
2734
		plane_ctl |= PLANE_CTL_ROTATE_180;
2735
 
2736
	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2737
 
2738
	DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
2739
		      i915_gem_obj_ggtt_offset(obj),
2740
		      x, y, fb->width, fb->height,
2741
		      fb->pitches[0]);
2742
 
2743
	I915_WRITE(PLANE_POS(pipe, 0), 0);
2744
	I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
2745
	I915_WRITE(PLANE_SIZE(pipe, 0),
2746
		   (intel_crtc->config.pipe_src_h - 1) << 16 |
2747
		   (intel_crtc->config.pipe_src_w - 1));
2748
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
2749
	I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
2750
 
2751
	POSTING_READ(PLANE_SURF(pipe, 0));
2752
}
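/*
 * Note the PLANE_STRIDE units programmed above: fb->pitches[0] >> 6 gives a
 * linear stride in 64-byte chunks, while fb->pitches[0] >> 9 gives an
 * X-tiled stride in 512-byte tile widths; a 4096-byte pitch, for example,
 * is written as 64 in the linear case and 8 in the X-tiled case.
 */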
2753
 
2327 Serge 2754
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2755
static int
2756
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2757
			   int x, int y, enum mode_set_atomic state)
2758
{
2759
	struct drm_device *dev = crtc->dev;
2760
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2761
 
2762
	if (dev_priv->display.disable_fbc)
2763
		dev_priv->display.disable_fbc(dev);
2764
 
5060 serge 2765
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2766
 
2767
	return 0;
3031 serge 2768
}
2769
 
2770
#if 0
5354 serge 2771
static void intel_complete_page_flips(struct drm_device *dev)
4104 Serge 2772
{
2773
	struct drm_crtc *crtc;
2774
 
5060 serge 2775
	for_each_crtc(dev, crtc) {
4104 Serge 2776
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2777
		enum plane plane = intel_crtc->plane;
2778
 
2779
		intel_prepare_page_flip(dev, plane);
2780
		intel_finish_page_flip_plane(dev, plane);
2781
	}
5354 serge 2782
}
4104 Serge 2783
 
5354 serge 2784
static void intel_update_primary_planes(struct drm_device *dev)
2785
{
2786
	struct drm_i915_private *dev_priv = dev->dev_private;
2787
	struct drm_crtc *crtc;
2788
 
5060 serge 2789
	for_each_crtc(dev, crtc) {
4104 Serge 2790
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2791
 
5060 serge 2792
		drm_modeset_lock(&crtc->mutex, NULL);
4560 Serge 2793
		/*
2794
		 * FIXME: Once we have proper support for primary planes (and
2795
		 * disabling them without disabling the entire crtc) allow again
5060 serge 2796
		 * a NULL crtc->primary->fb.
4560 Serge 2797
		 */
5060 serge 2798
		if (intel_crtc->active && crtc->primary->fb)
2799
			dev_priv->display.update_primary_plane(crtc,
2800
							       crtc->primary->fb,
2801
							       crtc->x,
2802
							       crtc->y);
2803
		drm_modeset_unlock(&crtc->mutex);
4104 Serge 2804
	}
2805
}
2806
 
5354 serge 2807
void intel_prepare_reset(struct drm_device *dev)
2808
{
2809
	struct drm_i915_private *dev_priv = to_i915(dev);
2810
	struct intel_crtc *crtc;
2811
 
2812
	/* no reset support for gen2 */
2813
	if (IS_GEN2(dev))
2814
		return;
2815
 
2816
	/* reset doesn't touch the display */
2817
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
2818
		return;
2819
 
2820
	drm_modeset_lock_all(dev);
2821
 
2822
	/*
2823
	 * Disabling the crtcs gracefully seems nicer. Also the
2824
	 * g33 docs say we should at least disable all the planes.
2825
	 */
2826
	for_each_intel_crtc(dev, crtc) {
2827
		if (crtc->active)
2828
			dev_priv->display.crtc_disable(&crtc->base);
2829
	}
2830
}
2831
 
2832
void intel_finish_reset(struct drm_device *dev)
2833
{
2834
	struct drm_i915_private *dev_priv = to_i915(dev);
2835
 
2836
	/*
2837
	 * Flips in the rings will be nuked by the reset,
2838
	 * so complete all pending flips so that user space
2839
	 * will get its events and not get stuck.
2840
	 */
2841
	intel_complete_page_flips(dev);
2842
 
2843
	/* no reset support for gen2 */
2844
	if (IS_GEN2(dev))
2845
		return;
2846
 
2847
	/* reset doesn't touch the display */
2848
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
2849
		/*
2850
		 * Flips in the rings have been nuked by the reset,
2851
		 * so update the base address of all primary
2852
		 * planes to the the last fb to make sure we're
2853
		 * showing the correct fb after a reset.
2854
		 */
2855
		intel_update_primary_planes(dev);
2856
		return;
2857
	}
2858
 
2859
	/*
2860
	 * The display has been reset as well,
2861
	 * so need a full re-initialization.
2862
	 */
2863
	intel_runtime_pm_disable_interrupts(dev_priv);
2864
	intel_runtime_pm_enable_interrupts(dev_priv);
2865
 
2866
	intel_modeset_init_hw(dev);
2867
 
2868
	spin_lock_irq(&dev_priv->irq_lock);
2869
	if (dev_priv->display.hpd_irq_setup)
2870
		dev_priv->display.hpd_irq_setup(dev);
2871
	spin_unlock_irq(&dev_priv->irq_lock);
2872
 
2873
	intel_modeset_setup_hw_state(dev, true);
2874
 
2875
	intel_hpd_init(dev_priv);
2876
 
2877
	drm_modeset_unlock_all(dev);
2878
}
2879
 
3031 serge 2880
static int
2881
intel_finish_fb(struct drm_framebuffer *old_fb)
2882
{
5060 serge 2883
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
3031 serge 2884
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2885
	bool was_interruptible = dev_priv->mm.interruptible;
2327 Serge 2886
	int ret;
2887
 
3031 serge 2888
	/* Big Hammer, we also need to ensure that any pending
2889
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2890
	 * current scanout is retired before unpinning the old
2891
	 * framebuffer.
2892
	 *
2893
	 * This should only fail upon a hung GPU, in which case we
2894
	 * can safely continue.
2895
	 */
2896
	dev_priv->mm.interruptible = false;
2897
	ret = i915_gem_object_finish_gpu(obj);
2898
	dev_priv->mm.interruptible = was_interruptible;
2327 Serge 2899
 
3031 serge 2900
	return ret;
2327 Serge 2901
}
4104 Serge 2902
 
5060 serge 2903
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
4104 Serge 2904
{
2905
	struct drm_device *dev = crtc->dev;
5060 serge 2906
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 2907
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 2908
	bool pending;
4104 Serge 2909
 
5060 serge 2910
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2911
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2912
		return false;
4104 Serge 2913
 
5354 serge 2914
	spin_lock_irq(&dev->event_lock);
5060 serge 2915
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
5354 serge 2916
	spin_unlock_irq(&dev->event_lock);
4104 Serge 2917
 
5060 serge 2918
	return pending;
4104 Serge 2919
}
3031 serge 2920
#endif
2327 Serge 2921
 
5354 serge 2922
static void intel_update_pipe_size(struct intel_crtc *crtc)
2923
{
2924
	struct drm_device *dev = crtc->base.dev;
2925
	struct drm_i915_private *dev_priv = dev->dev_private;
2926
	const struct drm_display_mode *adjusted_mode;
2927
 
2928
	if (!i915.fastboot)
2929
		return;
2930
 
2931
	/*
2932
	 * Update pipe size and adjust fitter if needed: the reason for this is
2933
	 * that in compute_mode_changes we check the native mode (not the pfit
2934
	 * mode) to see if we can flip rather than do a full mode set. In the
2935
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
2936
	 * pfit state, we'll end up with a big fb scanned out into the wrong
2937
	 * sized surface.
2938
	 *
2939
	 * To fix this properly, we need to hoist the checks up into
2940
	 * compute_mode_changes (or above), check the actual pfit state and
2941
	 * whether the platform allows pfit disable with pipe active, and only
2942
	 * then update the pipesrc and pfit state, even on the flip path.
2943
	 */
2944
 
2945
	adjusted_mode = &crtc->config.adjusted_mode;
2946
 
2947
	I915_WRITE(PIPESRC(crtc->pipe),
2948
		   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2949
		   (adjusted_mode->crtc_vdisplay - 1));
2950
	if (!crtc->config.pch_pfit.enabled &&
2951
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2952
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2953
		I915_WRITE(PF_CTL(crtc->pipe), 0);
2954
		I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
2955
		I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
2956
	}
2957
	crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2958
	crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2959
}
2960
 
2327 Serge 2961
static int
2962
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
3031 serge 2963
		    struct drm_framebuffer *fb)
2327 Serge 2964
{
2965
	struct drm_device *dev = crtc->dev;
3031 serge 2966
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 2967
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 2968
	enum pipe pipe = intel_crtc->pipe;
2969
	struct drm_framebuffer *old_fb = crtc->primary->fb;
2970
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
2342 Serge 2971
	int ret;
2327 Serge 2972
 
5060 serge 2973
 
2327 Serge 2974
	/* no fb bound */
3031 serge 2975
	if (!fb) {
2327 Serge 2976
		DRM_ERROR("No FB bound\n");
2977
		return 0;
2978
	}
2979
 
3746 Serge 2980
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
4104 Serge 2981
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2982
			  plane_name(intel_crtc->plane),
3746 Serge 2983
				INTEL_INFO(dev)->num_pipes);
2327 Serge 2984
		return -EINVAL;
2985
	}
2986
 
2987
	mutex_lock(&dev->struct_mutex);
5354 serge 2988
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
5060 serge 2989
	if (ret == 0)
5354 serge 2990
		i915_gem_track_fb(old_obj, intel_fb_obj(fb),
5060 serge 2991
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
2992
	mutex_unlock(&dev->struct_mutex);
4280 Serge 2993
    if (ret != 0) {
2994
       DRM_ERROR("pin & fence failed\n");
2995
       return ret;
2996
    }
2327 Serge 2997
 
5060 serge 2998
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2327 Serge 2999
 
5060 serge 3000
	if (intel_crtc->active)
3001
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
3002
 
3003
	crtc->primary->fb = fb;
3031 serge 3004
	crtc->x = x;
3005
	crtc->y = y;
3006
 
3007
	if (old_fb) {
4104 Serge 3008
		if (intel_crtc->active && old_fb != fb)
3031 serge 3009
			intel_wait_for_vblank(dev, intel_crtc->pipe);
5060 serge 3010
		mutex_lock(&dev->struct_mutex);
3011
		intel_unpin_fb_obj(old_obj);
3012
		mutex_unlock(&dev->struct_mutex);
3031 serge 3013
	}
3014
 
5060 serge 3015
	mutex_lock(&dev->struct_mutex);
3031 serge 3016
	intel_update_fbc(dev);
2336 Serge 3017
	mutex_unlock(&dev->struct_mutex);
2327 Serge 3018
 
2336 Serge 3019
    return 0;
2327 Serge 3020
}
3021
 
3022
static void intel_fdi_normal_train(struct drm_crtc *crtc)
3023
{
3024
	struct drm_device *dev = crtc->dev;
3025
	struct drm_i915_private *dev_priv = dev->dev_private;
3026
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3027
	int pipe = intel_crtc->pipe;
3028
	u32 reg, temp;
3029
 
3030
	/* enable normal train */
3031
	reg = FDI_TX_CTL(pipe);
3032
	temp = I915_READ(reg);
3033
	if (IS_IVYBRIDGE(dev)) {
3034
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3035
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3036
	} else {
3037
		temp &= ~FDI_LINK_TRAIN_NONE;
3038
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3039
	}
3040
	I915_WRITE(reg, temp);
3041
 
3042
	reg = FDI_RX_CTL(pipe);
3043
	temp = I915_READ(reg);
3044
	if (HAS_PCH_CPT(dev)) {
3045
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3046
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3047
	} else {
3048
		temp &= ~FDI_LINK_TRAIN_NONE;
3049
		temp |= FDI_LINK_TRAIN_NONE;
3050
	}
3051
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3052
 
3053
	/* wait one idle pattern time */
3054
	POSTING_READ(reg);
3055
	udelay(1000);
3056
 
3057
	/* IVB wants error correction enabled */
3058
	if (IS_IVYBRIDGE(dev))
3059
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3060
			   FDI_FE_ERRC_ENABLE);
3061
}
3062
 
4280 Serge 3063
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
4104 Serge 3064
{
4280 Serge 3065
	return crtc->base.enabled && crtc->active &&
3066
		crtc->config.has_pch_encoder;
4104 Serge 3067
}
3068
 
3243 Serge 3069
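/*
 * On Ivybridge FDI B and C share lanes.  When neither pipe B nor pipe C
 * drives a PCH encoder any more, the hook below clears
 * FDI_BC_BIFURCATION_SELECT so that FDI B can later be brought up with all
 * lanes available.
 */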
static void ivb_modeset_global_resources(struct drm_device *dev)
2327 Serge 3070
{
3071
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 3072
	struct intel_crtc *pipe_B_crtc =
3073
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
3074
	struct intel_crtc *pipe_C_crtc =
3075
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
3076
	uint32_t temp;
2327 Serge 3077
 
4104 Serge 3078
	/*
3079
	 * When everything is off disable fdi C so that we could enable fdi B
3080
	 * with all lanes. Note that we don't care about enabled pipes without
3081
	 * an enabled pch encoder.
3082
	 */
3083
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
3084
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
3243 Serge 3085
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3086
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3087
 
3088
		temp = I915_READ(SOUTH_CHICKEN1);
3089
		temp &= ~FDI_BC_BIFURCATION_SELECT;
3090
		DRM_DEBUG_KMS("disabling fdi C rx\n");
3091
		I915_WRITE(SOUTH_CHICKEN1, temp);
3092
	}
2327 Serge 3093
}
3094
 
3095
/* The FDI link training functions for ILK/Ibexpeak. */
3096
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3097
{
3098
    struct drm_device *dev = crtc->dev;
3099
    struct drm_i915_private *dev_priv = dev->dev_private;
3100
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3101
    int pipe = intel_crtc->pipe;
3102
    u32 reg, temp, tries;
3103
 
5060 serge 3104
	/* FDI needs bits from pipe first */
2327 Serge 3105
    assert_pipe_enabled(dev_priv, pipe);
3106
 
3107
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3108
       for train result */
3109
    reg = FDI_RX_IMR(pipe);
3110
    temp = I915_READ(reg);
3111
    temp &= ~FDI_RX_SYMBOL_LOCK;
3112
    temp &= ~FDI_RX_BIT_LOCK;
3113
    I915_WRITE(reg, temp);
3114
    I915_READ(reg);
3115
    udelay(150);
3116
 
3117
    /* enable CPU FDI TX and PCH FDI RX */
3118
    reg = FDI_TX_CTL(pipe);
3119
    temp = I915_READ(reg);
4104 Serge 3120
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3121
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2327 Serge 3122
    temp &= ~FDI_LINK_TRAIN_NONE;
3123
    temp |= FDI_LINK_TRAIN_PATTERN_1;
3124
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3125
 
3126
    reg = FDI_RX_CTL(pipe);
3127
    temp = I915_READ(reg);
3128
    temp &= ~FDI_LINK_TRAIN_NONE;
3129
    temp |= FDI_LINK_TRAIN_PATTERN_1;
3130
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3131
 
3132
    POSTING_READ(reg);
3133
    udelay(150);
3134
 
3135
    /* Ironlake workaround, enable clock pointer after FDI enable*/
3136
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3137
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3138
               FDI_RX_PHASE_SYNC_POINTER_EN);
3139
 
3140
    reg = FDI_RX_IIR(pipe);
3141
    for (tries = 0; tries < 5; tries++) {
3142
        temp = I915_READ(reg);
3143
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3144
 
3145
        if ((temp & FDI_RX_BIT_LOCK)) {
3146
            DRM_DEBUG_KMS("FDI train 1 done.\n");
3147
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3148
            break;
3149
        }
3150
    }
3151
    if (tries == 5)
3152
        DRM_ERROR("FDI train 1 fail!\n");
3153
 
3154
    /* Train 2 */
3155
    reg = FDI_TX_CTL(pipe);
3156
    temp = I915_READ(reg);
3157
    temp &= ~FDI_LINK_TRAIN_NONE;
3158
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3159
    I915_WRITE(reg, temp);
3160
 
3161
    reg = FDI_RX_CTL(pipe);
3162
    temp = I915_READ(reg);
3163
    temp &= ~FDI_LINK_TRAIN_NONE;
3164
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3165
    I915_WRITE(reg, temp);
3166
 
3167
    POSTING_READ(reg);
3168
    udelay(150);
3169
 
3170
    reg = FDI_RX_IIR(pipe);
3171
    for (tries = 0; tries < 5; tries++) {
3172
        temp = I915_READ(reg);
3173
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3174
 
3175
        if (temp & FDI_RX_SYMBOL_LOCK) {
3176
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3177
            DRM_DEBUG_KMS("FDI train 2 done.\n");
3178
            break;
3179
        }
3180
    }
3181
    if (tries == 5)
3182
        DRM_ERROR("FDI train 2 fail!\n");
3183
 
3184
    DRM_DEBUG_KMS("FDI train done\n");
3185
 
3186
}
3187
 
2342 Serge 3188
static const int snb_b_fdi_train_param[] = {
2327 Serge 3189
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3190
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3191
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3192
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3193
};
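/*
 * Each entry above is one voltage-swing / pre-emphasis combination for the
 * CPU FDI TX on SNB-B; the SNB and IVB training loops below walk this table
 * in order until the PCH FDI RX reports bit/symbol lock in FDI_RX_IIR.
 */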
3194
 
3195
/* The FDI link training functions for SNB/Cougarpoint. */
3196
static void gen6_fdi_link_train(struct drm_crtc *crtc)
3197
{
3198
    struct drm_device *dev = crtc->dev;
3199
    struct drm_i915_private *dev_priv = dev->dev_private;
3200
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3201
    int pipe = intel_crtc->pipe;
3031 serge 3202
	u32 reg, temp, i, retry;
2327 Serge 3203
 
3204
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3205
       for train result */
3206
    reg = FDI_RX_IMR(pipe);
3207
    temp = I915_READ(reg);
3208
    temp &= ~FDI_RX_SYMBOL_LOCK;
3209
    temp &= ~FDI_RX_BIT_LOCK;
3210
    I915_WRITE(reg, temp);
3211
 
3212
    POSTING_READ(reg);
3213
    udelay(150);
3214
 
3215
    /* enable CPU FDI TX and PCH FDI RX */
3216
    reg = FDI_TX_CTL(pipe);
3217
    temp = I915_READ(reg);
4104 Serge 3218
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3219
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2327 Serge 3220
    temp &= ~FDI_LINK_TRAIN_NONE;
3221
    temp |= FDI_LINK_TRAIN_PATTERN_1;
3222
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3223
    /* SNB-B */
3224
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3225
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3226
 
3243 Serge 3227
	I915_WRITE(FDI_RX_MISC(pipe),
3228
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3229
 
2327 Serge 3230
    reg = FDI_RX_CTL(pipe);
3231
    temp = I915_READ(reg);
3232
    if (HAS_PCH_CPT(dev)) {
3233
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3234
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3235
    } else {
3236
        temp &= ~FDI_LINK_TRAIN_NONE;
3237
        temp |= FDI_LINK_TRAIN_PATTERN_1;
3238
    }
3239
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3240
 
3241
    POSTING_READ(reg);
3242
    udelay(150);
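    /*
     * Walk the four vswing/pre-emphasis settings; for each one, poll
     * FDI_RX_IIR up to five times (50 us apart) for bit lock before
     * moving on to the next entry.
     */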
3243
 
2342 Serge 3244
	for (i = 0; i < 4; i++) {
2327 Serge 3245
        reg = FDI_TX_CTL(pipe);
3246
        temp = I915_READ(reg);
3247
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3248
        temp |= snb_b_fdi_train_param[i];
3249
        I915_WRITE(reg, temp);
3250
 
3251
        POSTING_READ(reg);
3252
        udelay(500);
3253
 
3031 serge 3254
		for (retry = 0; retry < 5; retry++) {
2327 Serge 3255
			reg = FDI_RX_IIR(pipe);
3256
			temp = I915_READ(reg);
3257
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3258
			if (temp & FDI_RX_BIT_LOCK) {
3259
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3260
				DRM_DEBUG_KMS("FDI train 1 done.\n");
3261
				break;
3262
			}
3031 serge 3263
			udelay(50);
3264
		}
3265
		if (retry < 5)
3266
			break;
2327 Serge 3267
    }
3268
    if (i == 4)
3269
        DRM_ERROR("FDI train 1 fail!\n");
3270
 
3271
    /* Train 2 */
3272
    reg = FDI_TX_CTL(pipe);
3273
    temp = I915_READ(reg);
3274
    temp &= ~FDI_LINK_TRAIN_NONE;
3275
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3276
    if (IS_GEN6(dev)) {
3277
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3278
        /* SNB-B */
3279
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3280
    }
3281
    I915_WRITE(reg, temp);
3282
 
3283
    reg = FDI_RX_CTL(pipe);
3284
    temp = I915_READ(reg);
3285
    if (HAS_PCH_CPT(dev)) {
3286
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3287
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3288
    } else {
3289
        temp &= ~FDI_LINK_TRAIN_NONE;
3290
        temp |= FDI_LINK_TRAIN_PATTERN_2;
3291
    }
3292
    I915_WRITE(reg, temp);
3293
 
3294
    POSTING_READ(reg);
3295
    udelay(150);
3296
 
2342 Serge 3297
	for (i = 0; i < 4; i++) {
2327 Serge 3298
        reg = FDI_TX_CTL(pipe);
3299
        temp = I915_READ(reg);
3300
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3301
        temp |= snb_b_fdi_train_param[i];
3302
        I915_WRITE(reg, temp);
3303
 
3304
        POSTING_READ(reg);
3305
        udelay(500);
3306
 
3031 serge 3307
		for (retry = 0; retry < 5; retry++) {
2327 Serge 3308
			reg = FDI_RX_IIR(pipe);
3309
			temp = I915_READ(reg);
3310
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3311
			if (temp & FDI_RX_SYMBOL_LOCK) {
3312
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3313
				DRM_DEBUG_KMS("FDI train 2 done.\n");
3314
				break;
3315
			}
3031 serge 3316
			udelay(50);
3317
		}
3318
		if (retry < 5)
3319
			break;
2327 Serge 3320
    }
3321
    if (i == 4)
3322
        DRM_ERROR("FDI train 2 fail!\n");
3323
 
3324
    DRM_DEBUG_KMS("FDI train done.\n");
3325
}
3326
 
3327
/* Manual link training for Ivy Bridge A0 parts */
3328
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3329
{
3330
    struct drm_device *dev = crtc->dev;
3331
    struct drm_i915_private *dev_priv = dev->dev_private;
3332
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3333
    int pipe = intel_crtc->pipe;
4104 Serge 3334
	u32 reg, temp, i, j;
2327 Serge 3335
 
3336
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3337
       for train result */
3338
    reg = FDI_RX_IMR(pipe);
3339
    temp = I915_READ(reg);
3340
    temp &= ~FDI_RX_SYMBOL_LOCK;
3341
    temp &= ~FDI_RX_BIT_LOCK;
3342
    I915_WRITE(reg, temp);
3343
 
3344
    POSTING_READ(reg);
3345
    udelay(150);
3346
 
3243 Serge 3347
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3348
		      I915_READ(FDI_RX_IIR(pipe)));
3349
 
4104 Serge 3350
	/* Try each vswing and preemphasis setting twice before moving on */
3351
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3352
		/* disable first in case we need to retry */
3353
		reg = FDI_TX_CTL(pipe);
3354
		temp = I915_READ(reg);
3355
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3356
		temp &= ~FDI_TX_ENABLE;
3357
		I915_WRITE(reg, temp);
3358
 
3359
		reg = FDI_RX_CTL(pipe);
3360
		temp = I915_READ(reg);
3361
		temp &= ~FDI_LINK_TRAIN_AUTO;
3362
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3363
		temp &= ~FDI_RX_ENABLE;
3364
		I915_WRITE(reg, temp);
3365
 
2327 Serge 3366
    /* enable CPU FDI TX and PCH FDI RX */
3367
    reg = FDI_TX_CTL(pipe);
3368
    temp = I915_READ(reg);
4104 Serge 3369
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3370
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2327 Serge 3371
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3372
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4104 Serge 3373
		temp |= snb_b_fdi_train_param[j/2];
2342 Serge 3374
	temp |= FDI_COMPOSITE_SYNC;
2327 Serge 3375
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3376
 
3243 Serge 3377
	I915_WRITE(FDI_RX_MISC(pipe),
3378
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3379
 
2327 Serge 3380
    reg = FDI_RX_CTL(pipe);
3381
    temp = I915_READ(reg);
3382
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2342 Serge 3383
	temp |= FDI_COMPOSITE_SYNC;
2327 Serge 3384
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3385
 
3386
    POSTING_READ(reg);
4104 Serge 3387
		udelay(1); /* should be 0.5us */
2327 Serge 3388
 
2342 Serge 3389
	for (i = 0; i < 4; i++) {
2327 Serge 3390
        reg = FDI_RX_IIR(pipe);
3391
        temp = I915_READ(reg);
3392
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3393
 
3394
        if (temp & FDI_RX_BIT_LOCK ||
3395
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3396
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4104 Serge 3397
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3398
					      i);
2327 Serge 3399
            break;
3400
        }
4104 Serge 3401
			udelay(1); /* should be 0.5us */
3402
		}
3403
		if (i == 4) {
3404
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3405
			continue;
2327 Serge 3406
    }
3407
 
3408
    /* Train 2 */
3409
    reg = FDI_TX_CTL(pipe);
3410
    temp = I915_READ(reg);
3411
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3412
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3413
    I915_WRITE(reg, temp);
3414
 
3415
    reg = FDI_RX_CTL(pipe);
3416
    temp = I915_READ(reg);
3417
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3418
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3419
    I915_WRITE(reg, temp);
3420
 
3421
    POSTING_READ(reg);
4104 Serge 3422
		udelay(2); /* should be 1.5us */
2327 Serge 3423
 
2342 Serge 3424
	for (i = 0; i < 4; i++) {
2327 Serge 3425
        reg = FDI_RX_IIR(pipe);
3426
        temp = I915_READ(reg);
3427
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3428
 
4104 Serge 3429
			if (temp & FDI_RX_SYMBOL_LOCK ||
3430
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
2327 Serge 3431
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4104 Serge 3432
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3433
					      i);
3434
				goto train_done;
2327 Serge 3435
        }
4104 Serge 3436
			udelay(2); /* should be 1.5us */
2327 Serge 3437
    }
3438
    if (i == 4)
4104 Serge 3439
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3440
	}
2327 Serge 3441
 
4104 Serge 3442
train_done:
2327 Serge 3443
    DRM_DEBUG_KMS("FDI train done.\n");
3444
}
3445
 
3031 serge 3446
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2327 Serge 3447
{
3031 serge 3448
	struct drm_device *dev = intel_crtc->base.dev;
2327 Serge 3449
	struct drm_i915_private *dev_priv = dev->dev_private;
3450
	int pipe = intel_crtc->pipe;
3451
	u32 reg, temp;
3452
 
3453
 
3454
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3455
	reg = FDI_RX_CTL(pipe);
3456
	temp = I915_READ(reg);
4104 Serge 3457
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3458
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3480 Serge 3459
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
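	/* Copy the pipe's BPC field into FDI RX so the link uses the same
	 * colour depth as the pipe (cf. the matching shift in
	 * ironlake_fdi_disable()). */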
2327 Serge 3460
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3461
 
3462
	POSTING_READ(reg);
3463
	udelay(200);
3464
 
3465
	/* Switch from Rawclk to PCDclk */
3466
	temp = I915_READ(reg);
3467
	I915_WRITE(reg, temp | FDI_PCDCLK);
3468
 
3469
	POSTING_READ(reg);
3470
	udelay(200);
3471
 
3472
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3473
	reg = FDI_TX_CTL(pipe);
3474
	temp = I915_READ(reg);
3475
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3476
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3477
 
3478
		POSTING_READ(reg);
3479
		udelay(100);
3480
	}
3481
}
3482
 
3031 serge 3483
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3484
{
3485
	struct drm_device *dev = intel_crtc->base.dev;
3486
	struct drm_i915_private *dev_priv = dev->dev_private;
3487
	int pipe = intel_crtc->pipe;
3488
	u32 reg, temp;
3489
 
3490
	/* Switch from PCDclk to Rawclk */
3491
	reg = FDI_RX_CTL(pipe);
3492
	temp = I915_READ(reg);
3493
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3494
 
3495
	/* Disable CPU FDI TX PLL */
3496
	reg = FDI_TX_CTL(pipe);
3497
	temp = I915_READ(reg);
3498
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3499
 
3500
	POSTING_READ(reg);
3501
	udelay(100);
3502
 
3503
	reg = FDI_RX_CTL(pipe);
3504
	temp = I915_READ(reg);
3505
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3506
 
3507
	/* Wait for the clocks to turn off. */
3508
	POSTING_READ(reg);
3509
	udelay(100);
3510
}
3511
 
2327 Serge 3512
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3513
{
3514
	struct drm_device *dev = crtc->dev;
3515
	struct drm_i915_private *dev_priv = dev->dev_private;
3516
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3517
	int pipe = intel_crtc->pipe;
3518
	u32 reg, temp;
3519
 
3520
	/* disable CPU FDI tx and PCH FDI rx */
3521
	reg = FDI_TX_CTL(pipe);
3522
	temp = I915_READ(reg);
3523
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3524
	POSTING_READ(reg);
3525
 
3526
	reg = FDI_RX_CTL(pipe);
3527
	temp = I915_READ(reg);
3528
	temp &= ~(0x7 << 16);
3480 Serge 3529
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3530
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3531
 
3532
	POSTING_READ(reg);
3533
	udelay(100);
3534
 
3535
	/* Ironlake workaround, disable clock pointer after downing FDI */
5060 serge 3536
	if (HAS_PCH_IBX(dev))
2327 Serge 3537
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3538
 
3539
	/* still set train pattern 1 */
3540
	reg = FDI_TX_CTL(pipe);
3541
	temp = I915_READ(reg);
3542
	temp &= ~FDI_LINK_TRAIN_NONE;
3543
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3544
	I915_WRITE(reg, temp);
3545
 
3546
	reg = FDI_RX_CTL(pipe);
3547
	temp = I915_READ(reg);
3548
	if (HAS_PCH_CPT(dev)) {
3549
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3550
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3551
	} else {
3552
		temp &= ~FDI_LINK_TRAIN_NONE;
3553
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3554
	}
3555
	/* BPC in FDI rx is consistent with that in PIPECONF */
3556
	temp &= ~(0x07 << 16);
3480 Serge 3557
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3558
	I915_WRITE(reg, temp);
3559
 
3560
	POSTING_READ(reg);
3561
	udelay(100);
3562
}
3563
 
5060 serge 3564
bool intel_has_pending_fb_unpin(struct drm_device *dev)
2327 Serge 3565
{
5060 serge 3566
	struct intel_crtc *crtc;
2327 Serge 3567
 
5060 serge 3568
	/* Note that we don't need to be called with mode_config.lock here
3569
	 * as our list of CRTC objects is static for the lifetime of the
3570
	 * device and so cannot disappear as we iterate. Similarly, we can
3571
	 * happily treat the predicates as racy, atomic checks as userspace
3572
	 * cannot claim and pin a new fb without at least acquiring the
3573
	 * struct_mutex and so serialising with us.
3574
	 */
3575
	for_each_intel_crtc(dev, crtc) {
3576
		if (atomic_read(&crtc->unpin_work_count) == 0)
3577
			continue;
2327 Serge 3578
 
5060 serge 3579
		if (crtc->unpin_work)
3580
			intel_wait_for_vblank(dev, crtc->pipe);
3031 serge 3581
 
5060 serge 3582
		return true;
3583
	}
3584
 
3585
	return false;
2327 Serge 3586
}
3587
 
3031 serge 3588
#if 0
5060 serge 3589
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2327 Serge 3590
{
3031 serge 3591
	struct drm_device *dev = crtc->dev;
3592
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 3593
 
3480 Serge 3594
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
5354 serge 3595
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3596
				       !intel_crtc_has_pending_flip(crtc),
3597
				       60*HZ) == 0)) {
3598
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3480 Serge 3599
 
5354 serge 3600
		spin_lock_irq(&dev->event_lock);
3601
		if (intel_crtc->unpin_work) {
3602
			WARN_ONCE(1, "Removing stuck page flip\n");
3603
			page_flip_completed(intel_crtc);
3604
		}
3605
		spin_unlock_irq(&dev->event_lock);
3606
	}
3031 serge 3607
 
5354 serge 3608
	if (crtc->primary->fb) {
3031 serge 3609
		mutex_lock(&dev->struct_mutex);
5060 serge 3610
		intel_finish_fb(crtc->primary->fb);
3031 serge 3611
		mutex_unlock(&dev->struct_mutex);
5354 serge 3612
	}
2327 Serge 3613
}
3031 serge 3614
#endif
2327 Serge 3615
 
3031 serge 3616
/* Program iCLKIP clock to the desired frequency */
3617
static void lpt_program_iclkip(struct drm_crtc *crtc)
3618
{
3619
	struct drm_device *dev = crtc->dev;
3620
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3621
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3031 serge 3622
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3623
	u32 temp;
3624
 
3480 Serge 3625
	mutex_lock(&dev_priv->dpio_lock);
3626
 
3031 serge 3627
	/* It is necessary to ungate the pixclk gate prior to programming
3628
	 * the divisors, and gate it back when it is done.
3629
	 */
3630
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3631
 
3632
	/* Disable SSCCTL */
3633
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3243 Serge 3634
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3635
				SBI_SSCCTL_DISABLE,
3636
			SBI_ICLK);
3031 serge 3637
 
3638
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
4560 Serge 3639
	if (clock == 20000) {
3031 serge 3640
		auxdiv = 1;
3641
		divsel = 0x41;
3642
		phaseinc = 0x20;
3643
	} else {
3644
		/* The iCLK virtual clock root frequency is in MHz,
4560 Serge 3645
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
3646
		 * divisors, it is necessary to divide one by another, so we
3031 serge 3647
		 * convert the virtual clock precision to KHz here for higher
3648
		 * precision.
3649
		 */
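		/*
		 * Worked example (illustrative only): for a 108000 kHz clock,
		 * desired_divisor = 172800000 / 108000 = 1600, so
		 * msb_divisor_value = 1600 / 64 = 25 and pi_value = 0,
		 * giving divsel = 23, phaseinc = 0 and auxdiv = 0.
		 */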
3650
		u32 iclk_virtual_root_freq = 172800 * 1000;
3651
		u32 iclk_pi_range = 64;
3652
		u32 desired_divisor, msb_divisor_value, pi_value;
3653
 
4560 Serge 3654
		desired_divisor = (iclk_virtual_root_freq / clock);
3031 serge 3655
		msb_divisor_value = desired_divisor / iclk_pi_range;
3656
		pi_value = desired_divisor % iclk_pi_range;
3657
 
3658
		auxdiv = 0;
3659
		divsel = msb_divisor_value - 2;
3660
		phaseinc = pi_value;
3661
	}
3662
 
3663
	/* This should not happen with any sane values */
3664
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3665
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3666
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3667
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3668
 
3669
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4560 Serge 3670
			clock,
3031 serge 3671
			auxdiv,
3672
			divsel,
3673
			phasedir,
3674
			phaseinc);
3675
 
3676
	/* Program SSCDIVINTPHASE6 */
3243 Serge 3677
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3031 serge 3678
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3679
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3680
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3681
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3682
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3683
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3243 Serge 3684
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3031 serge 3685
 
3686
	/* Program SSCAUXDIV */
3243 Serge 3687
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3031 serge 3688
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3689
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3243 Serge 3690
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3031 serge 3691
 
3692
	/* Enable modulator and associated divider */
3243 Serge 3693
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3031 serge 3694
	temp &= ~SBI_SSCCTL_DISABLE;
3243 Serge 3695
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3031 serge 3696
 
3697
	/* Wait for initialization time */
3698
	udelay(24);
3699
 
3700
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3480 Serge 3701
 
3702
	mutex_unlock(&dev_priv->dpio_lock);
3031 serge 3703
}
3704
 
4104 Serge 3705
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3706
						enum pipe pch_transcoder)
3707
{
3708
	struct drm_device *dev = crtc->base.dev;
3709
	struct drm_i915_private *dev_priv = dev->dev_private;
3710
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
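	/* Copy the already-programmed CPU transcoder timings over to the
	 * PCH transcoder, register by register. */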
3711
 
3712
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3713
		   I915_READ(HTOTAL(cpu_transcoder)));
3714
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3715
		   I915_READ(HBLANK(cpu_transcoder)));
3716
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3717
		   I915_READ(HSYNC(cpu_transcoder)));
3718
 
3719
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3720
		   I915_READ(VTOTAL(cpu_transcoder)));
3721
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3722
		   I915_READ(VBLANK(cpu_transcoder)));
3723
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3724
		   I915_READ(VSYNC(cpu_transcoder)));
3725
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3726
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
3727
}
3728
 
4280 Serge 3729
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3730
{
3731
	struct drm_i915_private *dev_priv = dev->dev_private;
3732
	uint32_t temp;
3733
 
3734
	temp = I915_READ(SOUTH_CHICKEN1);
3735
	if (temp & FDI_BC_BIFURCATION_SELECT)
3736
		return;
3737
 
3738
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3739
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3740
 
3741
	temp |= FDI_BC_BIFURCATION_SELECT;
3742
	DRM_DEBUG_KMS("enabling fdi C rx\n");
3743
	I915_WRITE(SOUTH_CHICKEN1, temp);
3744
	POSTING_READ(SOUTH_CHICKEN1);
3745
}
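/*
 * Pipes B and C share the four FDI B/C lanes on IVB: pipe B can take all
 * four lanes (bifurcation off), or the lanes are split two/two so pipe C
 * can use FDI as well (bifurcation on).  This helper decides, per pipe,
 * whether the bifurcation bit has to be set before FDI training starts.
 */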
3746
 
3747
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3748
{
3749
	struct drm_device *dev = intel_crtc->base.dev;
3750
	struct drm_i915_private *dev_priv = dev->dev_private;
3751
 
3752
	switch (intel_crtc->pipe) {
3753
	case PIPE_A:
3754
		break;
3755
	case PIPE_B:
3756
		if (intel_crtc->config.fdi_lanes > 2)
3757
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3758
		else
3759
			cpt_enable_fdi_bc_bifurcation(dev);
3760
 
3761
		break;
3762
	case PIPE_C:
3763
		cpt_enable_fdi_bc_bifurcation(dev);
3764
 
3765
		break;
3766
	default:
3767
		BUG();
3768
	}
3769
}
3770
 
2327 Serge 3771
/*
3772
 * Enable PCH resources required for PCH ports:
3773
 *   - PCH PLLs
3774
 *   - FDI training & RX/TX
3775
 *   - update transcoder timings
3776
 *   - DP transcoding bits
3777
 *   - transcoder
3778
 */
3779
static void ironlake_pch_enable(struct drm_crtc *crtc)
3780
{
3781
	struct drm_device *dev = crtc->dev;
3782
	struct drm_i915_private *dev_priv = dev->dev_private;
3783
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3784
	int pipe = intel_crtc->pipe;
3031 serge 3785
	u32 reg, temp;
2327 Serge 3786
 
4104 Serge 3787
	assert_pch_transcoder_disabled(dev_priv, pipe);
3031 serge 3788
 
4280 Serge 3789
	if (IS_IVYBRIDGE(dev))
3790
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3791
 
3243 Serge 3792
	/* Write the TU size bits before fdi link training, so that error
3793
	 * detection works. */
3794
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
3795
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3796
 
2327 Serge 3797
	/* For PCH output, training FDI link */
3798
	dev_priv->display.fdi_link_train(crtc);
3799
 
4104 Serge 3800
	/* We need to program the right clock selection before writing the pixel
3801
	 * multiplier into the DPLL. */
3243 Serge 3802
	if (HAS_PCH_CPT(dev)) {
3031 serge 3803
		u32 sel;
2342 Serge 3804
 
2327 Serge 3805
		temp = I915_READ(PCH_DPLL_SEL);
4104 Serge 3806
		temp |= TRANS_DPLL_ENABLE(pipe);
3807
		sel = TRANS_DPLLB_SEL(pipe);
3808
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3031 serge 3809
			temp |= sel;
3810
		else
3811
			temp &= ~sel;
2327 Serge 3812
		I915_WRITE(PCH_DPLL_SEL, temp);
3813
	}
3814
 
4104 Serge 3815
	/* XXX: pch pll's can be enabled any time before we enable the PCH
3816
	 * transcoder, and we actually should do this to not upset any PCH
3817
	 * transcoder that already use the clock when we share it.
3818
	 *
3819
	 * Note that enable_shared_dpll tries to do the right thing, but
3820
	 * get_shared_dpll unconditionally resets the pll - we need that to have
3821
	 * the right LVDS enable sequence. */
5060 serge 3822
	intel_enable_shared_dpll(intel_crtc);
4104 Serge 3823
 
2327 Serge 3824
	/* set transcoder timing, panel must allow it */
3825
	assert_panel_unlocked(dev_priv, pipe);
4104 Serge 3826
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
2327 Serge 3827
 
3828
	intel_fdi_normal_train(crtc);
3829
 
3830
	/* For PCH DP, enable TRANS_DP_CTL */
5354 serge 3831
	if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
3480 Serge 3832
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2327 Serge 3833
		reg = TRANS_DP_CTL(pipe);
3834
		temp = I915_READ(reg);
3835
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3836
			  TRANS_DP_SYNC_MASK |
3837
			  TRANS_DP_BPC_MASK);
3838
		temp |= (TRANS_DP_OUTPUT_ENABLE |
3839
			 TRANS_DP_ENH_FRAMING);
3840
		temp |= bpc << 9; /* same format but at 11:9 */
3841
 
3842
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3843
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3844
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3845
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3846
 
3847
		switch (intel_trans_dp_port_sel(crtc)) {
3848
		case PCH_DP_B:
3849
			temp |= TRANS_DP_PORT_SEL_B;
3850
			break;
3851
		case PCH_DP_C:
3852
			temp |= TRANS_DP_PORT_SEL_C;
3853
			break;
3854
		case PCH_DP_D:
3855
			temp |= TRANS_DP_PORT_SEL_D;
3856
			break;
3857
		default:
3243 Serge 3858
			BUG();
2327 Serge 3859
		}
3860
 
3861
		I915_WRITE(reg, temp);
3862
	}
3863
 
3243 Serge 3864
	ironlake_enable_pch_transcoder(dev_priv, pipe);
2327 Serge 3865
}
3866
 
3243 Serge 3867
static void lpt_pch_enable(struct drm_crtc *crtc)
3868
{
3869
	struct drm_device *dev = crtc->dev;
3870
	struct drm_i915_private *dev_priv = dev->dev_private;
3871
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746 Serge 3872
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 3873
 
4104 Serge 3874
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3243 Serge 3875
 
3876
	lpt_program_iclkip(crtc);
3877
 
3878
	/* Set transcoder timing. */
4104 Serge 3879
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3243 Serge 3880
 
3881
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3882
}
3883
 
5060 serge 3884
void intel_put_shared_dpll(struct intel_crtc *crtc)
3031 serge 3885
{
4104 Serge 3886
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3031 serge 3887
 
3888
	if (pll == NULL)
3889
		return;
3890
 
5354 serge 3891
	if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
3892
		WARN(1, "bad %s crtc mask\n", pll->name);
3031 serge 3893
		return;
3894
	}
3895
 
5354 serge 3896
	pll->config.crtc_mask &= ~(1 << crtc->pipe);
3897
	if (pll->config.crtc_mask == 0) {
4104 Serge 3898
		WARN_ON(pll->on);
3899
		WARN_ON(pll->active);
3900
	}
3901
 
3902
	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3031 serge 3903
}
3904
 
5060 serge 3905
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3031 serge 3906
{
4104 Serge 3907
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
5354 serge 3908
	struct intel_shared_dpll *pll;
4104 Serge 3909
	enum intel_dpll_id i;
3031 serge 3910
 
3911
	if (HAS_PCH_IBX(dev_priv->dev)) {
3912
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4104 Serge 3913
		i = (enum intel_dpll_id) crtc->pipe;
3914
		pll = &dev_priv->shared_dplls[i];
3031 serge 3915
 
4104 Serge 3916
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3917
			      crtc->base.base.id, pll->name);
3031 serge 3918
 
5354 serge 3919
		WARN_ON(pll->new_config->crtc_mask);
5060 serge 3920
 
3031 serge 3921
		goto found;
3922
	}
3923
 
4104 Serge 3924
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3925
		pll = &dev_priv->shared_dplls[i];
3031 serge 3926
 
3927
		/* Only want to check enabled timings first */
5354 serge 3928
		if (pll->new_config->crtc_mask == 0)
3031 serge 3929
			continue;
3930
 
5354 serge 3931
		if (memcmp(&crtc->new_config->dpll_hw_state,
3932
			   &pll->new_config->hw_state,
3933
			   sizeof(pll->new_config->hw_state)) == 0) {
3934
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
3935
				      crtc->base.base.id, pll->name,
3936
				      pll->new_config->crtc_mask,
3937
				      pll->active);
3031 serge 3938
			goto found;
3939
		}
3940
	}
3941
 
3942
	/* Ok no matching timings, maybe there's a free one? */
4104 Serge 3943
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3944
		pll = &dev_priv->shared_dplls[i];
5354 serge 3945
		if (pll->new_config->crtc_mask == 0) {
4104 Serge 3946
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3947
				      crtc->base.base.id, pll->name);
3031 serge 3948
			goto found;
3949
		}
3950
	}
3951
 
3952
	return NULL;
3953
 
3954
found:
5354 serge 3955
	if (pll->new_config->crtc_mask == 0)
3956
		pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
5060 serge 3957
 
5354 serge 3958
	crtc->new_config->shared_dpll = i;
4104 Serge 3959
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3960
			 pipe_name(crtc->pipe));
3961
 
5354 serge 3962
	pll->new_config->crtc_mask |= 1 << crtc->pipe;
3031 serge 3963
 
3964
	return pll;
3965
}
3966
 
5354 serge 3967
/**
3968
 * intel_shared_dpll_start_config - start a new PLL staged config
3969
 * @dev_priv: DRM device
3970
 * @clear_pipes: mask of pipes that will have their PLLs freed
3971
 *
3972
 * Starts a new PLL staged config, copying the current config but
3973
 * releasing the references of pipes specified in clear_pipes.
3974
 */
3975
static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
3976
					  unsigned clear_pipes)
3977
{
3978
	struct intel_shared_dpll *pll;
3979
	enum intel_dpll_id i;
3980
 
3981
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3982
		pll = &dev_priv->shared_dplls[i];
3983
 
3984
		pll->new_config = kmemdup(&pll->config, sizeof pll->config,
3985
					  GFP_KERNEL);
3986
		if (!pll->new_config)
3987
			goto cleanup;
3988
 
3989
		pll->new_config->crtc_mask &= ~clear_pipes;
3990
	}
3991
 
3992
	return 0;
3993
 
3994
cleanup:
3995
	while (--i >= 0) {
3996
		pll = &dev_priv->shared_dplls[i];
3997
		kfree(pll->new_config);
3998
		pll->new_config = NULL;
3999
	}
4000
 
4001
	return -ENOMEM;
4002
}
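/*
 * Rough lifecycle of the staged PLL config (a sketch, not a formal API
 * contract): intel_shared_dpll_start_config() snapshots each pll->config
 * into pll->new_config and releases the pipes being reconfigured, the
 * modeset code then assigns PLLs via intel_get_shared_dpll(), which only
 * touches the new_config copies, and finally intel_shared_dpll_commit()
 * makes the staged state live while intel_shared_dpll_abort_config()
 * throws it away on failure.
 */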
4003
 
4004
static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
4005
{
4006
	struct intel_shared_dpll *pll;
4007
	enum intel_dpll_id i;
4008
 
4009
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4010
		pll = &dev_priv->shared_dplls[i];
4011
 
4012
		WARN_ON(pll->new_config == &pll->config);
4013
 
4014
		pll->config = *pll->new_config;
4015
		kfree(pll->new_config);
4016
		pll->new_config = NULL;
4017
	}
4018
}
4019
 
4020
static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
4021
{
4022
	struct intel_shared_dpll *pll;
4023
	enum intel_dpll_id i;
4024
 
4025
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4026
		pll = &dev_priv->shared_dplls[i];
4027
 
4028
		WARN_ON(pll->new_config == &pll->config);
4029
 
4030
		kfree(pll->new_config);
4031
		pll->new_config = NULL;
4032
	}
4033
}
4034
 
4104 Serge 4035
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
2342 Serge 4036
{
4037
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 4038
	int dslreg = PIPEDSL(pipe);
2342 Serge 4039
	u32 temp;
4040
 
4041
	temp = I915_READ(dslreg);
4042
	udelay(500);
4043
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4044
		if (wait_for(I915_READ(dslreg) != temp, 5))
4104 Serge 4045
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
2342 Serge 4046
	}
4047
}
4048
 
5354 serge 4049
static void skylake_pfit_enable(struct intel_crtc *crtc)
4050
{
4051
	struct drm_device *dev = crtc->base.dev;
4052
	struct drm_i915_private *dev_priv = dev->dev_private;
4053
	int pipe = crtc->pipe;
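	/* If the panel fitter state asks for it, enable the pipe scaler and
	 * program its window position and size; otherwise leave the PS_*
	 * registers untouched. */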
4054
 
4055
	if (crtc->config.pch_pfit.enabled) {
4056
		I915_WRITE(PS_CTL(pipe), PS_ENABLE);
4057
		I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
4058
		I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
4059
	}
4060
}
4061
 
4104 Serge 4062
static void ironlake_pfit_enable(struct intel_crtc *crtc)
4063
{
4064
	struct drm_device *dev = crtc->base.dev;
4065
	struct drm_i915_private *dev_priv = dev->dev_private;
4066
	int pipe = crtc->pipe;
4067
 
4068
	if (crtc->config.pch_pfit.enabled) {
4069
		/* Force use of hard-coded filter coefficients
4070
		 * as some pre-programmed values are broken,
4071
		 * e.g. x201.
4072
		 */
4073
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4074
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4075
						 PF_PIPE_SEL_IVB(pipe));
4076
		else
4077
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4078
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
4079
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
4080
	}
4081
}
4082
 
4083
static void intel_enable_planes(struct drm_crtc *crtc)
4084
{
4085
	struct drm_device *dev = crtc->dev;
4086
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
5060 serge 4087
	struct drm_plane *plane;
4104 Serge 4088
	struct intel_plane *intel_plane;
4089
 
5060 serge 4090
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4091
		intel_plane = to_intel_plane(plane);
4104 Serge 4092
		if (intel_plane->pipe == pipe)
4093
			intel_plane_restore(&intel_plane->base);
5060 serge 4094
	}
4104 Serge 4095
}
4096
 
4097
static void intel_disable_planes(struct drm_crtc *crtc)
4098
{
4099
	struct drm_device *dev = crtc->dev;
4100
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
5060 serge 4101
	struct drm_plane *plane;
4104 Serge 4102
	struct intel_plane *intel_plane;
4103
 
5060 serge 4104
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
4105
		intel_plane = to_intel_plane(plane);
4104 Serge 4106
		if (intel_plane->pipe == pipe)
4107
			intel_plane_disable(&intel_plane->base);
5060 serge 4108
	}
4104 Serge 4109
}
4110
 
4560 Serge 4111
void hsw_enable_ips(struct intel_crtc *crtc)
4112
{
5060 serge 4113
	struct drm_device *dev = crtc->base.dev;
4114
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 4115
 
4116
	if (!crtc->config.ips_enabled)
4117
		return;
4118
 
5060 serge 4119
	/* We can only enable IPS after we enable a plane and wait for a vblank */
4120
	intel_wait_for_vblank(dev, crtc->pipe);
4121
 
4560 Serge 4122
	assert_plane_enabled(dev_priv, crtc->plane);
5060 serge 4123
	if (IS_BROADWELL(dev)) {
4560 Serge 4124
		mutex_lock(&dev_priv->rps.hw_lock);
4125
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4126
		mutex_unlock(&dev_priv->rps.hw_lock);
4127
		/* Quoting Art Runyan: "it's not safe to expect any particular
4128
		 * value in IPS_CTL bit 31 after enabling IPS through the
4129
		 * mailbox." Moreover, the mailbox may return a bogus state,
4130
		 * so we need to just enable it and continue on.
4131
		 */
4132
	} else {
4133
		I915_WRITE(IPS_CTL, IPS_ENABLE);
4134
		/* The bit only becomes 1 in the next vblank, so this wait here
4135
		 * is essentially intel_wait_for_vblank. If we don't have this
4136
		 * and don't wait for vblanks until the end of crtc_enable, then
4137
		 * the HW state readout code will complain that the expected
4138
		 * IPS_CTL value is not the one we read. */
4139
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4140
			DRM_ERROR("Timed out waiting for IPS enable\n");
4141
	}
4142
}
4143
 
4144
void hsw_disable_ips(struct intel_crtc *crtc)
4145
{
4146
	struct drm_device *dev = crtc->base.dev;
4147
	struct drm_i915_private *dev_priv = dev->dev_private;
4148
 
4149
	if (!crtc->config.ips_enabled)
4150
		return;
4151
 
4152
	assert_plane_enabled(dev_priv, crtc->plane);
5060 serge 4153
	if (IS_BROADWELL(dev)) {
4560 Serge 4154
		mutex_lock(&dev_priv->rps.hw_lock);
4155
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4156
		mutex_unlock(&dev_priv->rps.hw_lock);
5060 serge 4157
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4158
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4159
			DRM_ERROR("Timed out waiting for IPS disable\n");
4560 Serge 4160
	} else {
4161
		I915_WRITE(IPS_CTL, 0);
4162
		POSTING_READ(IPS_CTL);
4163
	}
4164
 
4165
	/* We need to wait for a vblank before we can disable the plane. */
4166
	intel_wait_for_vblank(dev, crtc->pipe);
4167
}
4168
 
4169
/** Loads the palette/gamma unit for the CRTC with the prepared values */
4170
static void intel_crtc_load_lut(struct drm_crtc *crtc)
4171
{
4172
	struct drm_device *dev = crtc->dev;
4173
	struct drm_i915_private *dev_priv = dev->dev_private;
4174
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4175
	enum pipe pipe = intel_crtc->pipe;
4176
	int palreg = PALETTE(pipe);
4177
	int i;
4178
	bool reenable_ips = false;
4179
 
4180
	/* The clocks have to be on to load the palette. */
4181
	if (!crtc->enabled || !intel_crtc->active)
4182
		return;
4183
 
4184
	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
5354 serge 4185
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
4560 Serge 4186
			assert_dsi_pll_enabled(dev_priv);
4187
		else
4188
			assert_pll_enabled(dev_priv, pipe);
4189
	}
4190
 
4191
	/* use legacy palette for Ironlake */
5060 serge 4192
	if (!HAS_GMCH_DISPLAY(dev))
4560 Serge 4193
		palreg = LGC_PALETTE(pipe);
4194
 
4195
	/* Workaround: Do not read or write the pipe palette/gamma data while
4196
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
4197
	 */
4198
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
4199
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
4200
	     GAMMA_MODE_MODE_SPLIT)) {
4201
		hsw_disable_ips(intel_crtc);
4202
		reenable_ips = true;
4203
	}
4204
 
4205
	for (i = 0; i < 256; i++) {
4206
		I915_WRITE(palreg + 4 * i,
4207
			   (intel_crtc->lut_r[i] << 16) |
4208
			   (intel_crtc->lut_g[i] << 8) |
4209
			   intel_crtc->lut_b[i]);
4210
	}
4211
 
4212
	if (reenable_ips)
4213
		hsw_enable_ips(intel_crtc);
4214
}
4215
 
5060 serge 4216
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
4217
{
4218
	if (!enable && intel_crtc->overlay) {
4219
		struct drm_device *dev = intel_crtc->base.dev;
4220
		struct drm_i915_private *dev_priv = dev->dev_private;
4221
 
4222
		mutex_lock(&dev->struct_mutex);
4223
		dev_priv->mm.interruptible = false;
5354 serge 4224
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
5060 serge 4225
        dev_priv->mm.interruptible = true;
4226
		mutex_unlock(&dev->struct_mutex);
4227
	}
4228
 
4229
	/* Let userspace switch the overlay on again. In most cases userspace
4230
	 * has to recompute where to put it anyway.
4231
	 */
4232
}
4233
 
4234
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
4235
{
4236
	struct drm_device *dev = crtc->dev;
4237
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4238
	int pipe = intel_crtc->pipe;
4239
 
5354 serge 4240
	intel_enable_primary_hw_plane(crtc->primary, crtc);
5060 serge 4241
	intel_enable_planes(crtc);
4242
	intel_crtc_update_cursor(crtc, true);
4243
	intel_crtc_dpms_overlay(intel_crtc, true);
4244
 
4245
	hsw_enable_ips(intel_crtc);
4246
 
4247
	mutex_lock(&dev->struct_mutex);
4248
	intel_update_fbc(dev);
4249
	mutex_unlock(&dev->struct_mutex);
5354 serge 4250
 
4251
	/*
4252
	 * FIXME: Once we grow proper nuclear flip support out of this we need
4253
	 * to compute the mask of flip planes precisely. For the time being
4254
	 * consider this a flip from a NULL plane.
4255
	 */
4256
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
5060 serge 4257
}
4258
 
4259
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4260
{
4261
	struct drm_device *dev = crtc->dev;
4262
	struct drm_i915_private *dev_priv = dev->dev_private;
4263
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4264
	int pipe = intel_crtc->pipe;
4265
	int plane = intel_crtc->plane;
4266
 
4267
 
4268
	if (dev_priv->fbc.plane == plane)
4269
		intel_disable_fbc(dev);
4270
 
4271
	hsw_disable_ips(intel_crtc);
4272
 
4273
	intel_crtc_dpms_overlay(intel_crtc, false);
4274
	intel_crtc_update_cursor(crtc, false);
4275
	intel_disable_planes(crtc);
5354 serge 4276
	intel_disable_primary_hw_plane(crtc->primary, crtc);
4277
 
4278
	/*
4279
	 * FIXME: Once we grow proper nuclear flip support out of this we need
4280
	 * to compute the mask of flip planes precisely. For the time being
4281
	 * consider this a flip to a NULL plane.
4282
	 */
4283
//	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
5060 serge 4284
}
4285
 
2327 Serge 4286
static void ironlake_crtc_enable(struct drm_crtc *crtc)
4287
{
4288
    struct drm_device *dev = crtc->dev;
4289
    struct drm_i915_private *dev_priv = dev->dev_private;
4290
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4291
	struct intel_encoder *encoder;
2327 Serge 4292
    int pipe = intel_crtc->pipe;
4293
 
3031 serge 4294
	WARN_ON(!crtc->enabled);
4295
 
2327 Serge 4296
    if (intel_crtc->active)
4297
        return;
4298
 
5060 serge 4299
	if (intel_crtc->config.has_pch_encoder)
4300
		intel_prepare_shared_dpll(intel_crtc);
4301
 
4302
	if (intel_crtc->config.has_dp_encoder)
4303
		intel_dp_set_m_n(intel_crtc);
4304
 
4305
	intel_set_pipe_timings(intel_crtc);
4306
 
4307
	if (intel_crtc->config.has_pch_encoder) {
4308
		intel_cpu_transcoder_set_m_n(intel_crtc,
5354 serge 4309
				     &intel_crtc->config.fdi_m_n, NULL);
5060 serge 4310
	}
4311
 
4312
	ironlake_set_pipeconf(crtc);
4313
 
2327 Serge 4314
    intel_crtc->active = true;
4104 Serge 4315
 
5354 serge 4316
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4317
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4104 Serge 4318
 
4319
	for_each_encoder_on_crtc(dev, crtc, encoder)
4320
		if (encoder->pre_enable)
4321
			encoder->pre_enable(encoder);
2327 Serge 4322
 
3746 Serge 4323
	if (intel_crtc->config.has_pch_encoder) {
3243 Serge 4324
		/* Note: FDI PLL enabling _must_ be done before we enable the
4325
		 * cpu pipes, hence this is separate from all the other fdi/pch
4326
		 * enabling. */
3031 serge 4327
		ironlake_fdi_pll_enable(intel_crtc);
4328
	} else {
4329
		assert_fdi_tx_disabled(dev_priv, pipe);
4330
		assert_fdi_rx_disabled(dev_priv, pipe);
4331
	}
2327 Serge 4332
 
4104 Serge 4333
	ironlake_pfit_enable(intel_crtc);
3031 serge 4334
 
2327 Serge 4335
    /*
4336
     * On ILK+ LUT must be loaded before the pipe is running but with
4337
     * clocks enabled
4338
     */
4339
    intel_crtc_load_lut(crtc);
4340
 
4560 Serge 4341
	intel_update_watermarks(crtc);
5060 serge 4342
	intel_enable_pipe(intel_crtc);
2327 Serge 4343
 
3746 Serge 4344
	if (intel_crtc->config.has_pch_encoder)
2327 Serge 4345
        ironlake_pch_enable(crtc);
4346
 
3031 serge 4347
	for_each_encoder_on_crtc(dev, crtc, encoder)
4348
		encoder->enable(encoder);
4349
 
4350
	if (HAS_PCH_CPT(dev))
4104 Serge 4351
		cpt_verify_modeset(dev, intel_crtc->pipe);
3031 serge 4352
 
5354 serge 4353
	assert_vblank_disabled(crtc);
4354
	drm_crtc_vblank_on(crtc);
4355
 
5060 serge 4356
	intel_crtc_enable_planes(crtc);
2327 Serge 4357
}
4358
 
4104 Serge 4359
/* IPS only exists on ULT machines and is tied to pipe A. */
4360
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4361
{
4362
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4363
}
4364
 
4560 Serge 4365
/*
4366
 * This implements the workaround described in the "notes" section of the mode
4367
 * set sequence documentation. When going from no pipes or single pipe to
4368
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
4369
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4370
 */
4371
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4372
{
4373
	struct drm_device *dev = crtc->base.dev;
4374
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4375
 
4376
	/* We want to get the other_active_crtc only if there's only 1 other
4377
	 * active crtc. */
5060 serge 4378
	for_each_intel_crtc(dev, crtc_it) {
4560 Serge 4379
		if (!crtc_it->active || crtc_it == crtc)
4380
			continue;
4381
 
4382
		if (other_active_crtc)
4104 Serge 4383
		return;
4384
 
4560 Serge 4385
		other_active_crtc = crtc_it;
4386
	}
4387
	if (!other_active_crtc)
4388
		return;
4104 Serge 4389
 
4560 Serge 4390
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4391
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4104 Serge 4392
}
4393
 
3243 Serge 4394
static void haswell_crtc_enable(struct drm_crtc *crtc)
4395
{
4396
	struct drm_device *dev = crtc->dev;
4397
	struct drm_i915_private *dev_priv = dev->dev_private;
4398
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4399
	struct intel_encoder *encoder;
4400
	int pipe = intel_crtc->pipe;
4401
 
4402
	WARN_ON(!crtc->enabled);
4403
 
4404
	if (intel_crtc->active)
4405
		return;
4406
 
5060 serge 4407
	if (intel_crtc_to_shared_dpll(intel_crtc))
4408
		intel_enable_shared_dpll(intel_crtc);
4409
 
4410
	if (intel_crtc->config.has_dp_encoder)
4411
		intel_dp_set_m_n(intel_crtc);
4412
 
4413
	intel_set_pipe_timings(intel_crtc);
4414
 
5354 serge 4415
	if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
4416
		I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
4417
			   intel_crtc->config.pixel_multiplier - 1);
4418
	}
4419
 
5060 serge 4420
	if (intel_crtc->config.has_pch_encoder) {
4421
		intel_cpu_transcoder_set_m_n(intel_crtc,
5354 serge 4422
				     &intel_crtc->config.fdi_m_n, NULL);
5060 serge 4423
	}
4424
 
4425
	haswell_set_pipeconf(crtc);
4426
 
4427
	intel_set_pipe_csc(crtc);
4428
 
3243 Serge 4429
	intel_crtc->active = true;
4104 Serge 4430
 
5354 serge 4431
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3243 Serge 4432
	for_each_encoder_on_crtc(dev, crtc, encoder)
4433
		if (encoder->pre_enable)
4434
			encoder->pre_enable(encoder);
4435
 
5060 serge 4436
	if (intel_crtc->config.has_pch_encoder) {
5354 serge 4437
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4438
						      true);
5060 serge 4439
		dev_priv->display.fdi_link_train(crtc);
4440
	}
4441
 
3243 Serge 4442
	intel_ddi_enable_pipe_clock(intel_crtc);
4443
 
5354 serge 4444
	if (IS_SKYLAKE(dev))
4445
		skylake_pfit_enable(intel_crtc);
4446
	else
4104 Serge 4447
		ironlake_pfit_enable(intel_crtc);
3243 Serge 4448
 
4449
	/*
4450
	 * On ILK+ LUT must be loaded before the pipe is running but with
4451
	 * clocks enabled
4452
	 */
4453
	intel_crtc_load_lut(crtc);
4454
 
4455
	intel_ddi_set_pipe_settings(crtc);
3746 Serge 4456
	intel_ddi_enable_transcoder_func(crtc);
3243 Serge 4457
 
4560 Serge 4458
	intel_update_watermarks(crtc);
5060 serge 4459
	intel_enable_pipe(intel_crtc);
3243 Serge 4460
 
3746 Serge 4461
	if (intel_crtc->config.has_pch_encoder)
3243 Serge 4462
		lpt_pch_enable(crtc);
4463
 
5060 serge 4464
	if (intel_crtc->config.dp_encoder_is_mst)
4465
		intel_ddi_set_vc_payload_alloc(crtc, true);
4466
 
4560 Serge 4467
	for_each_encoder_on_crtc(dev, crtc, encoder) {
3243 Serge 4468
		encoder->enable(encoder);
4560 Serge 4469
		intel_opregion_notify_encoder(encoder, true);
4470
	}
3243 Serge 4471
 
5354 serge 4472
	assert_vblank_disabled(crtc);
4473
	drm_crtc_vblank_on(crtc);
4474
 
4560 Serge 4475
	/* If we change the relative order between pipe/planes enabling, we need
4476
	 * to change the workaround. */
4477
	haswell_mode_set_planes_workaround(intel_crtc);
5060 serge 4478
	intel_crtc_enable_planes(crtc);
3243 Serge 4479
}
4480
 
5354 serge 4481
static void skylake_pfit_disable(struct intel_crtc *crtc)
4482
{
4483
	struct drm_device *dev = crtc->base.dev;
4484
	struct drm_i915_private *dev_priv = dev->dev_private;
4485
	int pipe = crtc->pipe;
4486
 
4487
	/* To avoid upsetting the power well on haswell only disable the pfit if
4488
	 * it's in use. The hw state code will make sure we get this right. */
4489
	if (crtc->config.pch_pfit.enabled) {
4490
		I915_WRITE(PS_CTL(pipe), 0);
4491
		I915_WRITE(PS_WIN_POS(pipe), 0);
4492
		I915_WRITE(PS_WIN_SZ(pipe), 0);
4493
	}
4494
}
4495
 
4104 Serge 4496
static void ironlake_pfit_disable(struct intel_crtc *crtc)
4497
{
4498
	struct drm_device *dev = crtc->base.dev;
4499
	struct drm_i915_private *dev_priv = dev->dev_private;
4500
	int pipe = crtc->pipe;
4501
 
4502
	/* To avoid upsetting the power well on haswell only disable the pfit if
4503
	 * it's in use. The hw state code will make sure we get this right. */
4504
	if (crtc->config.pch_pfit.enabled) {
4505
		I915_WRITE(PF_CTL(pipe), 0);
4506
		I915_WRITE(PF_WIN_POS(pipe), 0);
4507
		I915_WRITE(PF_WIN_SZ(pipe), 0);
4508
	}
4509
}
4510
 
2327 Serge 4511
static void ironlake_crtc_disable(struct drm_crtc *crtc)
4512
{
4513
    struct drm_device *dev = crtc->dev;
4514
    struct drm_i915_private *dev_priv = dev->dev_private;
4515
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4516
	struct intel_encoder *encoder;
2327 Serge 4517
    int pipe = intel_crtc->pipe;
4518
    u32 reg, temp;
4519
 
4520
    if (!intel_crtc->active)
4521
        return;
4522
 
5060 serge 4523
	intel_crtc_disable_planes(crtc);
4524
 
5354 serge 4525
	drm_crtc_vblank_off(crtc);
4526
	assert_vblank_disabled(crtc);
4527
 
3031 serge 4528
	for_each_encoder_on_crtc(dev, crtc, encoder)
4529
		encoder->disable(encoder);
2336 Serge 4530
 
4104 Serge 4531
	if (intel_crtc->config.has_pch_encoder)
5354 serge 4532
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2327 Serge 4533
 
5354 serge 4534
	intel_disable_pipe(intel_crtc);
4535
 
4104 Serge 4536
	ironlake_pfit_disable(intel_crtc);
2327 Serge 4537
 
3031 serge 4538
	for_each_encoder_on_crtc(dev, crtc, encoder)
4539
		if (encoder->post_disable)
4540
			encoder->post_disable(encoder);
4541
 
4104 Serge 4542
	if (intel_crtc->config.has_pch_encoder) {
2327 Serge 4543
    ironlake_fdi_disable(crtc);
4544
 
3243 Serge 4545
	ironlake_disable_pch_transcoder(dev_priv, pipe);
2327 Serge 4546
 
4547
    if (HAS_PCH_CPT(dev)) {
4548
        /* disable TRANS_DP_CTL */
4549
        reg = TRANS_DP_CTL(pipe);
4550
        temp = I915_READ(reg);
4104 Serge 4551
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4552
				  TRANS_DP_PORT_SEL_MASK);
2327 Serge 4553
        temp |= TRANS_DP_PORT_SEL_NONE;
4554
        I915_WRITE(reg, temp);
4555
 
4556
        /* disable DPLL_SEL */
4557
        temp = I915_READ(PCH_DPLL_SEL);
4104 Serge 4558
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
2327 Serge 4559
        I915_WRITE(PCH_DPLL_SEL, temp);
4560
    }
4561
 
4562
    /* disable PCH DPLL */
4104 Serge 4563
		intel_disable_shared_dpll(intel_crtc);
2327 Serge 4564
 
3031 serge 4565
	ironlake_fdi_pll_disable(intel_crtc);
4104 Serge 4566
	}
2327 Serge 4567
 
4568
    intel_crtc->active = false;
4560 Serge 4569
	intel_update_watermarks(crtc);
2327 Serge 4570
 
4571
    mutex_lock(&dev->struct_mutex);
4572
    intel_update_fbc(dev);
4573
    mutex_unlock(&dev->struct_mutex);
4574
}
4575
 
3243 Serge 4576
static void haswell_crtc_disable(struct drm_crtc *crtc)
4577
{
4578
	struct drm_device *dev = crtc->dev;
4579
	struct drm_i915_private *dev_priv = dev->dev_private;
4580
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4581
	struct intel_encoder *encoder;
3746 Serge 4582
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 4583
 
4584
	if (!intel_crtc->active)
4585
		return;
4586
 
5060 serge 4587
	intel_crtc_disable_planes(crtc);
4560 Serge 4588
 
5354 serge 4589
	drm_crtc_vblank_off(crtc);
4590
	assert_vblank_disabled(crtc);
4591
 
4560 Serge 4592
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4593
		intel_opregion_notify_encoder(encoder, false);
3243 Serge 4594
		encoder->disable(encoder);
4560 Serge 4595
	}
3243 Serge 4596
 
4104 Serge 4597
	if (intel_crtc->config.has_pch_encoder)
5354 serge 4598
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4599
						      false);
4600
	intel_disable_pipe(intel_crtc);
3243 Serge 4601
 
5097 serge 4602
	if (intel_crtc->config.dp_encoder_is_mst)
4603
		intel_ddi_set_vc_payload_alloc(crtc, false);
4604
 
3243 Serge 4605
	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4606
 
5354 serge 4607
	if (IS_SKYLAKE(dev))
4608
		skylake_pfit_disable(intel_crtc);
4609
	else
4104 Serge 4610
		ironlake_pfit_disable(intel_crtc);
3243 Serge 4611
 
4612
	intel_ddi_disable_pipe_clock(intel_crtc);
4613
 
3746 Serge 4614
	if (intel_crtc->config.has_pch_encoder) {
3243 Serge 4615
		lpt_disable_pch_transcoder(dev_priv);
4616
		intel_ddi_fdi_disable(crtc);
4617
	}
4618
 
5060 serge 4619
	for_each_encoder_on_crtc(dev, crtc, encoder)
4620
		if (encoder->post_disable)
4621
			encoder->post_disable(encoder);
4622
 
3243 Serge 4623
	intel_crtc->active = false;
4560 Serge 4624
	intel_update_watermarks(crtc);
3243 Serge 4625
 
4626
	mutex_lock(&dev->struct_mutex);
4627
	intel_update_fbc(dev);
4628
	mutex_unlock(&dev->struct_mutex);
5060 serge 4629
 
4630
	if (intel_crtc_to_shared_dpll(intel_crtc))
4631
		intel_disable_shared_dpll(intel_crtc);
3243 Serge 4632
}
4633
 
3031 serge 4634
static void ironlake_crtc_off(struct drm_crtc *crtc)
2327 Serge 4635
{
4636
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4104 Serge 4637
	intel_put_shared_dpll(intel_crtc);
2327 Serge 4638
}
4639
 
3243 Serge 4640
 
4104 Serge 4641
static void i9xx_pfit_enable(struct intel_crtc *crtc)
4642
{
4643
	struct drm_device *dev = crtc->base.dev;
4644
	struct drm_i915_private *dev_priv = dev->dev_private;
4645
	struct intel_crtc_config *pipe_config = &crtc->config;
4646
 
4647
	if (!crtc->config.gmch_pfit.control)
4648
		return;
4649
 
4650
	/*
4651
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
4652
	 * according to register description and PRM.
4653
	 */
4654
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
4655
	assert_pipe_disabled(dev_priv, crtc->pipe);
4656
 
4657
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
4658
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
4659
 
4660
	/* Border color in case we don't scale up to the full screen. Black by
4661
	 * default, change to something else for debugging. */
4662
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
4663
}
4664
 
5060 serge 4665
static enum intel_display_power_domain port_to_power_domain(enum port port)
4560 Serge 4666
{
5060 serge 4667
	switch (port) {
4668
	case PORT_A:
4669
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4670
	case PORT_B:
4671
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4672
	case PORT_C:
4673
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4674
	case PORT_D:
4675
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4676
	default:
4677
		WARN_ON_ONCE(1);
4678
		return POWER_DOMAIN_PORT_OTHER;
4679
	}
4680
}
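/*
 * Iterate every power domain whose bit is set in @mask, e.g.
 *
 *	for_each_power_domain(domain, mask)
 *		intel_display_power_get(dev_priv, domain);
 *
 * as done in modeset_update_crtc_power_domains() below.
 */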
4681
 
4682
#define for_each_power_domain(domain, mask)				\
4683
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
4684
		if ((1 << (domain)) & (mask))
4685
 
4686
enum intel_display_power_domain
4687
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4688
{
4689
	struct drm_device *dev = intel_encoder->base.dev;
4690
	struct intel_digital_port *intel_dig_port;
4691
 
4692
	switch (intel_encoder->type) {
4693
	case INTEL_OUTPUT_UNKNOWN:
4694
		/* Only DDI platforms should ever use this output type */
4695
		WARN_ON_ONCE(!HAS_DDI(dev));
4696
	case INTEL_OUTPUT_DISPLAYPORT:
4697
	case INTEL_OUTPUT_HDMI:
4698
	case INTEL_OUTPUT_EDP:
4699
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4700
		return port_to_power_domain(intel_dig_port->port);
4701
	case INTEL_OUTPUT_DP_MST:
4702
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
4703
		return port_to_power_domain(intel_dig_port->port);
4704
	case INTEL_OUTPUT_ANALOG:
4705
		return POWER_DOMAIN_PORT_CRT;
4706
	case INTEL_OUTPUT_DSI:
4707
		return POWER_DOMAIN_PORT_DSI;
4708
	default:
4709
		return POWER_DOMAIN_PORT_OTHER;
4710
	}
4711
}
4712
 
4713
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4714
{
4715
	struct drm_device *dev = crtc->dev;
4716
	struct intel_encoder *intel_encoder;
4717
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4718
	enum pipe pipe = intel_crtc->pipe;
4719
	unsigned long mask;
4720
	enum transcoder transcoder;
4721
 
4722
	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4723
 
4724
	mask = BIT(POWER_DOMAIN_PIPE(pipe));
4725
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4726
	if (intel_crtc->config.pch_pfit.enabled ||
4727
	    intel_crtc->config.pch_pfit.force_thru)
4728
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4729
 
4730
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4731
		mask |= BIT(intel_display_port_power_domain(intel_encoder));
4732
 
4733
	return mask;
4734
}
4735
 
4736
static void modeset_update_crtc_power_domains(struct drm_device *dev)
4737
{
4738
	struct drm_i915_private *dev_priv = dev->dev_private;
4739
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
4740
	struct intel_crtc *crtc;
4741
 
4742
	/*
4743
	 * First get all needed power domains, then put all unneeded, to avoid
4744
	 * any unnecessary toggling of the power wells.
4745
	 */
4746
	for_each_intel_crtc(dev, crtc) {
4747
		enum intel_display_power_domain domain;
4748
 
4749
		if (!crtc->base.enabled)
4750
			continue;
4751
 
4752
		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
4753
 
4754
		for_each_power_domain(domain, pipe_domains[crtc->pipe])
4755
			intel_display_power_get(dev_priv, domain);
4756
	}
4757
 
5354 serge 4758
	if (dev_priv->display.modeset_global_resources)
4759
		dev_priv->display.modeset_global_resources(dev);
4760
 
5060 serge 4761
	for_each_intel_crtc(dev, crtc) {
4762
		enum intel_display_power_domain domain;
4763
 
4764
		for_each_power_domain(domain, crtc->enabled_power_domains)
4765
			intel_display_power_put(dev_priv, domain);
4766
 
4767
		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
4768
	}
4769
 
4770
	intel_display_set_init_power(dev_priv, false);
4771
}
4772
 
4773
/* returns HPLL frequency in kHz */
4774
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
4775
{
4560 Serge 4776
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
4777
 
4778
	/* Obtain SKU information */
4779
	mutex_lock(&dev_priv->dpio_lock);
4780
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
4781
		CCK_FUSE_HPLL_FREQ_MASK;
4782
	mutex_unlock(&dev_priv->dpio_lock);
4783
 
5060 serge 4784
	return vco_freq[hpll_freq] * 1000;
4560 Serge 4785
}
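/*
 * Example: a fuse field of 1 selects the 1600 MHz entry above, so the
 * function returns 1600000 (kHz).
 */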
4786
 
5060 serge 4787
static void vlv_update_cdclk(struct drm_device *dev)
4788
{
4789
	struct drm_i915_private *dev_priv = dev->dev_private;
4790
 
4791
	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5354 serge 4792
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5060 serge 4793
			 dev_priv->vlv_cdclk_freq);
4794
 
4795
	/*
4796
	 * Program the gmbus_freq based on the cdclk frequency.
4797
	 * BSpec erroneously claims we should aim for 4MHz, but
4798
	 * in fact 1MHz is the correct frequency.
4799
	 */
5354 serge 4800
	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
5060 serge 4801
}
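/*
 * Example: at a CD clock of 266667 kHz, DIV_ROUND_UP(266667, 1000) = 267
 * is written to GMBUSFREQ_VLV, i.e. the divider that keeps gmbus at
 * roughly 1 MHz.
 */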
4802
 
4560 Serge 4803
/* Adjust CDclk dividers to allow high res or save power if possible */
4804
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4805
{
4806
	struct drm_i915_private *dev_priv = dev->dev_private;
4807
	u32 val, cmd;
4808
 
5060 serge 4809
	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4810
 
4811
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
4560 Serge 4812
		cmd = 2;
5060 serge 4813
	else if (cdclk == 266667)
4560 Serge 4814
		cmd = 1;
4815
	else
4816
		cmd = 0;
4817
 
4818
	mutex_lock(&dev_priv->rps.hw_lock);
4819
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4820
	val &= ~DSPFREQGUAR_MASK;
4821
	val |= (cmd << DSPFREQGUAR_SHIFT);
4822
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4823
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4824
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
4825
		     50)) {
4826
		DRM_ERROR("timed out waiting for CDclk change\n");
4827
	}
4828
	mutex_unlock(&dev_priv->rps.hw_lock);
4829
 
5060 serge 4830
	if (cdclk == 400000) {
5354 serge 4831
		u32 divider;
4560 Serge 4832
 
5354 serge 4833
		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
4560 Serge 4834
 
4835
		mutex_lock(&dev_priv->dpio_lock);
4836
		/* adjust cdclk divider */
4837
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5060 serge 4838
		val &= ~DISPLAY_FREQUENCY_VALUES;
4560 Serge 4839
		val |= divider;
4840
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5060 serge 4841
 
4842
		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
4843
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
4844
			     50))
4845
			DRM_ERROR("timed out waiting for CDclk change\n");
4560 Serge 4846
		mutex_unlock(&dev_priv->dpio_lock);
4847
	}
4848
 
4849
	mutex_lock(&dev_priv->dpio_lock);
4850
	/* adjust self-refresh exit latency value */
4851
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
4852
	val &= ~0x7f;
4853
 
4854
	/*
4855
	 * For high bandwidth configs, we set a higher latency in the bunit
4856
	 * so that the core display fetch happens in time to avoid underruns.
4857
	 */
5060 serge 4858
	if (cdclk == 400000)
4560 Serge 4859
		val |= 4500 / 250; /* 4.5 usec */
4860
	else
4861
		val |= 3000 / 250; /* 3.0 usec */
4862
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4863
	mutex_unlock(&dev_priv->dpio_lock);
4864
 
5060 serge 4865
	vlv_update_cdclk(dev);
4560 Serge 4866
}
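/*
 * Worked example for the 400 MHz path above, assuming an 800000 kHz HPLL:
 * divider = DIV_ROUND_CLOSEST(800000 << 1, 400000) - 1 = 3, so the CCK
 * display clock ends up at 1600000 / (3 + 1) = 400000 kHz.
 */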
4867
 
5354 serge 4868
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
4869
{
4870
	struct drm_i915_private *dev_priv = dev->dev_private;
4871
	u32 val, cmd;
4872
 
4873
	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4874
 
4875
	switch (cdclk) {
4876
	case 400000:
4877
		cmd = 3;
4878
		break;
4879
	case 333333:
4880
	case 320000:
4881
		cmd = 2;
4882
		break;
4883
	case 266667:
4884
		cmd = 1;
4885
		break;
4886
	case 200000:
4887
		cmd = 0;
4888
		break;
4889
	default:
4890
		WARN_ON(1);
4891
		return;
4892
	}
4893
 
4894
	mutex_lock(&dev_priv->rps.hw_lock);
4895
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4896
	val &= ~DSPFREQGUAR_MASK_CHV;
4897
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
4898
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4899
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4900
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
4901
		     50)) {
4902
		DRM_ERROR("timed out waiting for CDclk change\n");
4903
	}
4904
	mutex_unlock(&dev_priv->rps.hw_lock);
4905
 
4906
	vlv_update_cdclk(dev);
4907
}
4908
 
4560 Serge 4909
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4910
				 int max_pixclk)
4911
{
5354 serge 4912
	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
4560 Serge 4913
 
5354 serge 4914
	/* FIXME: Punit isn't quite ready yet */
4915
	if (IS_CHERRYVIEW(dev_priv->dev))
4916
		return 400000;
4917
 
4560 Serge 4918
	/*
4919
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
4920
	 *   200MHz
4921
	 *   267MHz
5060 serge 4922
	 *   320/333MHz (depends on HPLL freq)
4560 Serge 4923
	 *   400MHz
4924
	 * So we check to see whether we're above 90% of the lower bin and
4925
	 * adjust if needed.
5060 serge 4926
	 *
4927
	 * We seem to get an unstable or solid color picture at 200MHz.
4928
	 * Not sure what's wrong. For now use 200MHz only when all pipes
4929
	 * are off.
4560 Serge 4930
	 */
5060 serge 4931
	if (max_pixclk > freq_320*9/10)
4932
		return 400000;
4933
	else if (max_pixclk > 266667*9/10)
4934
		return freq_320;
4935
	else if (max_pixclk > 0)
4936
		return 266667;
4937
	else
4938
		return 200000;
4560 Serge 4939
}
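/*
 * Worked example, assuming an 800 MHz HPLL (freq_320 = 320000):
 * a 250000 kHz max pixel clock is below 320000*9/10 = 288000 but above
 * 266667*9/10 = 240000, so 320000 kHz is picked; 148500 kHz (1080p)
 * falls back to 266667, and 0 (all pipes off) selects 200000.
 */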
4940
 
5060 serge 4941
/* compute the max pixel clock for new configuration */
4942
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4560 Serge 4943
{
4944
	struct drm_device *dev = dev_priv->dev;
4945
	struct intel_crtc *intel_crtc;
4946
	int max_pixclk = 0;
4947
 
5060 serge 4948
	for_each_intel_crtc(dev, intel_crtc) {
4949
		if (intel_crtc->new_enabled)
4560 Serge 4950
			max_pixclk = max(max_pixclk,
5060 serge 4951
					 intel_crtc->new_config->adjusted_mode.crtc_clock);
4560 Serge 4952
	}
4953
 
4954
	return max_pixclk;
4955
}
4956
 
4957
static void valleyview_modeset_global_pipes(struct drm_device *dev,
5060 serge 4958
					    unsigned *prepare_pipes)
4560 Serge 4959
{
4960
	struct drm_i915_private *dev_priv = dev->dev_private;
4961
	struct intel_crtc *intel_crtc;
5060 serge 4962
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
4560 Serge 4963
 
5060 serge 4964
	if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
4965
	    dev_priv->vlv_cdclk_freq)
4560 Serge 4966
		return;
4967
 
5060 serge 4968
	/* disable/enable all currently active pipes while we change cdclk */
4969
	for_each_intel_crtc(dev, intel_crtc)
4560 Serge 4970
		if (intel_crtc->base.enabled)
4971
			*prepare_pipes |= (1 << intel_crtc->pipe);
4972
}
4973
 
4974
static void valleyview_modeset_global_resources(struct drm_device *dev)
4975
{
4976
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 4977
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
4560 Serge 4978
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4979
 
5354 serge 4980
	if (req_cdclk != dev_priv->vlv_cdclk_freq) {
4981
		/*
4982
		 * FIXME: We can end up here with all power domains off, yet
4983
		 * with a CDCLK frequency other than the minimum. To account
4984
		 * for this take the PIPE-A power domain, which covers the HW
4985
		 * blocks needed for the following programming. This can be
4986
		 * removed once it's guaranteed that we get here either with
4987
		 * the minimum CDCLK set, or the required power domains
4988
		 * enabled.
4989
		 */
4990
		intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
4991
 
4992
		if (IS_CHERRYVIEW(dev))
4993
			cherryview_set_cdclk(dev, req_cdclk);
4994
		else
4560 Serge 4995
			valleyview_set_cdclk(dev, req_cdclk);
5354 serge 4996
 
4997
		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
4998
	}
4560 Serge 4999
}
5000
 
4104 Serge 5001
static void valleyview_crtc_enable(struct drm_crtc *crtc)
5002
{
5003
	struct drm_device *dev = crtc->dev;
5354 serge 5004
	struct drm_i915_private *dev_priv = to_i915(dev);
4104 Serge 5005
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5006
	struct intel_encoder *encoder;
5007
	int pipe = intel_crtc->pipe;
4560 Serge 5008
	bool is_dsi;
4104 Serge 5009
 
5010
	WARN_ON(!crtc->enabled);
5011
 
5012
	if (intel_crtc->active)
5013
		return;
5014
 
5354 serge 5015
	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
5060 serge 5016
 
5354 serge 5017
	if (!is_dsi) {
5018
		if (IS_CHERRYVIEW(dev))
5019
			chv_prepare_pll(intel_crtc, &intel_crtc->config);
5020
		else
5021
			vlv_prepare_pll(intel_crtc, &intel_crtc->config);
5022
	}
5060 serge 5023
 
5024
	if (intel_crtc->config.has_dp_encoder)
5025
		intel_dp_set_m_n(intel_crtc);
5026
 
5027
	intel_set_pipe_timings(intel_crtc);
5028
 
5354 serge 5029
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
5030
		struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 5031
 
5354 serge 5032
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
5033
		I915_WRITE(CHV_CANVAS(pipe), 0);
5034
	}
5035
 
5060 serge 5036
	i9xx_set_pipeconf(intel_crtc);
5037
 
4104 Serge 5038
	intel_crtc->active = true;
5039
 
5354 serge 5040
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5060 serge 5041
 
4104 Serge 5042
	for_each_encoder_on_crtc(dev, crtc, encoder)
5043
		if (encoder->pre_pll_enable)
5044
			encoder->pre_pll_enable(encoder);
5045
 
5060 serge 5046
	if (!is_dsi) {
5047
		if (IS_CHERRYVIEW(dev))
5354 serge 5048
			chv_enable_pll(intel_crtc, &intel_crtc->config);
5060 serge 5049
		else
5354 serge 5050
			vlv_enable_pll(intel_crtc, &intel_crtc->config);
5060 serge 5051
	}
4104 Serge 5052
 
5053
	for_each_encoder_on_crtc(dev, crtc, encoder)
5054
		if (encoder->pre_enable)
5055
			encoder->pre_enable(encoder);
5056
 
5057
	i9xx_pfit_enable(intel_crtc);
5058
 
5059
	intel_crtc_load_lut(crtc);
5060
 
4560 Serge 5061
	intel_update_watermarks(crtc);
5060 serge 5062
	intel_enable_pipe(intel_crtc);
4104 Serge 5063
 
5064
	for_each_encoder_on_crtc(dev, crtc, encoder)
5065
		encoder->enable(encoder);
5060 serge 5066
 
5354 serge 5067
	assert_vblank_disabled(crtc);
5068
	drm_crtc_vblank_on(crtc);
5069
 
5060 serge 5070
	intel_crtc_enable_planes(crtc);
5071
 
5072
	/* Underruns don't raise interrupts, so check manually. */
5354 serge 5073
	i9xx_check_fifo_underruns(dev_priv);
4104 Serge 5074
}
5075
 
5060 serge 5076
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
5077
{
5078
	struct drm_device *dev = crtc->base.dev;
5079
	struct drm_i915_private *dev_priv = dev->dev_private;
5080
 
5081
	I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
5082
	I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
5083
}
5084
 
2327 Serge 5085
static void i9xx_crtc_enable(struct drm_crtc *crtc)
5086
{
5087
	struct drm_device *dev = crtc->dev;
5354 serge 5088
	struct drm_i915_private *dev_priv = to_i915(dev);
2327 Serge 5089
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 5090
	struct intel_encoder *encoder;
2327 Serge 5091
	int pipe = intel_crtc->pipe;
5092
 
3031 serge 5093
	WARN_ON(!crtc->enabled);
5094
 
2327 Serge 5095
	if (intel_crtc->active)
5096
		return;
5097
 
5060 serge 5098
	i9xx_set_pll_dividers(intel_crtc);
5099
 
5100
	if (intel_crtc->config.has_dp_encoder)
5101
		intel_dp_set_m_n(intel_crtc);
5102
 
5103
	intel_set_pipe_timings(intel_crtc);
5104
 
5105
	i9xx_set_pipeconf(intel_crtc);
5106
 
2327 Serge 5107
	intel_crtc->active = true;
5108
 
5060 serge 5109
	if (!IS_GEN2(dev))
5354 serge 5110
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5060 serge 5111
 
3480 Serge 5112
	for_each_encoder_on_crtc(dev, crtc, encoder)
5113
		if (encoder->pre_enable)
5114
			encoder->pre_enable(encoder);
5115
 
4104 Serge 5116
	i9xx_enable_pll(intel_crtc);
5117
 
5118
	i9xx_pfit_enable(intel_crtc);
5119
 
5120
	intel_crtc_load_lut(crtc);
5121
 
4560 Serge 5122
	intel_update_watermarks(crtc);
5060 serge 5123
	intel_enable_pipe(intel_crtc);
2327 Serge 5124
 
5060 serge 5125
	for_each_encoder_on_crtc(dev, crtc, encoder)
5126
		encoder->enable(encoder);
3031 serge 5127
 
5354 serge 5128
	assert_vblank_disabled(crtc);
5129
	drm_crtc_vblank_on(crtc);
5130
 
5060 serge 5131
	intel_crtc_enable_planes(crtc);
4104 Serge 5132
 
5060 serge 5133
	/*
5134
	 * Gen2 reports pipe underruns whenever all planes are disabled.
5135
	 * So don't enable underrun reporting before at least some planes
5136
	 * are enabled.
5137
	 * FIXME: Need to fix the logic to work when we turn off all planes
5138
	 * but leave the pipe running.
5139
	 */
5140
	if (IS_GEN2(dev))
5354 serge 5141
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5060 serge 5142
 
5143
	/* Underruns don't raise interrupts, so check manually. */
5354 serge 5144
	i9xx_check_fifo_underruns(dev_priv);
2327 Serge 5145
}
5146
 
3746 Serge 5147
static void i9xx_pfit_disable(struct intel_crtc *crtc)
5148
{
5149
	struct drm_device *dev = crtc->base.dev;
5150
	struct drm_i915_private *dev_priv = dev->dev_private;
5151
 
4104 Serge 5152
	if (!crtc->config.gmch_pfit.control)
5153
		return;
5154
 
3746 Serge 5155
	assert_pipe_disabled(dev_priv, crtc->pipe);
5156
 
4104 Serge 5157
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
5158
			 I915_READ(PFIT_CONTROL));
3746 Serge 5159
	I915_WRITE(PFIT_CONTROL, 0);
5160
}
5161
 
2327 Serge 5162
static void i9xx_crtc_disable(struct drm_crtc *crtc)
5163
{
5164
	struct drm_device *dev = crtc->dev;
5165
	struct drm_i915_private *dev_priv = dev->dev_private;
5166
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 5167
	struct intel_encoder *encoder;
2327 Serge 5168
	int pipe = intel_crtc->pipe;
5169
 
5170
	if (!intel_crtc->active)
5171
		return;
5172
 
5060 serge 5173
	/*
5174
	 * Gen2 reports pipe underruns whenever all planes are disabled.
5175
	 * So disable underrun reporting before all the planes get disabled.
5176
	 * FIXME: Need to fix the logic to work when we turn off all planes
5177
	 * but leave the pipe running.
5178
	 */
5179
	if (IS_GEN2(dev))
5354 serge 5180
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5060 serge 5181
 
5182
	/*
5183
	 * Vblank time updates from the shadow to live plane control register
5184
	 * are blocked if the memory self-refresh mode is active at that
5185
	 * moment. So to make sure the plane gets truly disabled, disable
5186
	 * first the self-refresh mode. The self-refresh enable bit in turn
5187
	 * will be checked/applied by the HW only at the next frame start
5188
	 * event which is after the vblank start event, so we need to have a
5189
	 * wait-for-vblank between disabling the plane and the pipe.
5190
	 */
5191
	intel_set_memory_cxsr(dev_priv, false);
5192
	intel_crtc_disable_planes(crtc);
5193
 
5194
	/*
5195
	 * On gen2 planes are double buffered but the pipe isn't, so we must
5196
	 * wait for planes to fully turn off before disabling the pipe.
5197
	 * We also need to wait on all gmch platforms because of the
5198
	 * self-refresh mode constraint explained above.
5199
	 */
5200
	intel_wait_for_vblank(dev, pipe);
2327 Serge 5201
 
5354 serge 5202
	drm_crtc_vblank_off(crtc);
5203
	assert_vblank_disabled(crtc);
3480 Serge 5204
 
5354 serge 5205
	for_each_encoder_on_crtc(dev, crtc, encoder)
5206
		encoder->disable(encoder);
5207
 
5208
	intel_disable_pipe(intel_crtc);
5209
 
3746 Serge 5210
	i9xx_pfit_disable(intel_crtc);
3480 Serge 5211
 
4104 Serge 5212
	for_each_encoder_on_crtc(dev, crtc, encoder)
5213
		if (encoder->post_disable)
5214
			encoder->post_disable(encoder);
2327 Serge 5215
 
5354 serge 5216
	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
5060 serge 5217
		if (IS_CHERRYVIEW(dev))
5218
			chv_disable_pll(dev_priv, pipe);
5219
		else if (IS_VALLEYVIEW(dev))
4557 Serge 5220
			vlv_disable_pll(dev_priv, pipe);
5060 serge 5221
		else
5354 serge 5222
			i9xx_disable_pll(intel_crtc);
5060 serge 5223
	}
4104 Serge 5224
 
5060 serge 5225
	if (!IS_GEN2(dev))
5354 serge 5226
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5060 serge 5227
 
2327 Serge 5228
	intel_crtc->active = false;
4560 Serge 5229
	intel_update_watermarks(crtc);
5230
 
5060 serge 5231
	mutex_lock(&dev->struct_mutex);
2327 Serge 5232
	intel_update_fbc(dev);
5060 serge 5233
	mutex_unlock(&dev->struct_mutex);
2327 Serge 5234
}
5235
 
3031 serge 5236
static void i9xx_crtc_off(struct drm_crtc *crtc)
2327 Serge 5237
{
5238
}
5239
 
5060 serge 5240
/* Master function to enable/disable CRTC and corresponding power wells */
5241
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
5242
{
5243
	struct drm_device *dev = crtc->dev;
5244
	struct drm_i915_private *dev_priv = dev->dev_private;
5245
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5246
	enum intel_display_power_domain domain;
5247
	unsigned long domains;
5248
 
5249
	if (enable) {
5250
		if (!intel_crtc->active) {
5251
			domains = get_crtc_power_domains(crtc);
5252
			for_each_power_domain(domain, domains)
5253
				intel_display_power_get(dev_priv, domain);
5254
			intel_crtc->enabled_power_domains = domains;
5255
 
5256
			dev_priv->display.crtc_enable(crtc);
5257
		}
5258
	} else {
5259
		if (intel_crtc->active) {
5260
			dev_priv->display.crtc_disable(crtc);
5261
 
5262
			domains = intel_crtc->enabled_power_domains;
5263
			for_each_power_domain(domain, domains)
5264
				intel_display_power_put(dev_priv, domain);
5265
			intel_crtc->enabled_power_domains = 0;
5266
		}
5267
	}
2330 Serge 5268
}
2327 Serge 5269
 
3031 serge 5270
/**
5271
 * Sets the power management mode of the pipe and plane.
5272
 */
5273
void intel_crtc_update_dpms(struct drm_crtc *crtc)
5274
{
5275
	struct drm_device *dev = crtc->dev;
5276
	struct intel_encoder *intel_encoder;
5277
	bool enable = false;
5278
 
5279
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5280
		enable |= intel_encoder->connectors_active;
5281
 
5060 serge 5282
	intel_crtc_control(crtc, enable);
3031 serge 5283
}
5284
 
2330 Serge 5285
static void intel_crtc_disable(struct drm_crtc *crtc)
5286
{
5287
	struct drm_device *dev = crtc->dev;
3031 serge 5288
	struct drm_connector *connector;
5289
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 5290
	struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
5291
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2327 Serge 5292
 
3031 serge 5293
	/* crtc should still be enabled when we disable it. */
5294
	WARN_ON(!crtc->enabled);
2327 Serge 5295
 
4104 Serge 5296
	dev_priv->display.crtc_disable(crtc);
3031 serge 5297
	dev_priv->display.off(crtc);
5298
 
5060 serge 5299
	if (crtc->primary->fb) {
4280 Serge 5300
		mutex_lock(&dev->struct_mutex);
5060 serge 5301
		intel_unpin_fb_obj(old_obj);
5302
		i915_gem_track_fb(old_obj, NULL,
5303
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
4280 Serge 5304
		mutex_unlock(&dev->struct_mutex);
5060 serge 5305
		crtc->primary->fb = NULL;
4280 Serge 5306
	}
3031 serge 5307
 
5308
	/* Update computed state. */
5309
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
5310
		if (!connector->encoder || !connector->encoder->crtc)
5311
			continue;
5312
 
5313
		if (connector->encoder->crtc != crtc)
5314
			continue;
5315
 
5316
		connector->dpms = DRM_MODE_DPMS_OFF;
5317
		to_intel_encoder(connector->encoder)->connectors_active = false;
2330 Serge 5318
	}
5319
}
2327 Serge 5320
 
3031 serge 5321
void intel_encoder_destroy(struct drm_encoder *encoder)
2330 Serge 5322
{
3031 serge 5323
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5324
 
5325
	drm_encoder_cleanup(encoder);
5326
	kfree(intel_encoder);
2330 Serge 5327
}
2327 Serge 5328
 
4104 Serge 5329
/* Simple dpms helper for encoders with just one connector, no cloning and only
3031 serge 5330
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
5331
 * state of the entire output pipe. */
4104 Serge 5332
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
2330 Serge 5333
{
3031 serge 5334
	if (mode == DRM_MODE_DPMS_ON) {
5335
		encoder->connectors_active = true;
5336
 
5337
		intel_crtc_update_dpms(encoder->base.crtc);
5338
	} else {
5339
		encoder->connectors_active = false;
5340
 
5341
		intel_crtc_update_dpms(encoder->base.crtc);
5342
	}
2330 Serge 5343
}
2327 Serge 5344
 
3031 serge 5345
/* Cross check the actual hw state with our own modeset state tracking (and it's
5346
 * internal consistency). */
5347
static void intel_connector_check_state(struct intel_connector *connector)
2330 Serge 5348
{
3031 serge 5349
	if (connector->get_hw_state(connector)) {
5350
		struct intel_encoder *encoder = connector->encoder;
5351
		struct drm_crtc *crtc;
5352
		bool encoder_enabled;
5353
		enum pipe pipe;
5354
 
5355
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5356
			      connector->base.base.id,
5060 serge 5357
			      connector->base.name);
3031 serge 5358
 
5060 serge 5359
		/* there is no real hw state for MST connectors */
5360
		if (connector->mst_port)
5361
			return;
5362
 
3031 serge 5363
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
5364
		     "wrong connector dpms state\n");
5365
		WARN(connector->base.encoder != &encoder->base,
5366
		     "active connector not linked to encoder\n");
5060 serge 5367
 
5368
		if (encoder) {
3031 serge 5369
		WARN(!encoder->connectors_active,
5370
		     "encoder->connectors_active not set\n");
5371
 
5372
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
5373
		WARN(!encoder_enabled, "encoder not enabled\n");
5374
		if (WARN_ON(!encoder->base.crtc))
5375
			return;
5376
 
5377
		crtc = encoder->base.crtc;
5378
 
5379
		WARN(!crtc->enabled, "crtc not enabled\n");
5380
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
5381
		WARN(pipe != to_intel_crtc(crtc)->pipe,
5382
		     "encoder active on the wrong pipe\n");
5383
	}
5060 serge 5384
	}
2330 Serge 5385
}
2327 Serge 5386
 
3031 serge 5387
/* Even simpler default implementation, if there's really no special case to
5388
 * consider. */
5389
void intel_connector_dpms(struct drm_connector *connector, int mode)
2330 Serge 5390
{
3031 serge 5391
	/* All the simple cases only support two dpms states. */
5392
	if (mode != DRM_MODE_DPMS_ON)
5393
		mode = DRM_MODE_DPMS_OFF;
2342 Serge 5394
 
3031 serge 5395
	if (mode == connector->dpms)
5396
		return;
5397
 
5398
	connector->dpms = mode;
5399
 
5400
	/* Only need to change hw state when actually enabled */
4104 Serge 5401
	if (connector->encoder)
5402
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
3031 serge 5403
 
5404
	intel_modeset_check_state(connector->dev);
2330 Serge 5405
}
2327 Serge 5406
 
3031 serge 5407
/* Simple connector->get_hw_state implementation for encoders that support only
5408
 * one connector and no cloning and hence the encoder state determines the state
5409
 * of the connector. */
5410
bool intel_connector_get_hw_state(struct intel_connector *connector)
2330 Serge 5411
{
3031 serge 5412
	enum pipe pipe = 0;
5413
	struct intel_encoder *encoder = connector->encoder;
2330 Serge 5414
 
3031 serge 5415
	return encoder->get_hw_state(encoder, &pipe);
2330 Serge 5416
}
5417
 
4104 Serge 5418
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5419
				     struct intel_crtc_config *pipe_config)
5420
{
5421
	struct drm_i915_private *dev_priv = dev->dev_private;
5422
	struct intel_crtc *pipe_B_crtc =
5423
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5424
 
5425
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
5426
		      pipe_name(pipe), pipe_config->fdi_lanes);
5427
	if (pipe_config->fdi_lanes > 4) {
5428
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5429
			      pipe_name(pipe), pipe_config->fdi_lanes);
5430
		return false;
5431
	}
5432
 
4560 Serge 5433
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4104 Serge 5434
		if (pipe_config->fdi_lanes > 2) {
5435
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5436
				      pipe_config->fdi_lanes);
5437
			return false;
5438
		} else {
5439
			return true;
5440
		}
5441
	}
5442
 
5443
	if (INTEL_INFO(dev)->num_pipes == 2)
5444
		return true;
5445
 
5446
	/* Ivybridge 3 pipe is really complicated */
5447
	switch (pipe) {
5448
	case PIPE_A:
5449
		return true;
5450
	case PIPE_B:
5451
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5452
		    pipe_config->fdi_lanes > 2) {
5453
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5454
				      pipe_name(pipe), pipe_config->fdi_lanes);
5455
			return false;
5456
		}
5457
		return true;
5458
	case PIPE_C:
5459
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
5460
		    pipe_B_crtc->config.fdi_lanes <= 2) {
5461
			if (pipe_config->fdi_lanes > 2) {
5462
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5463
					      pipe_name(pipe), pipe_config->fdi_lanes);
5464
				return false;
5465
			}
5466
		} else {
5467
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5468
			return false;
5469
		}
5470
		return true;
5471
	default:
5472
		BUG();
5473
	}
5474
}
5475
 
5476
#define RETRY 1
5477
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
3746 Serge 5478
				      struct intel_crtc_config *pipe_config)
2330 Serge 5479
{
4104 Serge 5480
	struct drm_device *dev = intel_crtc->base.dev;
3746 Serge 5481
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4104 Serge 5482
	int lane, link_bw, fdi_dotclock;
5483
	bool setup_ok, needs_recompute = false;
2330 Serge 5484
 
4104 Serge 5485
retry:
5486
	/* FDI is a binary signal running at ~2.7GHz, encoding
5487
	 * each output octet as 10 bits. The actual frequency
5488
	 * is stored as a divider into a 100MHz clock, and the
5489
	 * mode pixel clock is stored in units of 1KHz.
5490
	 * Hence the bw of each lane in terms of the mode signal
5491
	 * is:
5492
	 */
5493
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5494
 
4560 Serge 5495
	fdi_dotclock = adjusted_mode->crtc_clock;
4104 Serge 5496
 
5497
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5498
					   pipe_config->pipe_bpp);
5499
 
5500
	pipe_config->fdi_lanes = lane;
5501
 
5502
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5503
			       link_bw, &pipe_config->fdi_m_n);
5504
 
5505
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5506
					    intel_crtc->pipe, pipe_config);
5507
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5508
		pipe_config->pipe_bpp -= 2*3;
5509
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5510
			      pipe_config->pipe_bpp);
5511
		needs_recompute = true;
5512
		pipe_config->bw_constrained = true;
5513
 
5514
		goto retry;
5515
	}
5516
 
5517
	if (needs_recompute)
5518
		return RETRY;
5519
 
5520
	return setup_ok ? 0 : -EINVAL;
5521
}
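/*
 * Example of the retry loop above: a mode that does not fit at 30 bpp
 * (10*3) is recomputed at 24 bpp and then 18 bpp; if even 6*3 bpp needs
 * too many FDI lanes the mode is eventually rejected with -EINVAL.
 */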
5522
 
5523
static void hsw_compute_ips_config(struct intel_crtc *crtc,
5524
				   struct intel_crtc_config *pipe_config)
5525
{
5060 serge 5526
	pipe_config->ips_enabled = i915.enable_ips &&
4104 Serge 5527
				   hsw_crtc_supports_ips(crtc) &&
5528
				   pipe_config->pipe_bpp <= 24;
5529
}
5530
 
5531
static int intel_crtc_compute_config(struct intel_crtc *crtc,
5532
				     struct intel_crtc_config *pipe_config)
5533
{
5534
	struct drm_device *dev = crtc->base.dev;
5354 serge 5535
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 5536
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5537
 
4560 Serge 5538
	/* FIXME should check pixel clock limits on all platforms */
5539
	if (INTEL_INFO(dev)->gen < 4) {
5540
		int clock_limit =
5541
			dev_priv->display.get_display_clock_speed(dev);
5542
 
5543
		/*
5544
		 * Enable pixel doubling when the dot clock
5545
		 * is > 90% of the (display) core speed.
5546
		 *
5547
		 * GDG double wide on either pipe,
5548
		 * otherwise pipe A only.
5549
		 */
5550
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5551
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5552
			clock_limit *= 2;
5553
			pipe_config->double_wide = true;
5554
		}
5555
 
5556
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4104 Serge 5557
			return -EINVAL;
2330 Serge 5558
	}
5559
 
4560 Serge 5560
	/*
5561
	 * Pipe horizontal size must be even in:
5562
	 * - DVO ganged mode
5563
	 * - LVDS dual channel mode
5564
	 * - Double wide pipe
5565
	 */
5354 serge 5566
	if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4560 Serge 5567
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5568
		pipe_config->pipe_src_w &= ~1;
5569
 
4104 Serge 5570
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
5571
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
3031 serge 5572
	 */
5573
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5574
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
4104 Serge 5575
		return -EINVAL;
3031 serge 5576
 
3746 Serge 5577
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5578
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5579
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5580
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
5581
		 * for lvds. */
5582
		pipe_config->pipe_bpp = 8*3;
5583
	}
5584
 
4104 Serge 5585
	if (HAS_IPS(dev))
5586
		hsw_compute_ips_config(crtc, pipe_config);
5587
 
5588
	if (pipe_config->has_pch_encoder)
5589
		return ironlake_fdi_compute_config(crtc, pipe_config);
5590
 
5591
	return 0;
2330 Serge 5592
}
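/*
 * Example of the gen < 4 clock check above: with a 200000 kHz core clock,
 * a 190000 kHz dot clock on pipe A (or either pipe on 915G) exceeds the
 * 90% limit of 180000, so the pipe goes double wide and is validated
 * against 400000 kHz instead of being rejected.
 */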
5593
 
3031 serge 5594
static int valleyview_get_display_clock_speed(struct drm_device *dev)
5595
{
5060 serge 5596
	struct drm_i915_private *dev_priv = dev->dev_private;
5597
	u32 val;
5598
	int divider;
5599
 
5354 serge 5600
	/* FIXME: Punit isn't quite ready yet */
5601
	if (IS_CHERRYVIEW(dev))
5602
		return 400000;
5603
 
5604
	if (dev_priv->hpll_freq == 0)
5605
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
5606
 
5060 serge 5607
	mutex_lock(&dev_priv->dpio_lock);
5608
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5609
	mutex_unlock(&dev_priv->dpio_lock);
5610
 
5611
	divider = val & DISPLAY_FREQUENCY_VALUES;
5612
 
5613
	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
5614
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5615
	     "cdclk change in progress\n");
5616
 
5354 serge 5617
	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
3031 serge 5618
}
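/*
 * Example: with an 800000 kHz HPLL and a CCK divider field of 3, this
 * returns DIV_ROUND_CLOSEST(800000 << 1, 3 + 1) = 400000 kHz.
 */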
5619
 
2327 Serge 5620
static int i945_get_display_clock_speed(struct drm_device *dev)
5621
{
5622
	return 400000;
5623
}
5624
 
5625
static int i915_get_display_clock_speed(struct drm_device *dev)
5626
{
5627
	return 333000;
5628
}
5629
 
5630
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5631
{
5632
	return 200000;
5633
}
5634
 
4104 Serge 5635
static int pnv_get_display_clock_speed(struct drm_device *dev)
5636
{
5637
	u16 gcfgc = 0;
5638
 
5639
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5640
 
5641
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5642
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5643
		return 267000;
5644
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5645
		return 333000;
5646
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5647
		return 444000;
5648
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5649
		return 200000;
5650
	default:
5651
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
5652
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5653
		return 133000;
5654
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5655
		return 167000;
5656
	}
5657
}
5658
 
2327 Serge 5659
static int i915gm_get_display_clock_speed(struct drm_device *dev)
5660
{
5661
	u16 gcfgc = 0;
5662
 
5663
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5664
 
5665
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5666
		return 133000;
5667
	else {
5668
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5669
		case GC_DISPLAY_CLOCK_333_MHZ:
5670
			return 333000;
5671
		default:
5672
		case GC_DISPLAY_CLOCK_190_200_MHZ:
5673
			return 190000;
5674
		}
5675
	}
5676
}
5677
 
5678
static int i865_get_display_clock_speed(struct drm_device *dev)
5679
{
5680
	return 266000;
5681
}
5682
 
5683
static int i855_get_display_clock_speed(struct drm_device *dev)
5684
{
5685
	u16 hpllcc = 0;
5686
	/* Assume that the hardware is in the high speed state.  This
5687
	 * should be the default.
5688
	 */
5689
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5690
	case GC_CLOCK_133_200:
5691
	case GC_CLOCK_100_200:
5692
		return 200000;
5693
	case GC_CLOCK_166_250:
5694
		return 250000;
5695
	case GC_CLOCK_100_133:
5696
		return 133000;
5697
	}
5698
 
5699
	/* Shouldn't happen */
5700
	return 0;
5701
}
5702
 
5703
static int i830_get_display_clock_speed(struct drm_device *dev)
5704
{
5705
	return 133000;
5706
}
5707
 
5708
static void
3746 Serge 5709
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2327 Serge 5710
{
3746 Serge 5711
	while (*num > DATA_LINK_M_N_MASK ||
5712
	       *den > DATA_LINK_M_N_MASK) {
2327 Serge 5713
		*num >>= 1;
5714
		*den >>= 1;
5715
	}
5716
}
5717
 
3746 Serge 5718
static void compute_m_n(unsigned int m, unsigned int n,
5719
			uint32_t *ret_m, uint32_t *ret_n)
5720
{
5721
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5722
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
5723
	intel_reduce_m_n_ratio(ret_m, ret_n);
5724
}
5725
 
3480 Serge 5726
void
5727
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5728
		       int pixel_clock, int link_clock,
5729
		       struct intel_link_m_n *m_n)
2327 Serge 5730
{
3480 Serge 5731
	m_n->tu = 64;
3746 Serge 5732
 
5733
	compute_m_n(bits_per_pixel * pixel_clock,
5734
		    link_clock * nlanes * 8,
5735
		    &m_n->gmch_m, &m_n->gmch_n);
5736
 
5737
	compute_m_n(pixel_clock, link_clock,
5738
		    &m_n->link_m, &m_n->link_n);
2327 Serge 5739
}
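/*
 * Rough example (values purely illustrative): for 24 bpp, 4 lanes, a
 * 148500 kHz pixel clock and a 270000 kHz link clock, gmch_m/gmch_n
 * encodes the ratio 24*148500 : 270000*4*8 and link_m/link_n the ratio
 * 148500 : 270000, with N clamped to a power of two no larger than
 * DATA_LINK_N_MAX and both values kept within DATA_LINK_M_N_MASK.
 */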
5740
 
5741
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5742
{
5060 serge 5743
	if (i915.panel_use_ssc >= 0)
5744
		return i915.panel_use_ssc != 0;
4104 Serge 5745
	return dev_priv->vbt.lvds_use_ssc
2327 Serge 5746
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5747
}
5748
 
5354 serge 5749
static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
3031 serge 5750
{
5354 serge 5751
	struct drm_device *dev = crtc->base.dev;
3031 serge 5752
	struct drm_i915_private *dev_priv = dev->dev_private;
5753
	int refclk;
2327 Serge 5754
 
3031 serge 5755
	if (IS_VALLEYVIEW(dev)) {
4560 Serge 5756
		refclk = 100000;
5354 serge 5757
	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
3031 serge 5758
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 5759
		refclk = dev_priv->vbt.lvds_ssc_freq;
5760
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
3031 serge 5761
	} else if (!IS_GEN2(dev)) {
5762
		refclk = 96000;
5763
	} else {
5764
		refclk = 48000;
5765
	}
2327 Serge 5766
 
3031 serge 5767
	return refclk;
5768
}
2327 Serge 5769
 
4104 Serge 5770
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
3031 serge 5771
{
4104 Serge 5772
	return (1 << dpll->n) << 16 | dpll->m2;
5773
}
3746 Serge 5774
 
4104 Serge 5775
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5776
{
5777
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
3031 serge 5778
}
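/*
 * Example: for n = 4, m1 = 2, m2 = 8 the i9xx encoding above yields
 * 0x00040208, while the Pineview encoding packs (1 << n) into the high
 * word, giving 0x00100008 for the same n and m2.
 */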
2327 Serge 5779
 
3746 Serge 5780
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
3031 serge 5781
				     intel_clock_t *reduced_clock)
5782
{
3746 Serge 5783
	struct drm_device *dev = crtc->base.dev;
3031 serge 5784
	u32 fp, fp2 = 0;
2327 Serge 5785
 
3031 serge 5786
	if (IS_PINEVIEW(dev)) {
5354 serge 5787
		fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
3031 serge 5788
		if (reduced_clock)
4104 Serge 5789
			fp2 = pnv_dpll_compute_fp(reduced_clock);
3031 serge 5790
	} else {
5354 serge 5791
		fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
3031 serge 5792
		if (reduced_clock)
4104 Serge 5793
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
3031 serge 5794
	}
2327 Serge 5795
 
5354 serge 5796
	crtc->new_config->dpll_hw_state.fp0 = fp;
2327 Serge 5797
 
3746 Serge 5798
	crtc->lowfreq_avail = false;
5354 serge 5799
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5060 serge 5800
	    reduced_clock && i915.powersave) {
5354 serge 5801
		crtc->new_config->dpll_hw_state.fp1 = fp2;
3746 Serge 5802
		crtc->lowfreq_avail = true;
3031 serge 5803
	} else {
5354 serge 5804
		crtc->new_config->dpll_hw_state.fp1 = fp;
3031 serge 5805
	}
5806
}
2327 Serge 5807
 
4560 Serge 5808
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
5809
		pipe)
4104 Serge 5810
{
5811
	u32 reg_val;
5812
 
5813
	/*
5814
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
5815
	 * and set it to a reasonable value instead.
5816
	 */
4560 Serge 5817
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4104 Serge 5818
	reg_val &= 0xffffff00;
5819
	reg_val |= 0x00000030;
4560 Serge 5820
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4104 Serge 5821
 
4560 Serge 5822
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4104 Serge 5823
	reg_val &= 0x8cffffff;
5824
	reg_val = 0x8c000000;
4560 Serge 5825
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4104 Serge 5826
 
4560 Serge 5827
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4104 Serge 5828
	reg_val &= 0xffffff00;
4560 Serge 5829
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4104 Serge 5830
 
4560 Serge 5831
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4104 Serge 5832
	reg_val &= 0x00ffffff;
5833
	reg_val |= 0xb0000000;
4560 Serge 5834
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4104 Serge 5835
}
5836
 
5837
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5838
					 struct intel_link_m_n *m_n)
5839
{
5840
	struct drm_device *dev = crtc->base.dev;
5841
	struct drm_i915_private *dev_priv = dev->dev_private;
5842
	int pipe = crtc->pipe;
5843
 
5844
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5845
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5846
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5847
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5848
}
5849
 
5850
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5354 serge 5851
					 struct intel_link_m_n *m_n,
5852
					 struct intel_link_m_n *m2_n2)
4104 Serge 5853
{
5854
	struct drm_device *dev = crtc->base.dev;
5855
	struct drm_i915_private *dev_priv = dev->dev_private;
5856
	int pipe = crtc->pipe;
5857
	enum transcoder transcoder = crtc->config.cpu_transcoder;
5858
 
5859
	if (INTEL_INFO(dev)->gen >= 5) {
5860
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5861
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5862
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5863
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5354 serge 5864
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
5865
		 * for gen < 8) and if DRRS is supported (to make sure the
5866
		 * registers are not unnecessarily accessed).
5867
		 */
5868
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
5869
			crtc->config.has_drrs) {
5870
			I915_WRITE(PIPE_DATA_M2(transcoder),
5871
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
5872
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
5873
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
5874
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
5875
		}
4104 Serge 5876
	} else {
5877
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5878
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5879
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
5880
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
5881
	}
5882
}
5883
 
5354 serge 5884
void intel_dp_set_m_n(struct intel_crtc *crtc)
3031 serge 5885
{
3746 Serge 5886
	if (crtc->config.has_pch_encoder)
5887
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5888
	else
5354 serge 5889
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
5890
						   &crtc->config.dp_m2_n2);
3746 Serge 5891
}
5892
 
5354 serge 5893
static void vlv_update_pll(struct intel_crtc *crtc,
5894
			   struct intel_crtc_config *pipe_config)
3746 Serge 5895
{
5060 serge 5896
	u32 dpll, dpll_md;
5897
 
5898
	/*
5899
	 * Enable DPIO clock input. We should never disable the reference
5900
	 * clock for pipe B, since VGA hotplug / manual detection depends
5901
	 * on it.
5902
	 */
5903
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5904
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5905
	/* We should never disable this, set it here for state tracking */
5906
	if (crtc->pipe == PIPE_B)
5907
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5908
	dpll |= DPLL_VCO_ENABLE;
5354 serge 5909
	pipe_config->dpll_hw_state.dpll = dpll;
5060 serge 5910
 
5354 serge 5911
	dpll_md = (pipe_config->pixel_multiplier - 1)
5060 serge 5912
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5354 serge 5913
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
5060 serge 5914
}
5915
 
5354 serge 5916
static void vlv_prepare_pll(struct intel_crtc *crtc,
5917
			    const struct intel_crtc_config *pipe_config)
5060 serge 5918
{
3746 Serge 5919
	struct drm_device *dev = crtc->base.dev;
3031 serge 5920
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 5921
	int pipe = crtc->pipe;
5060 serge 5922
	u32 mdiv;
3031 serge 5923
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
5060 serge 5924
	u32 coreclk, reg_val;
2327 Serge 5925
 
3480 Serge 5926
	mutex_lock(&dev_priv->dpio_lock);
5927
 
5354 serge 5928
	bestn = pipe_config->dpll.n;
5929
	bestm1 = pipe_config->dpll.m1;
5930
	bestm2 = pipe_config->dpll.m2;
5931
	bestp1 = pipe_config->dpll.p1;
5932
	bestp2 = pipe_config->dpll.p2;
3031 serge 5933
 
4104 Serge 5934
	/* See eDP HDMI DPIO driver vbios notes doc */
5935
 
5936
	/* PLL B needs special handling */
5060 serge 5937
	if (pipe == PIPE_B)
4560 Serge 5938
		vlv_pllb_recal_opamp(dev_priv, pipe);
4104 Serge 5939
 
5940
	/* Set up Tx target for periodic Rcomp update */
4560 Serge 5941
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
4104 Serge 5942
 
5943
	/* Disable target IRef on PLL */
4560 Serge 5944
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
4104 Serge 5945
	reg_val &= 0x00ffffff;
4560 Serge 5946
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
4104 Serge 5947
 
5948
	/* Disable fast lock */
4560 Serge 5949
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
4104 Serge 5950
 
5951
	/* Set idtafcrecal before PLL is enabled */
3031 serge 5952
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5953
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5954
	mdiv |= ((bestn << DPIO_N_SHIFT));
5955
	mdiv |= (1 << DPIO_K_SHIFT);
4104 Serge 5956
 
5957
	/*
5958
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5959
	 * but we don't support that).
5960
	 * Note: don't use the DAC post divider as it seems unstable.
5961
	 */
5962
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4560 Serge 5963
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
4104 Serge 5964
 
3031 serge 5965
	mdiv |= DPIO_ENABLE_CALIBRATION;
4560 Serge 5966
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
3031 serge 5967
 
4104 Serge 5968
	/* Set HBR and RBR LPF coefficients */
5354 serge 5969
	if (pipe_config->port_clock == 162000 ||
5970
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
5971
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
4560 Serge 5972
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4104 Serge 5973
				 0x009f0003);
5974
	else
4560 Serge 5975
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4104 Serge 5976
				 0x00d0000f);
3031 serge 5977
 
5354 serge 5978
	if (crtc->config.has_dp_encoder) {
4104 Serge 5979
		/* Use SSC source */
5060 serge 5980
		if (pipe == PIPE_A)
4560 Serge 5981
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5982
					 0x0df40000);
5983
		else
4560 Serge 5984
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5985
					 0x0df70000);
5986
	} else { /* HDMI or VGA */
5987
		/* Use bend source */
5060 serge 5988
		if (pipe == PIPE_A)
4560 Serge 5989
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5990
					 0x0df70000);
5991
		else
4560 Serge 5992
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5993
					 0x0df40000);
5994
	}
3031 serge 5995
 
4560 Serge 5996
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
4104 Serge 5997
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5354 serge 5998
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
5999
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
4104 Serge 6000
		coreclk |= 0x01000000;
4560 Serge 6001
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
3031 serge 6002
 
4560 Serge 6003
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5060 serge 6004
	mutex_unlock(&dev_priv->dpio_lock);
6005
}
4104 Serge 6006
 
5354 serge 6007
static void chv_update_pll(struct intel_crtc *crtc,
6008
			   struct intel_crtc_config *pipe_config)
5060 serge 6009
{
5354 serge 6010
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
6011
		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
6012
		DPLL_VCO_ENABLE;
6013
	if (crtc->pipe != PIPE_A)
6014
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6015
 
6016
	pipe_config->dpll_hw_state.dpll_md =
6017
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6018
}
6019
 
6020
static void chv_prepare_pll(struct intel_crtc *crtc,
6021
			    const struct intel_crtc_config *pipe_config)
6022
{
5060 serge 6023
	struct drm_device *dev = crtc->base.dev;
6024
	struct drm_i915_private *dev_priv = dev->dev_private;
6025
	int pipe = crtc->pipe;
6026
	int dpll_reg = DPLL(crtc->pipe);
6027
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
6028
	u32 loopfilter, intcoeff;
6029
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
6030
	int refclk;
6031
 
5354 serge 6032
	bestn = pipe_config->dpll.n;
6033
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
6034
	bestm1 = pipe_config->dpll.m1;
6035
	bestm2 = pipe_config->dpll.m2 >> 22;
6036
	bestp1 = pipe_config->dpll.p1;
6037
	bestp2 = pipe_config->dpll.p2;
5060 serge 6038
 
4560 Serge 6039
	/*
5060 serge 6040
	 * Enable Refclk and SSC
4560 Serge 6041
	 */
5060 serge 6042
	I915_WRITE(dpll_reg,
5354 serge 6043
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
3031 serge 6044
 
5060 serge 6045
	mutex_lock(&dev_priv->dpio_lock);
3031 serge 6046
 
5060 serge 6047
	/* p1 and p2 divider */
6048
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
6049
			5 << DPIO_CHV_S1_DIV_SHIFT |
6050
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
6051
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
6052
			1 << DPIO_CHV_K_DIV_SHIFT);
3243 Serge 6053
 
5060 serge 6054
	/* Feedback post-divider - m2 */
6055
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
6056
 
6057
	/* Feedback refclk divider - n and m1 */
6058
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
6059
			DPIO_CHV_M1_DIV_BY_2 |
6060
			1 << DPIO_CHV_N_DIV_SHIFT);
6061
 
6062
	/* M2 fraction division */
6063
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
6064
 
6065
	/* M2 fraction division enable */
6066
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
6067
		       DPIO_CHV_FRAC_DIV_EN |
6068
		       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
6069
 
6070
	/* Loop filter */
5354 serge 6071
	refclk = i9xx_get_refclk(crtc, 0);
5060 serge 6072
	loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
6073
		2 << DPIO_CHV_GAIN_CTRL_SHIFT;
6074
	if (refclk == 100000)
6075
		intcoeff = 11;
6076
	else if (refclk == 38400)
6077
		intcoeff = 10;
6078
	else
6079
		intcoeff = 9;
6080
	loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
6081
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
6082
 
6083
	/* AFC Recal */
6084
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
6085
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
6086
			DPIO_AFC_RECAL);
6087
 
3480 Serge 6088
	mutex_unlock(&dev_priv->dpio_lock);
3031 serge 6089
}
6090
 
5354 serge 6091
/**
6092
 * vlv_force_pll_on - forcibly enable just the PLL
6093
 * @dev_priv: i915 private structure
6094
 * @pipe: pipe PLL to enable
6095
 * @dpll: PLL configuration
6096
 *
6097
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
6098
 * in cases where we need the PLL enabled even when @pipe is not going to
6099
 * be enabled.
6100
 */
6101
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
6102
		      const struct dpll *dpll)
6103
{
6104
	struct intel_crtc *crtc =
6105
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
6106
	struct intel_crtc_config pipe_config = {
6107
		.pixel_multiplier = 1,
6108
		.dpll = *dpll,
6109
	};
6110
 
6111
	if (IS_CHERRYVIEW(dev)) {
6112
		chv_update_pll(crtc, &pipe_config);
6113
		chv_prepare_pll(crtc, &pipe_config);
6114
		chv_enable_pll(crtc, &pipe_config);
6115
	} else {
6116
		vlv_update_pll(crtc, &pipe_config);
6117
		vlv_prepare_pll(crtc, &pipe_config);
6118
		vlv_enable_pll(crtc, &pipe_config);
6119
	}
6120
}
6121
 
6122
/**
6123
 * vlv_force_pll_off - forcibly disable just the PLL
6124
 * @dev_priv: i915 private structure
6125
 * @pipe: pipe PLL to disable
6126
 *
6127
 * Disable the PLL for @pipe. To be used in cases where we need
6128
 * the PLL enabled even when @pipe is not going to be enabled.
6129
 */
6130
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
6131
{
6132
	if (IS_CHERRYVIEW(dev))
6133
		chv_disable_pll(to_i915(dev), pipe);
6134
	else
6135
		vlv_disable_pll(to_i915(dev), pipe);
6136
}
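/*
 * Usage sketch (the divider values below are hypothetical and not
 * validated against any real mode):
 *
 *	struct dpll clk = { .n = 1, .m1 = 2, .m2 = 100, .p1 = 3, .p2 = 2 };
 *
 *	vlv_force_pll_on(dev, PIPE_B, &clk);
 *	... use the PLL without enabling the pipe ...
 *	vlv_force_pll_off(dev, PIPE_B);
 */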
6137
 
3746 Serge 6138
static void i9xx_update_pll(struct intel_crtc *crtc,
6139
			    intel_clock_t *reduced_clock,
3031 serge 6140
			    int num_connectors)
6141
{
3746 Serge 6142
	struct drm_device *dev = crtc->base.dev;
3031 serge 6143
	struct drm_i915_private *dev_priv = dev->dev_private;
6144
	u32 dpll;
6145
	bool is_sdvo;
5354 serge 6146
	struct dpll *clock = &crtc->new_config->dpll;
3031 serge 6147
 
3746 Serge 6148
	i9xx_update_pll_dividers(crtc, reduced_clock);
3243 Serge 6149
 
5354 serge 6150
	is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
6151
		intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
3031 serge 6152
 
6153
	dpll = DPLL_VGA_MODE_DIS;
6154
 
5354 serge 6155
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
3031 serge 6156
		dpll |= DPLLB_MODE_LVDS;
6157
	else
6158
		dpll |= DPLLB_MODE_DAC_SERIAL;
3746 Serge 6159
 
4104 Serge 6160
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5354 serge 6161
		dpll |= (crtc->new_config->pixel_multiplier - 1)
3746 Serge 6162
				<< SDVO_MULTIPLIER_SHIFT_HIRES;
2342 Serge 6163
		}
4104 Serge 6164
 
6165
	if (is_sdvo)
6166
		dpll |= DPLL_SDVO_HIGH_SPEED;
6167
 
5354 serge 6168
	if (crtc->new_config->has_dp_encoder)
4104 Serge 6169
		dpll |= DPLL_SDVO_HIGH_SPEED;
2342 Serge 6170
 
3031 serge 6171
	/* compute bitmask from p1 value */
6172
	if (IS_PINEVIEW(dev))
6173
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
6174
	else {
6175
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6176
		if (IS_G4X(dev) && reduced_clock)
6177
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
6178
	}
6179
	switch (clock->p2) {
6180
	case 5:
6181
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
6182
		break;
6183
	case 7:
6184
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
6185
		break;
6186
	case 10:
6187
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
6188
		break;
6189
	case 14:
6190
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
6191
		break;
6192
	}
6193
	if (INTEL_INFO(dev)->gen >= 4)
6194
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
2327 Serge 6195
 
5354 serge 6196
	if (crtc->new_config->sdvo_tv_clock)
3031 serge 6197
		dpll |= PLL_REF_INPUT_TVCLKINBC;
5354 serge 6198
	else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
3031 serge 6199
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6200
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6201
	else
6202
		dpll |= PLL_REF_INPUT_DREFCLK;
2327 Serge 6203
 
3031 serge 6204
	dpll |= DPLL_VCO_ENABLE;
5354 serge 6205
	crtc->new_config->dpll_hw_state.dpll = dpll;
2327 Serge 6206
 
4104 Serge 6207
	if (INTEL_INFO(dev)->gen >= 4) {
5354 serge 6208
		u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
4104 Serge 6209
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5354 serge 6210
		crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
4104 Serge 6211
	}
3031 serge 6212
}
2327 Serge 6213
 
3746 Serge 6214
static void i8xx_update_pll(struct intel_crtc *crtc,
6215
			    intel_clock_t *reduced_clock,
3031 serge 6216
			    int num_connectors)
6217
{
3746 Serge 6218
	struct drm_device *dev = crtc->base.dev;
3031 serge 6219
	struct drm_i915_private *dev_priv = dev->dev_private;
6220
	u32 dpll;
5354 serge 6221
	struct dpll *clock = &crtc->new_config->dpll;
2327 Serge 6222
 
3746 Serge 6223
	i9xx_update_pll_dividers(crtc, reduced_clock);
3243 Serge 6224
 
3031 serge 6225
	dpll = DPLL_VGA_MODE_DIS;
2327 Serge 6226
 
5354 serge 6227
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
3031 serge 6228
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6229
	} else {
6230
		if (clock->p1 == 2)
6231
			dpll |= PLL_P1_DIVIDE_BY_TWO;
6232
		else
6233
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6234
		if (clock->p2 == 4)
6235
			dpll |= PLL_P2_DIVIDE_BY_4;
6236
	}
2327 Serge 6237
 
5354 serge 6238
	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
4104 Serge 6239
		dpll |= DPLL_DVO_2X_MODE;
6240
 
5354 serge 6241
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
3031 serge 6242
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6243
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6244
	else
6245
		dpll |= PLL_REF_INPUT_DREFCLK;
6246
 
6247
	dpll |= DPLL_VCO_ENABLE;
5354 serge 6248
	crtc->new_config->dpll_hw_state.dpll = dpll;
3031 serge 6249
}
6250
 
4104 Serge 6251
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
3243 Serge 6252
{
6253
	struct drm_device *dev = intel_crtc->base.dev;
6254
	struct drm_i915_private *dev_priv = dev->dev_private;
6255
	enum pipe pipe = intel_crtc->pipe;
3746 Serge 6256
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4104 Serge 6257
	struct drm_display_mode *adjusted_mode =
6258
		&intel_crtc->config.adjusted_mode;
5060 serge 6259
	uint32_t crtc_vtotal, crtc_vblank_end;
6260
	int vsyncshift = 0;
3243 Serge 6261
 
4104 Serge 6262
	/* We need to be careful not to changed the adjusted mode, for otherwise
6263
	 * the hw state checker will get angry at the mismatch. */
6264
	crtc_vtotal = adjusted_mode->crtc_vtotal;
6265
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
6266
 
5060 serge 6267
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3243 Serge 6268
		/* the chip adds 2 halflines automatically */
4104 Serge 6269
		crtc_vtotal -= 1;
6270
		crtc_vblank_end -= 1;
5060 serge 6271
 
5354 serge 6272
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
5060 serge 6273
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
6274
		else
6275
			vsyncshift = adjusted_mode->crtc_hsync_start -
6276
				adjusted_mode->crtc_htotal / 2;
6277
		if (vsyncshift < 0)
6278
			vsyncshift += adjusted_mode->crtc_htotal;
3243 Serge 6279
	}
6280
 
6281
	if (INTEL_INFO(dev)->gen > 3)
6282
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
6283
 
6284
	I915_WRITE(HTOTAL(cpu_transcoder),
6285
		   (adjusted_mode->crtc_hdisplay - 1) |
6286
		   ((adjusted_mode->crtc_htotal - 1) << 16));
6287
	I915_WRITE(HBLANK(cpu_transcoder),
6288
		   (adjusted_mode->crtc_hblank_start - 1) |
6289
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
6290
	I915_WRITE(HSYNC(cpu_transcoder),
6291
		   (adjusted_mode->crtc_hsync_start - 1) |
6292
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
6293
 
6294
	I915_WRITE(VTOTAL(cpu_transcoder),
6295
		   (adjusted_mode->crtc_vdisplay - 1) |
4104 Serge 6296
		   ((crtc_vtotal - 1) << 16));
3243 Serge 6297
	I915_WRITE(VBLANK(cpu_transcoder),
6298
		   (adjusted_mode->crtc_vblank_start - 1) |
4104 Serge 6299
		   ((crtc_vblank_end - 1) << 16));
3243 Serge 6300
	I915_WRITE(VSYNC(cpu_transcoder),
6301
		   (adjusted_mode->crtc_vsync_start - 1) |
6302
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
6303
 
6304
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
6305
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
6306
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
6307
	 * bits. */
6308
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
6309
	    (pipe == PIPE_B || pipe == PIPE_C))
6310
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
6311
 
6312
	/* pipesrc controls the size that is scaled from, which should
6313
	 * always be the user's requested size.
6314
	 */
6315
	I915_WRITE(PIPESRC(pipe),
4560 Serge 6316
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
6317
		   (intel_crtc->config.pipe_src_h - 1));
3243 Serge 6318
}
6319
 
4104 Serge 6320
static void intel_get_pipe_timings(struct intel_crtc *crtc,
6321
				   struct intel_crtc_config *pipe_config)
6322
{
6323
	struct drm_device *dev = crtc->base.dev;
6324
	struct drm_i915_private *dev_priv = dev->dev_private;
6325
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6326
	uint32_t tmp;
6327
 
6328
	tmp = I915_READ(HTOTAL(cpu_transcoder));
6329
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
6330
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
6331
	tmp = I915_READ(HBLANK(cpu_transcoder));
6332
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
6333
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
6334
	tmp = I915_READ(HSYNC(cpu_transcoder));
6335
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
6336
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
6337
 
6338
	tmp = I915_READ(VTOTAL(cpu_transcoder));
6339
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
6340
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
6341
	tmp = I915_READ(VBLANK(cpu_transcoder));
6342
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
6343
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
6344
	tmp = I915_READ(VSYNC(cpu_transcoder));
6345
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
6346
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
6347
 
6348
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
6349
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
6350
		pipe_config->adjusted_mode.crtc_vtotal += 1;
6351
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
6352
	}
6353
 
6354
	tmp = I915_READ(PIPESRC(crtc->pipe));
4560 Serge 6355
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
6356
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
6357
 
6358
	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
6359
	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
4104 Serge 6360
}
6361
 
5060 serge 6362
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
4104 Serge 6363
					     struct intel_crtc_config *pipe_config)
6364
{
5060 serge 6365
	mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
6366
	mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
6367
	mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
6368
	mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
4104 Serge 6369
 
5060 serge 6370
	mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
6371
	mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
6372
	mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
6373
	mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
4104 Serge 6374
 
5060 serge 6375
	mode->flags = pipe_config->adjusted_mode.flags;
4104 Serge 6376
 
5060 serge 6377
	mode->clock = pipe_config->adjusted_mode.crtc_clock;
6378
	mode->flags |= pipe_config->adjusted_mode.flags;
4104 Serge 6379
}
6380
 
3746 Serge 6381
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6382
{
6383
	struct drm_device *dev = intel_crtc->base.dev;
6384
	struct drm_i915_private *dev_priv = dev->dev_private;
6385
	uint32_t pipeconf;
6386
 
4104 Serge 6387
	pipeconf = 0;
3746 Serge 6388
 
5354 serge 6389
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
6390
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
6391
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
4104 Serge 6392
 
4560 Serge 6393
	if (intel_crtc->config.double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;
6395
 
4104 Serge 6396
	/* only g4x and later have fancy bpc/dither controls */
6397
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6398
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
6399
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
6400
			pipeconf |= PIPECONF_DITHER_EN |
3746 Serge 6401
				    PIPECONF_DITHER_TYPE_SP;
6402
 
4104 Serge 6403
		switch (intel_crtc->config.pipe_bpp) {
6404
		case 18:
6405
			pipeconf |= PIPECONF_6BPC;
6406
			break;
6407
		case 24:
6408
			pipeconf |= PIPECONF_8BPC;
6409
			break;
6410
		case 30:
6411
			pipeconf |= PIPECONF_10BPC;
6412
			break;
6413
		default:
6414
			/* Case prevented by intel_choose_pipe_bpp_dither. */
6415
			BUG();
3746 Serge 6416
		}
6417
	}
6418
 
6419
	if (HAS_PIPE_CXSR(dev)) {
6420
		if (intel_crtc->lowfreq_avail) {
6421
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6422
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6423
		} else {
6424
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6425
		}
6426
	}
6427
 
5060 serge 6428
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;
6436
 
4104 Serge 6437
	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
6439
 
6440
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
6441
	POSTING_READ(PIPECONF(intel_crtc->pipe));
6442
}
6443
 
5354 serge 6444
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
3031 serge 6445
{
5354 serge 6446
	struct drm_device *dev = crtc->base.dev;
3031 serge 6447
	struct drm_i915_private *dev_priv = dev->dev_private;
6448
	int refclk, num_connectors = 0;
6449
	intel_clock_t clock, reduced_clock;
4104 Serge 6450
	bool ok, has_reduced_clock = false;
4560 Serge 6451
	bool is_lvds = false, is_dsi = false;
3031 serge 6452
	struct intel_encoder *encoder;
6453
	const intel_limit_t *limit;
6454
 
5354 serge 6455
	for_each_intel_encoder(dev, encoder) {
6456
		if (encoder->new_crtc != crtc)
6457
			continue;
6458
 
3031 serge 6459
		switch (encoder->type) {
6460
		case INTEL_OUTPUT_LVDS:
6461
			is_lvds = true;
6462
			break;
4560 Serge 6463
		case INTEL_OUTPUT_DSI:
6464
			is_dsi = true;
6465
			break;
5354 serge 6466
		default:
6467
			break;
3031 serge 6468
		}
6469
 
6470
		num_connectors++;
6471
	}
6472
 
4560 Serge 6473
	if (is_dsi)
5060 serge 6474
		return 0;
4560 Serge 6475
 
5354 serge 6476
	if (!crtc->new_config->clock_set) {
		refclk = i9xx_get_refclk(crtc, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.  The returned values represent
		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc,
						 crtc->new_config->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		if (is_lvds && dev_priv->lvds_downclock_avail) {
			/*
			 * Ensure we match the reduced clock's P to the target
			 * clock.  If the clocks don't match, we can't switch
			 * the display clock by using the FP0/FP1. In such case
			 * we will disable the LVDS downclock feature.
			 */
			has_reduced_clock =
				dev_priv->display.find_dpll(limit, crtc,
							    dev_priv->lvds_downclock,
							    refclk, &clock,
							    &reduced_clock);
		}

		/* Compat-code for transition, will disappear. */
		crtc->new_config->dpll.n = clock.n;
		crtc->new_config->dpll.m1 = clock.m1;
		crtc->new_config->dpll.m2 = clock.m2;
		crtc->new_config->dpll.p1 = clock.p1;
		crtc->new_config->dpll.p2 = clock.p2;
	}
3031 serge 6514
 
4560 Serge 6515
	if (IS_GEN2(dev)) {
5354 serge 6516
		i8xx_update_pll(crtc,
3243 Serge 6517
				has_reduced_clock ? &reduced_clock : NULL,
6518
				num_connectors);
5060 serge 6519
	} else if (IS_CHERRYVIEW(dev)) {
5354 serge 6520
		chv_update_pll(crtc, crtc->new_config);
4560 Serge 6521
	} else if (IS_VALLEYVIEW(dev)) {
5354 serge 6522
		vlv_update_pll(crtc, crtc->new_config);
4560 Serge 6523
	} else {
5354 serge 6524
		i9xx_update_pll(crtc,
3031 serge 6525
				has_reduced_clock ? &reduced_clock : NULL,
6526
				num_connectors);
4560 Serge 6527
	}
3031 serge 6528
 
5060 serge 6529
	return 0;
2327 Serge 6530
}
6531
 
4104 Serge 6532
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6533
				 struct intel_crtc_config *pipe_config)
6534
{
6535
	struct drm_device *dev = crtc->base.dev;
6536
	struct drm_i915_private *dev_priv = dev->dev_private;
6537
	uint32_t tmp;
6538
 
4560 Serge 6539
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6540
		return;
6541
 
4104 Serge 6542
	tmp = I915_READ(PFIT_CONTROL);
6543
	if (!(tmp & PFIT_ENABLE))
6544
		return;
6545
 
6546
	/* Check whether the pfit is attached to our pipe. */
6547
	if (INTEL_INFO(dev)->gen < 4) {
6548
		if (crtc->pipe != PIPE_B)
6549
			return;
6550
	} else {
6551
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6552
			return;
6553
	}
6554
 
6555
	pipe_config->gmch_pfit.control = tmp;
6556
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6557
	if (INTEL_INFO(dev)->gen < 5)
6558
		pipe_config->gmch_pfit.lvds_border_bits =
6559
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6560
}
6561
 
4398 Serge 6562
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6563
			       struct intel_crtc_config *pipe_config)
6564
{
6565
	struct drm_device *dev = crtc->base.dev;
6566
	struct drm_i915_private *dev_priv = dev->dev_private;
6567
	int pipe = pipe_config->cpu_transcoder;
6568
	intel_clock_t clock;
6569
	u32 mdiv;
6570
	int refclk = 100000;
6571
 
5060 serge 6572
	/* In case of MIPI DPLL will not even be used */
6573
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
6574
		return;
6575
 
4398 Serge 6576
	mutex_lock(&dev_priv->dpio_lock);
4560 Serge 6577
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4398 Serge 6578
	mutex_unlock(&dev_priv->dpio_lock);
6579
 
6580
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6581
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
6582
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6583
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6584
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6585
 
4560 Serge 6586
	vlv_clock(refclk, &clock);
4398 Serge 6587
 
4560 Serge 6588
	/* clock.dot is the fast clock */
6589
	pipe_config->port_clock = clock.dot / 5;
4398 Serge 6590
}
6591
 
5060 serge 6592
static void i9xx_get_plane_config(struct intel_crtc *crtc,
6593
				  struct intel_plane_config *plane_config)
6594
{
6595
	struct drm_device *dev = crtc->base.dev;
6596
	struct drm_i915_private *dev_priv = dev->dev_private;
6597
	u32 val, base, offset;
6598
	int pipe = crtc->pipe, plane = crtc->plane;
6599
	int fourcc, pixel_format;
6600
	int aligned_height;
6601
 
6602
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6603
	if (!crtc->base.primary->fb) {
6604
		DRM_DEBUG_KMS("failed to alloc fb\n");
6605
		return;
6606
	}
6607
 
6608
	val = I915_READ(DSPCNTR(plane));
6609
 
6610
	if (INTEL_INFO(dev)->gen >= 4)
6611
		if (val & DISPPLANE_TILED)
6612
			plane_config->tiled = true;
6613
 
6614
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6615
	fourcc = intel_format_to_fourcc(pixel_format);
6616
	crtc->base.primary->fb->pixel_format = fourcc;
6617
	crtc->base.primary->fb->bits_per_pixel =
6618
		drm_format_plane_cpp(fourcc, 0) * 8;
6619
 
6620
	if (INTEL_INFO(dev)->gen >= 4) {
6621
		if (plane_config->tiled)
6622
			offset = I915_READ(DSPTILEOFF(plane));
6623
		else
6624
			offset = I915_READ(DSPLINOFF(plane));
6625
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6626
	} else {
6627
		base = I915_READ(DSPADDR(plane));
6628
	}
6629
	plane_config->base = base;
6630
 
6631
	val = I915_READ(PIPESRC(pipe));
6632
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6633
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6634
 
6635
	val = I915_READ(DSPSTRIDE(pipe));
5354 serge 6636
	crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
5060 serge 6637
 
6638
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6639
					    plane_config->tiled);
6640
 
6641
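	/* Note: the plane size is hard-coded to 16 MiB here instead of being
	 * derived from the stride and aligned height (cf. the ironlake path). */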
	plane_config->size = 16*1024*1024;
6642
 
6643
 
6644
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6645
		      pipe, plane, crtc->base.primary->fb->width,
6646
		      crtc->base.primary->fb->height,
6647
		      crtc->base.primary->fb->bits_per_pixel, base,
6648
		      crtc->base.primary->fb->pitches[0],
6649
		      plane_config->size);
6650
 
6651
}
6652
 
6653
static void chv_crtc_clock_get(struct intel_crtc *crtc,
6654
			       struct intel_crtc_config *pipe_config)
6655
{
6656
	struct drm_device *dev = crtc->base.dev;
6657
	struct drm_i915_private *dev_priv = dev->dev_private;
6658
	int pipe = pipe_config->cpu_transcoder;
6659
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
6660
	intel_clock_t clock;
6661
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
6662
	int refclk = 100000;
6663
 
6664
	mutex_lock(&dev_priv->dpio_lock);
6665
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6666
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6667
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6668
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6669
	mutex_unlock(&dev_priv->dpio_lock);
6670
 
6671
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6672
	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
6673
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6674
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6675
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6676
 
6677
	chv_clock(refclk, &clock);
6678
 
6679
	/* clock.dot is the fast clock */
6680
	pipe_config->port_clock = clock.dot / 5;
6681
}
6682
 
3746 Serge 6683
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6684
				 struct intel_crtc_config *pipe_config)
6685
{
6686
	struct drm_device *dev = crtc->base.dev;
6687
	struct drm_i915_private *dev_priv = dev->dev_private;
6688
	uint32_t tmp;
6689
 
5354 serge 6690
	if (!intel_display_power_is_enabled(dev_priv,
5060 serge 6691
					 POWER_DOMAIN_PIPE(crtc->pipe)))
6692
		return false;
6693
 
4104 Serge 6694
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6695
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6696
 
3746 Serge 6697
	tmp = I915_READ(PIPECONF(crtc->pipe));
6698
	if (!(tmp & PIPECONF_ENABLE))
6699
		return false;
6700
 
4280 Serge 6701
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6702
		switch (tmp & PIPECONF_BPC_MASK) {
6703
		case PIPECONF_6BPC:
6704
			pipe_config->pipe_bpp = 18;
6705
			break;
6706
		case PIPECONF_8BPC:
6707
			pipe_config->pipe_bpp = 24;
6708
			break;
6709
		case PIPECONF_10BPC:
6710
			pipe_config->pipe_bpp = 30;
6711
			break;
6712
		default:
6713
			break;
6714
		}
6715
	}
6716
 
5060 serge 6717
	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
6718
		pipe_config->limited_color_range = true;
6719
 
4560 Serge 6720
	if (INTEL_INFO(dev)->gen < 4)
6721
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
6722
 
4104 Serge 6723
	intel_get_pipe_timings(crtc, pipe_config);
6724
 
6725
	i9xx_get_pfit_config(crtc, pipe_config);
6726
 
6727
	if (INTEL_INFO(dev)->gen >= 4) {
6728
		tmp = I915_READ(DPLL_MD(crtc->pipe));
6729
		pipe_config->pixel_multiplier =
6730
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
6731
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
6732
		pipe_config->dpll_hw_state.dpll_md = tmp;
6733
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6734
		tmp = I915_READ(DPLL(crtc->pipe));
6735
		pipe_config->pixel_multiplier =
6736
			((tmp & SDVO_MULTIPLIER_MASK)
6737
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
6738
	} else {
6739
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
6740
		 * port and will be fixed up in the encoder->get_config
6741
		 * function. */
6742
		pipe_config->pixel_multiplier = 1;
6743
	}
6744
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
6745
	if (!IS_VALLEYVIEW(dev)) {
5354 serge 6746
		/*
6747
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
6748
		 * on 830. Filter it out here so that we don't
6749
		 * report errors due to that.
6750
		 */
6751
		if (IS_I830(dev))
6752
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
6753
 
4104 Serge 6754
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
6755
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
6756
	} else {
6757
		/* Mask out read-only status bits. */
6758
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
6759
						     DPLL_PORTC_READY_MASK |
6760
						     DPLL_PORTB_READY_MASK);
6761
	}
6762
 
5060 serge 6763
	if (IS_CHERRYVIEW(dev))
6764
		chv_crtc_clock_get(crtc, pipe_config);
6765
	else if (IS_VALLEYVIEW(dev))
4560 Serge 6766
		vlv_crtc_clock_get(crtc, pipe_config);
6767
	else
6768
		i9xx_crtc_clock_get(crtc, pipe_config);
6769
 
3746 Serge 6770
	return true;
6771
}
6772
 
3243 Serge 6773
static void ironlake_init_pch_refclk(struct drm_device *dev)
2327 Serge 6774
{
6775
	struct drm_i915_private *dev_priv = dev->dev_private;
6776
	struct intel_encoder *encoder;
3746 Serge 6777
	u32 val, final;
2327 Serge 6778
	bool has_lvds = false;
2342 Serge 6779
	bool has_cpu_edp = false;
6780
	bool has_panel = false;
6781
	bool has_ck505 = false;
6782
	bool can_ssc = false;
2327 Serge 6783
 
6784
	/* We need to take the global config into account */
5354 serge 6785
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}
2342 Serge 6800
 
6801
	if (HAS_PCH_IBX(dev)) {
4104 Serge 6802
		has_ck505 = dev_priv->vbt.display_clock_mode;
2342 Serge 6803
		can_ssc = has_ck505;
6804
	} else {
6805
		has_ck505 = false;
6806
		can_ssc = true;
2327 Serge 6807
	}
6808
 
4104 Serge 6809
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
6810
		      has_panel, has_lvds, has_ck505);
2342 Serge 6811
 
2327 Serge 6812
	/* Ironlake: try to setup display ref clock before DPLL
6813
	 * enabling. This is only under driver's control after
6814
	 * PCH B stepping, previous chipset stepping should be
6815
	 * ignoring this setting.
6816
	 */
3746 Serge 6817
	val = I915_READ(PCH_DREF_CONTROL);
6818
 
6819
	/* As we must carefully and slowly disable/enable each source in turn,
6820
	 * compute the final state we want first and check if we need to
6821
	 * make any changes at all.
6822
	 */
6823
	final = val;
6824
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
6825
	if (has_ck505)
6826
		final |= DREF_NONSPREAD_CK505_ENABLE;
6827
	else
6828
		final |= DREF_NONSPREAD_SOURCE_ENABLE;
6829
 
6830
	final &= ~DREF_SSC_SOURCE_MASK;
6831
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6832
	final &= ~DREF_SSC1_ENABLE;
6833
 
6834
	if (has_panel) {
6835
		final |= DREF_SSC_SOURCE_ENABLE;
6836
 
6837
		if (intel_panel_use_ssc(dev_priv) && can_ssc)
6838
			final |= DREF_SSC1_ENABLE;
6839
 
6840
		if (has_cpu_edp) {
6841
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
6842
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6843
			else
6844
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6845
		} else
6846
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6847
	} else {
6848
		final |= DREF_SSC_SOURCE_DISABLE;
6849
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6850
	}
6851
 
6852
	if (final == val)
6853
		return;
6854
 
2327 Serge 6855
	/* Always enable nonspread source */
3746 Serge 6856
	val &= ~DREF_NONSPREAD_SOURCE_MASK;
2342 Serge 6857
 
6858
	if (has_ck505)
3746 Serge 6859
		val |= DREF_NONSPREAD_CK505_ENABLE;
2342 Serge 6860
	else
3746 Serge 6861
		val |= DREF_NONSPREAD_SOURCE_ENABLE;
2342 Serge 6862
 
6863
	if (has_panel) {
3746 Serge 6864
		val &= ~DREF_SSC_SOURCE_MASK;
6865
		val |= DREF_SSC_SOURCE_ENABLE;
2327 Serge 6866
 
2342 Serge 6867
		/* SSC must be turned on before enabling the CPU output  */
6868
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6869
			DRM_DEBUG_KMS("Using SSC on panel\n");
3746 Serge 6870
			val |= DREF_SSC1_ENABLE;
3031 serge 6871
		} else
3746 Serge 6872
			val &= ~DREF_SSC1_ENABLE;
2327 Serge 6873
 
2342 Serge 6874
		/* Get SSC going before enabling the outputs */
3746 Serge 6875
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
2342 Serge 6878
 
3746 Serge 6879
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
2327 Serge 6880
 
6881
		/* Enable CPU source on CPU attached eDP */
2342 Serge 6882
		if (has_cpu_edp) {
6883
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6884
				DRM_DEBUG_KMS("Using SSC on eDP\n");
3746 Serge 6885
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5060 serge 6886
			} else
3746 Serge 6887
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
2342 Serge 6888
		} else
3746 Serge 6889
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
2342 Serge 6890
 
3746 Serge 6891
		I915_WRITE(PCH_DREF_CONTROL, val);
2342 Serge 6892
		POSTING_READ(PCH_DREF_CONTROL);
6893
		udelay(200);
2327 Serge 6894
	} else {
2342 Serge 6895
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
6896
 
3746 Serge 6897
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
2342 Serge 6898
 
6899
		/* Turn off CPU output */
3746 Serge 6900
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
2342 Serge 6901
 
3746 Serge 6902
		I915_WRITE(PCH_DREF_CONTROL, val);
2327 Serge 6903
		POSTING_READ(PCH_DREF_CONTROL);
6904
		udelay(200);
2342 Serge 6905
 
6906
		/* Turn off the SSC source */
3746 Serge 6907
		val &= ~DREF_SSC_SOURCE_MASK;
6908
		val |= DREF_SSC_SOURCE_DISABLE;
2342 Serge 6909
 
6910
		/* Turn off SSC1 */
3746 Serge 6911
		val &= ~DREF_SSC1_ENABLE;
2342 Serge 6912
 
3746 Serge 6913
		I915_WRITE(PCH_DREF_CONTROL, val);
2342 Serge 6914
		POSTING_READ(PCH_DREF_CONTROL);
6915
		udelay(200);
2327 Serge 6916
	}
3746 Serge 6917
 
6918
	BUG_ON(val != final);
2327 Serge 6919
}
6920
 
4104 Serge 6921
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
3243 Serge 6922
{
4104 Serge 6923
	uint32_t tmp;
3243 Serge 6924
 
6925
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
4539 Serge 6940
}
3243 Serge 6941
 
4104 Serge 6942
/* WaMPhyProgramming:hsw */
6943
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
6944
{
6945
	uint32_t tmp;
6946
 
3243 Serge 6947
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
6948
	tmp &= ~(0xFF << 24);
6949
	tmp |= (0x12 << 24);
6950
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
6951
 
6952
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
6953
	tmp |= (1 << 11);
6954
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
6955
 
6956
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
6957
	tmp |= (1 << 11);
6958
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
6959
 
6960
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
6961
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6962
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
6963
 
6964
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
6965
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6966
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
6967
 
6968
	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
6977
 
6978
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
6979
	tmp &= ~0xFF;
6980
	tmp |= 0x1C;
6981
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
6982
 
6983
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
6984
	tmp &= ~0xFF;
6985
	tmp |= 0x1C;
6986
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
6987
 
6988
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
6989
	tmp &= ~(0xFF << 16);
6990
	tmp |= (0x1C << 16);
6991
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
6992
 
6993
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
6994
	tmp &= ~(0xFF << 16);
6995
	tmp |= (0x1C << 16);
6996
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
6997
 
6998
	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
4539 Serge 7015
}
3243 Serge 7016
 
4104 Serge 7017
/* Implements 3 different sequences from BSpec chapter "Display iCLK
7018
 * Programming" based on the parameters passed:
7019
 * - Sequence to enable CLKOUT_DP
7020
 * - Sequence to enable CLKOUT_DP without spread
7021
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
7022
 */
7023
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
7024
				 bool with_fdi)
7025
{
7026
	struct drm_i915_private *dev_priv = dev->dev_private;
7027
	uint32_t reg, tmp;
3480 Serge 7028
 
4104 Serge 7029
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
7030
		with_spread = true;
7031
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
7032
		 with_fdi, "LP PCH doesn't have FDI\n"))
7033
		with_fdi = false;
7034
 
7035
	mutex_lock(&dev_priv->dpio_lock);
7036
 
7037
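	/* Bring the SSC block out of its disabled state but keep the output
	 * path in bypass (PATHALT) while it settles; when spread is requested
	 * it is enabled below by clearing PATHALT again. */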
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7038
	tmp &= ~SBI_SSCCTL_DISABLE;
7039
	tmp |= SBI_SSCCTL_PATHALT;
7040
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7041
 
7042
	udelay(24);
7043
 
7044
	if (with_spread) {
7045
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7046
		tmp &= ~SBI_SSCCTL_PATHALT;
7047
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7048
 
7049
		if (with_fdi) {
7050
			lpt_reset_fdi_mphy(dev_priv);
7051
			lpt_program_fdi_mphy(dev_priv);
7052
		}
7053
	}
7054
 
7055
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
7056
	       SBI_GEN0 : SBI_DBUFF0;
7057
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7058
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7059
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7060
 
3480 Serge 7061
	mutex_unlock(&dev_priv->dpio_lock);
3243 Serge 7062
}
7063
 
4104 Serge 7064
/* Sequence to disable CLKOUT_DP */
7065
static void lpt_disable_clkout_dp(struct drm_device *dev)
7066
{
7067
	struct drm_i915_private *dev_priv = dev->dev_private;
7068
	uint32_t reg, tmp;
7069
 
7070
	mutex_lock(&dev_priv->dpio_lock);
7071
 
7072
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
7073
	       SBI_GEN0 : SBI_DBUFF0;
7074
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
7075
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
7076
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7077
 
7078
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
7079
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
7080
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
7081
			tmp |= SBI_SSCCTL_PATHALT;
7082
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7083
			udelay(32);
7084
		}
7085
		tmp |= SBI_SSCCTL_DISABLE;
7086
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
7087
	}
7088
 
7089
	mutex_unlock(&dev_priv->dpio_lock);
7090
}
7091
 
7092
static void lpt_init_pch_refclk(struct drm_device *dev)
7093
{
7094
	struct intel_encoder *encoder;
7095
	bool has_vga = false;
7096
 
5354 serge 7097
	for_each_intel_encoder(dev, encoder) {
4104 Serge 7098
		switch (encoder->type) {
7099
		case INTEL_OUTPUT_ANALOG:
7100
			has_vga = true;
7101
			break;
5354 serge 7102
		default:
7103
			break;
4104 Serge 7104
		}
7105
	}
7106
 
7107
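	/* CLKOUT_DP with spread and FDI mPHY setup is only needed when an
	 * analog (VGA) encoder is present; otherwise CLKOUT_DP can be
	 * disabled entirely. */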
	if (has_vga)
7108
		lpt_enable_clkout_dp(dev, true, true);
7109
	else
7110
		lpt_disable_clkout_dp(dev);
7111
}
7112
 
3243 Serge 7113
/*
7114
 * Initialize reference clocks when the driver loads
7115
 */
7116
void intel_init_pch_refclk(struct drm_device *dev)
7117
{
7118
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
7119
		ironlake_init_pch_refclk(dev);
7120
	else if (HAS_PCH_LPT(dev))
7121
		lpt_init_pch_refclk(dev);
7122
}
7123
 
2342 Serge 7124
static int ironlake_get_refclk(struct drm_crtc *crtc)
7125
{
7126
	struct drm_device *dev = crtc->dev;
7127
	struct drm_i915_private *dev_priv = dev->dev_private;
7128
	struct intel_encoder *encoder;
7129
	int num_connectors = 0;
7130
	bool is_lvds = false;
7131
 
5354 serge 7132
	for_each_intel_encoder(dev, encoder) {
7133
		if (encoder->new_crtc != to_intel_crtc(crtc))
7134
			continue;
7135
 
2342 Serge 7136
		switch (encoder->type) {
7137
		case INTEL_OUTPUT_LVDS:
7138
			is_lvds = true;
7139
			break;
5354 serge 7140
		default:
7141
			break;
2342 Serge 7142
		}
7143
		num_connectors++;
7144
	}
7145
 
7146
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 7147
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
4104 Serge 7148
			      dev_priv->vbt.lvds_ssc_freq);
4560 Serge 7149
		return dev_priv->vbt.lvds_ssc_freq;
2342 Serge 7150
	}
7151
 
7152
	return 120000;
7153
}
7154
 
4104 Serge 7155
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
3031 serge 7156
{
7157
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
7158
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7159
	int pipe = intel_crtc->pipe;
7160
	uint32_t val;
7161
 
4104 Serge 7162
	val = 0;
3031 serge 7163
 
3746 Serge 7164
	switch (intel_crtc->config.pipe_bpp) {
3031 serge 7165
	case 18:
3480 Serge 7166
		val |= PIPECONF_6BPC;
3031 serge 7167
		break;
7168
	case 24:
3480 Serge 7169
		val |= PIPECONF_8BPC;
3031 serge 7170
		break;
7171
	case 30:
3480 Serge 7172
		val |= PIPECONF_10BPC;
3031 serge 7173
		break;
7174
	case 36:
3480 Serge 7175
		val |= PIPECONF_12BPC;
3031 serge 7176
		break;
7177
	default:
3243 Serge 7178
		/* Case prevented by intel_choose_pipe_bpp_dither. */
7179
		BUG();
3031 serge 7180
	}
7181
 
4104 Serge 7182
	if (intel_crtc->config.dither)
3031 serge 7183
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7184
 
4104 Serge 7185
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3031 serge 7186
		val |= PIPECONF_INTERLACED_ILK;
7187
	else
7188
		val |= PIPECONF_PROGRESSIVE;
7189
 
3746 Serge 7190
	if (intel_crtc->config.limited_color_range)
3480 Serge 7191
		val |= PIPECONF_COLOR_RANGE_SELECT;
7192
 
3031 serge 7193
	I915_WRITE(PIPECONF(pipe), val);
7194
	POSTING_READ(PIPECONF(pipe));
7195
}
7196
 
3480 Serge 7197
/*
7198
 * Set up the pipe CSC unit.
7199
 *
7200
 * Currently only full range RGB to limited range RGB conversion
7201
 * is supported, but eventually this should handle various
7202
 * RGB<->YCbCr scenarios as well.
7203
 */
3746 Serge 7204
static void intel_set_pipe_csc(struct drm_crtc *crtc)
3480 Serge 7205
{
7206
	struct drm_device *dev = crtc->dev;
7207
	struct drm_i915_private *dev_priv = dev->dev_private;
7208
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7209
	int pipe = intel_crtc->pipe;
7210
	uint16_t coeff = 0x7800; /* 1.0 */
7211
 
7212
	/*
7213
	 * TODO: Check what kind of values actually come out of the pipe
7214
	 * with these coeff/postoff values and adjust to get the best
7215
	 * accuracy. Perhaps we even need to take the bpc value into
7216
	 * consideration.
7217
	 */
7218
 
3746 Serge 7219
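	/* (235 - 16) / 255 ~= 0.86: compress full-range RGB into the 16..235
	 * limited range. */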
	if (intel_crtc->config.limited_color_range)
3480 Serge 7220
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
7221
 
7222
	/*
7223
	 * GY/GU and RY/RU should be the other way around according
7224
	 * to BSpec, but reality doesn't agree. Just set them up in
7225
	 * a way that results in the correct picture.
7226
	 */
7227
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
7228
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
7229
 
7230
	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
7231
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
7232
 
7233
	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
7234
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
7235
 
7236
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
7237
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
7238
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
7239
 
7240
	if (INTEL_INFO(dev)->gen > 6) {
7241
		uint16_t postoff = 0;
7242
 
3746 Serge 7243
		if (intel_crtc->config.limited_color_range)
4398 Serge 7244
			postoff = (16 * (1 << 12) / 255) & 0x1fff;
3480 Serge 7245
 
7246
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
7247
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
7248
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
7249
 
7250
		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
7251
	} else {
7252
		uint32_t mode = CSC_MODE_YUV_TO_RGB;
7253
 
3746 Serge 7254
		if (intel_crtc->config.limited_color_range)
3480 Serge 7255
			mode |= CSC_BLACK_SCREEN_OFFSET;
7256
 
7257
		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
7258
	}
7259
}
7260
 
4104 Serge 7261
static void haswell_set_pipeconf(struct drm_crtc *crtc)
3243 Serge 7262
{
4560 Serge 7263
	struct drm_device *dev = crtc->dev;
7264
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 7265
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4560 Serge 7266
	enum pipe pipe = intel_crtc->pipe;
3746 Serge 7267
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 7268
	uint32_t val;
7269
 
4104 Serge 7270
	val = 0;
3243 Serge 7271
 
4560 Serge 7272
	if (IS_HASWELL(dev) && intel_crtc->config.dither)
3243 Serge 7273
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
7274
 
4104 Serge 7275
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3243 Serge 7276
		val |= PIPECONF_INTERLACED_ILK;
7277
	else
7278
		val |= PIPECONF_PROGRESSIVE;
7279
 
7280
	I915_WRITE(PIPECONF(cpu_transcoder), val);
7281
	POSTING_READ(PIPECONF(cpu_transcoder));
4104 Serge 7282
 
7283
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
7284
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
4560 Serge 7285
 
5354 serge 7286
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
4560 Serge 7287
		val = 0;
7288
 
7289
		switch (intel_crtc->config.pipe_bpp) {
7290
		case 18:
7291
			val |= PIPEMISC_DITHER_6_BPC;
7292
			break;
7293
		case 24:
7294
			val |= PIPEMISC_DITHER_8_BPC;
7295
			break;
7296
		case 30:
7297
			val |= PIPEMISC_DITHER_10_BPC;
7298
			break;
7299
		case 36:
7300
			val |= PIPEMISC_DITHER_12_BPC;
7301
			break;
7302
		default:
7303
			/* Case prevented by pipe_config_set_bpp. */
7304
			BUG();
7305
		}
7306
 
7307
		if (intel_crtc->config.dither)
7308
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
7309
 
7310
		I915_WRITE(PIPEMISC(pipe), val);
7311
	}
3243 Serge 7312
}
7313
 
3031 serge 7314
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7315
				    intel_clock_t *clock,
7316
				    bool *has_reduced_clock,
7317
				    intel_clock_t *reduced_clock)
7318
{
7319
	struct drm_device *dev = crtc->dev;
7320
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 7321
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 7322
	int refclk;
7323
	const intel_limit_t *limit;
4104 Serge 7324
	bool ret, is_lvds = false;
3031 serge 7325
 
5354 serge 7326
	is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
3031 serge 7327
 
7328
	refclk = ironlake_get_refclk(crtc);
7329
 
7330
	/*
7331
	 * Returns a set of divisors for the desired target clock with the given
7332
	 * refclk, or FALSE.  The returned values represent the clock equation:
7333
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
7334
	 */
5354 serge 7335
	limit = intel_limit(intel_crtc, refclk);
7336
	ret = dev_priv->display.find_dpll(limit, intel_crtc,
7337
					  intel_crtc->new_config->port_clock,
4104 Serge 7338
					  refclk, NULL, clock);
3031 serge 7339
	if (!ret)
7340
		return false;
7341
 
7342
	if (is_lvds && dev_priv->lvds_downclock_avail) {
7343
		/*
7344
		 * Ensure we match the reduced clock's P to the target clock.
7345
		 * If the clocks don't match, we can't switch the display clock
7346
		 * by using the FP0/FP1. In such case we will disable the LVDS
7347
		 * downclock feature.
7348
		*/
4104 Serge 7349
		*has_reduced_clock =
5354 serge 7350
			dev_priv->display.find_dpll(limit, intel_crtc,
3031 serge 7351
						     dev_priv->lvds_downclock,
4104 Serge 7352
						    refclk, clock,
3031 serge 7353
						     reduced_clock);
7354
	}
7355
 
7356
	return true;
7357
}
7358
 
3243 Serge 7359
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
7360
{
7361
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
7366
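	/* Worked example (illustrative numbers only): a 148500 kHz pixel clock
	 * at 24 bpp over a 270000 kHz link gives
	 * bps = 148500 * 24 * 21 / 20 = 3742200, so
	 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes. */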
	u32 bps = target_clock * bpp * 21 / 20;
5060 serge 7367
	return DIV_ROUND_UP(bps, link_bw * 8);
3243 Serge 7368
}
7369
 
4104 Serge 7370
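/* Returns true when the computed feedback divider M is less than factor * N,
 * in which case the caller sets FP_CB_TUNE in the FP register. */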
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
2327 Serge 7371
{
4104 Serge 7372
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
3746 Serge 7373
}
7374
 
3243 Serge 7375
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
4104 Serge 7376
				      u32 *fp,
3746 Serge 7377
				      intel_clock_t *reduced_clock, u32 *fp2)
3243 Serge 7378
{
7379
	struct drm_crtc *crtc = &intel_crtc->base;
7380
	struct drm_device *dev = crtc->dev;
7381
	struct drm_i915_private *dev_priv = dev->dev_private;
7382
	struct intel_encoder *intel_encoder;
7383
	uint32_t dpll;
3746 Serge 7384
	int factor, num_connectors = 0;
4104 Serge 7385
	bool is_lvds = false, is_sdvo = false;
3243 Serge 7386
 
5354 serge 7387
	for_each_intel_encoder(dev, intel_encoder) {
7388
		if (intel_encoder->new_crtc != to_intel_crtc(crtc))
7389
			continue;
7390
 
3243 Serge 7391
		switch (intel_encoder->type) {
7392
		case INTEL_OUTPUT_LVDS:
7393
			is_lvds = true;
7394
			break;
7395
		case INTEL_OUTPUT_SDVO:
7396
		case INTEL_OUTPUT_HDMI:
7397
			is_sdvo = true;
7398
			break;
5354 serge 7399
		default:
7400
			break;
3243 Serge 7401
		}
7402
 
7403
		num_connectors++;
7404
	}
7405
 
2327 Serge 7406
	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (intel_crtc->new_config->sdvo_tv_clock)
		factor = 20;
7415
 
5354 serge 7416
	if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
3746 Serge 7417
		*fp |= FP_CB_TUNE;
2327 Serge 7418
 
3746 Serge 7419
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
7420
		*fp2 |= FP_CB_TUNE;
7421
 
2327 Serge 7422
	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
4104 Serge 7428
 
5354 serge 7429
	dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
3746 Serge 7430
				<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
2327 Serge 7431
 
4104 Serge 7432
	if (is_sdvo)
7433
		dpll |= DPLL_SDVO_HIGH_SPEED;
5354 serge 7434
	if (intel_crtc->new_config->has_dp_encoder)
4104 Serge 7435
		dpll |= DPLL_SDVO_HIGH_SPEED;
7436
 
2327 Serge 7437
	/* compute bitmask from p1 value */
	dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (intel_crtc->new_config->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;
7461
 
4104 Serge 7462
	return dpll | DPLL_VCO_ENABLE;
3243 Serge 7463
}
7464
 
5354 serge 7465
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
3243 Serge 7466
{
5354 serge 7467
	struct drm_device *dev = crtc->base.dev;
3243 Serge 7468
	intel_clock_t clock, reduced_clock;
4104 Serge 7469
	u32 dpll = 0, fp = 0, fp2 = 0;
3243 Serge 7470
	bool ok, has_reduced_clock = false;
3746 Serge 7471
	bool is_lvds = false;
4104 Serge 7472
	struct intel_shared_dpll *pll;
3243 Serge 7473
 
5354 serge 7474
	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
3243 Serge 7475
 
7476
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
7477
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
7478
 
5354 serge 7479
	ok = ironlake_compute_clocks(&crtc->base, &clock,
3243 Serge 7480
				     &has_reduced_clock, &reduced_clock);
5354 serge 7481
	if (!ok && !crtc->new_config->clock_set) {
3243 Serge 7482
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7483
		return -EINVAL;
7484
	}
3746 Serge 7485
	/* Compat-code for transition, will disappear. */
5354 serge 7486
	if (!crtc->new_config->clock_set) {
7487
		crtc->new_config->dpll.n = clock.n;
7488
		crtc->new_config->dpll.m1 = clock.m1;
7489
		crtc->new_config->dpll.m2 = clock.m2;
7490
		crtc->new_config->dpll.p1 = clock.p1;
7491
		crtc->new_config->dpll.p2 = clock.p2;
3746 Serge 7492
	}
3243 Serge 7493
 
4104 Serge 7494
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5354 serge 7495
	if (crtc->new_config->has_pch_encoder) {
7496
		fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
3243 Serge 7497
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
3243 Serge 7499
 
5354 serge 7500
		dpll = ironlake_compute_dpll(crtc,
4104 Serge 7501
					     &fp, &reduced_clock,
5060 serge 7502
					     has_reduced_clock ? &fp2 : NULL);
3243 Serge 7503
 
5354 serge 7504
		crtc->new_config->dpll_hw_state.dpll = dpll;
7505
		crtc->new_config->dpll_hw_state.fp0 = fp;
4104 Serge 7506
		if (has_reduced_clock)
5354 serge 7507
			crtc->new_config->dpll_hw_state.fp1 = fp2;
4104 Serge 7508
		else
5354 serge 7509
			crtc->new_config->dpll_hw_state.fp1 = fp;
2327 Serge 7510
 
5354 serge 7511
		pll = intel_get_shared_dpll(crtc);
3031 serge 7512
		if (pll == NULL) {
4104 Serge 7513
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
5354 serge 7514
					 pipe_name(crtc->pipe));
2342 Serge 7515
			return -EINVAL;
2327 Serge 7516
		}
5354 serge 7517
	}
2327 Serge 7518
 
5060 serge 7519
	if (is_lvds && has_reduced_clock && i915.powersave)
5354 serge 7520
		crtc->lowfreq_avail = true;
4104 Serge 7521
	else
5354 serge 7522
		crtc->lowfreq_avail = false;
2327 Serge 7523
 
5060 serge 7524
	return 0;
4104 Serge 7525
}
3243 Serge 7526
 
4560 Serge 7527
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7528
					 struct intel_link_m_n *m_n)
4104 Serge 7529
{
7530
	struct drm_device *dev = crtc->base.dev;
7531
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 7532
	enum pipe pipe = crtc->pipe;
4104 Serge 7533
 
4560 Serge 7534
	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
7535
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
7536
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
7537
		& ~TU_SIZE_MASK;
7538
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
7539
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
7540
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7541
}
7542
 
7543
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7544
					 enum transcoder transcoder,
5354 serge 7545
					 struct intel_link_m_n *m_n,
7546
					 struct intel_link_m_n *m2_n2)
4560 Serge 7547
{
7548
	struct drm_device *dev = crtc->base.dev;
7549
	struct drm_i915_private *dev_priv = dev->dev_private;
7550
	enum pipe pipe = crtc->pipe;
7551
 
7552
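	/* Gen5+ keeps link M/N in per-transcoder registers; older parts use
	 * the per-pipe G4X registers read below. */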
	if (INTEL_INFO(dev)->gen >= 5) {
7553
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
7554
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
7555
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
4104 Serge 7556
					& ~TU_SIZE_MASK;
4560 Serge 7557
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
7558
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
4104 Serge 7559
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5354 serge 7560
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
7561
		 * gen < 8) and if DRRS is supported (to make sure the
7562
		 * registers are not unnecessarily read).
7563
		 */
7564
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
7565
			crtc->config.has_drrs) {
7566
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
7567
			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
7568
			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
7569
					& ~TU_SIZE_MASK;
7570
			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
7571
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
7572
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7573
		}
4560 Serge 7574
	} else {
7575
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
7576
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
7577
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
7578
			& ~TU_SIZE_MASK;
7579
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
7580
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
7581
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7582
	}
3243 Serge 7583
}
7584
 
4560 Serge 7585
void intel_dp_get_m_n(struct intel_crtc *crtc,
7586
		      struct intel_crtc_config *pipe_config)
7587
{
7588
	if (crtc->config.has_pch_encoder)
7589
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7590
	else
7591
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5354 serge 7592
					     &pipe_config->dp_m_n,
7593
					     &pipe_config->dp_m2_n2);
4560 Serge 7594
}
7595
 
7596
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7597
					struct intel_crtc_config *pipe_config)
7598
{
7599
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5354 serge 7600
				     &pipe_config->fdi_m_n, NULL);
4560 Serge 7601
}
7602
 
5354 serge 7603
static void skylake_get_pfit_config(struct intel_crtc *crtc,
7604
				    struct intel_crtc_config *pipe_config)
7605
{
7606
	struct drm_device *dev = crtc->base.dev;
7607
	struct drm_i915_private *dev_priv = dev->dev_private;
7608
	uint32_t tmp;
7609
 
7610
	tmp = I915_READ(PS_CTL(crtc->pipe));
7611
 
7612
	if (tmp & PS_ENABLE) {
7613
		pipe_config->pch_pfit.enabled = true;
7614
		pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
7615
		pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
7616
	}
7617
}
7618
 
4104 Serge 7619
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
7620
				     struct intel_crtc_config *pipe_config)
7621
{
7622
	struct drm_device *dev = crtc->base.dev;
7623
	struct drm_i915_private *dev_priv = dev->dev_private;
7624
	uint32_t tmp;
7625
 
7626
	tmp = I915_READ(PF_CTL(crtc->pipe));
7627
 
7628
	if (tmp & PF_ENABLE) {
7629
		pipe_config->pch_pfit.enabled = true;
7630
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
7631
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
7632
 
7633
		/* We currently do not free assignments of panel fitters on
7634
		 * ivb/hsw (since we don't use the higher upscaling modes which
7635
		 * differentiates them) so just WARN about this case for now. */
7636
		if (IS_GEN7(dev)) {
7637
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
7638
				PF_PIPE_SEL_IVB(crtc->pipe));
7639
		}
7640
	}
7641
}
7642
 
5060 serge 7643
static void ironlake_get_plane_config(struct intel_crtc *crtc,
7644
				      struct intel_plane_config *plane_config)
7645
{
7646
	struct drm_device *dev = crtc->base.dev;
7647
	struct drm_i915_private *dev_priv = dev->dev_private;
7648
	u32 val, base, offset;
7649
	int pipe = crtc->pipe, plane = crtc->plane;
7650
	int fourcc, pixel_format;
7651
	int aligned_height;
7652
 
7653
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
7654
	if (!crtc->base.primary->fb) {
7655
		DRM_DEBUG_KMS("failed to alloc fb\n");
7656
		return;
7657
	}
7658
 
7659
	val = I915_READ(DSPCNTR(plane));
7660
 
7661
	if (INTEL_INFO(dev)->gen >= 4)
7662
		if (val & DISPPLANE_TILED)
7663
			plane_config->tiled = true;
7664
 
7665
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7666
	fourcc = intel_format_to_fourcc(pixel_format);
7667
	crtc->base.primary->fb->pixel_format = fourcc;
7668
	crtc->base.primary->fb->bits_per_pixel =
7669
		drm_format_plane_cpp(fourcc, 0) * 8;
7670
 
7671
	base = I915_READ(DSPSURF(plane)) & 0xfffff000;
7672
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7673
		offset = I915_READ(DSPOFFSET(plane));
7674
	} else {
7675
		if (plane_config->tiled)
7676
			offset = I915_READ(DSPTILEOFF(plane));
7677
		else
7678
			offset = I915_READ(DSPLINOFF(plane));
7679
	}
7680
	plane_config->base = base;
7681
 
7682
	val = I915_READ(PIPESRC(pipe));
7683
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
7684
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
7685
 
7686
	val = I915_READ(DSPSTRIDE(pipe));
5354 serge 7687
	crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
5060 serge 7688
 
7689
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
7690
					    plane_config->tiled);
7691
 
5354 serge 7692
	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
7693
					aligned_height);
5060 serge 7694
 
7695
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7696
		      pipe, plane, crtc->base.primary->fb->width,
7697
		      crtc->base.primary->fb->height,
7698
		      crtc->base.primary->fb->bits_per_pixel, base,
7699
		      crtc->base.primary->fb->pitches[0],
7700
		      plane_config->size);
7701
}
7702
 
3746 Serge 7703
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7704
				     struct intel_crtc_config *pipe_config)
7705
{
7706
	struct drm_device *dev = crtc->base.dev;
7707
	struct drm_i915_private *dev_priv = dev->dev_private;
7708
	uint32_t tmp;
7709
 
5354 serge 7710
	if (!intel_display_power_is_enabled(dev_priv,
5060 serge 7711
					 POWER_DOMAIN_PIPE(crtc->pipe)))
7712
		return false;
7713
 
4104 Serge 7714
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7715
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7716
 
3746 Serge 7717
	tmp = I915_READ(PIPECONF(crtc->pipe));
7718
	if (!(tmp & PIPECONF_ENABLE))
7719
		return false;
7720
 
4280 Serge 7721
	switch (tmp & PIPECONF_BPC_MASK) {
7722
	case PIPECONF_6BPC:
7723
		pipe_config->pipe_bpp = 18;
7724
		break;
7725
	case PIPECONF_8BPC:
7726
		pipe_config->pipe_bpp = 24;
7727
		break;
7728
	case PIPECONF_10BPC:
7729
		pipe_config->pipe_bpp = 30;
7730
		break;
7731
	case PIPECONF_12BPC:
7732
		pipe_config->pipe_bpp = 36;
7733
		break;
7734
	default:
7735
		break;
7736
	}
7737
 
5060 serge 7738
	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7739
		pipe_config->limited_color_range = true;
7740
 
4104 Serge 7741
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
7742
		struct intel_shared_dpll *pll;
7743
 
3746 Serge 7744
		pipe_config->has_pch_encoder = true;
7745
 
4104 Serge 7746
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
7747
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7748
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
7749
 
7750
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
7751
 
7752
		if (HAS_PCH_IBX(dev_priv->dev)) {
7753
			pipe_config->shared_dpll =
7754
				(enum intel_dpll_id) crtc->pipe;
7755
		} else {
7756
			tmp = I915_READ(PCH_DPLL_SEL);
7757
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
7758
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
7759
			else
7760
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
7761
		}
7762
 
7763
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7764
 
7765
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
7766
					   &pipe_config->dpll_hw_state));
7767
 
7768
		tmp = pipe_config->dpll_hw_state.dpll;
7769
		pipe_config->pixel_multiplier =
7770
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
7771
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
4560 Serge 7772
 
7773
		ironlake_pch_clock_get(crtc, pipe_config);
4104 Serge 7774
	} else {
7775
		pipe_config->pixel_multiplier = 1;
7776
	}
7777
 
7778
	intel_get_pipe_timings(crtc, pipe_config);
7779
 
7780
	ironlake_get_pfit_config(crtc, pipe_config);
7781
 
3746 Serge 7782
	return true;
7783
}
7784
 
4104 Serge 7785
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7786
{
7787
	struct drm_device *dev = dev_priv->dev;
7788
	struct intel_crtc *crtc;
7789
 
5060 serge 7790
	for_each_intel_crtc(dev, crtc)
4539 Serge 7791
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
4104 Serge 7792
		     pipe_name(crtc->pipe));
7793
 
7794
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
5060 serge 7795
	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
7796
	WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
7797
	WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
4104 Serge 7798
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
7799
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
7800
	     "CPU PWM1 enabled\n");
5060 serge 7801
	if (IS_HASWELL(dev))
		WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
7804
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
7805
	     "PCH PWM1 enabled\n");
7806
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
7807
	     "Utility pin enabled\n");
7808
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
7809
 
5060 serge 7810
	/*
7811
	 * In theory we can still leave IRQs enabled, as long as only the HPD
7812
	 * interrupts remain enabled. We used to check for that, but since it's
7813
	 * gen-specific and since we only disable LCPLL after we fully disable
7814
	 * the interrupts, the check below should be enough.
7815
	 */
7816
	WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4104 Serge 7817
}
7818
 
5060 serge 7819
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
7820
{
7821
	struct drm_device *dev = dev_priv->dev;
7822
 
7823
	if (IS_HASWELL(dev))
7824
		return I915_READ(D_COMP_HSW);
7825
	else
7826
		return I915_READ(D_COMP_BDW);
7827
}
7828
 
7829
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7830
{
7831
	struct drm_device *dev = dev_priv->dev;
7832
 
7833
	if (IS_HASWELL(dev)) {
7834
		mutex_lock(&dev_priv->rps.hw_lock);
7835
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7836
					    val))
7837
			DRM_ERROR("Failed to write to D_COMP\n");
7838
		mutex_unlock(&dev_priv->rps.hw_lock);
7839
	} else {
7840
		I915_WRITE(D_COMP_BDW, val);
7841
		POSTING_READ(D_COMP_BDW);
7842
	}
7843
}
7844
 
4104 Serge 7845
/*
7846
 * This function implements pieces of two sequences from BSpec:
7847
 * - Sequence for display software to disable LCPLL
7848
 * - Sequence for display software to allow package C8+
7849
 * The steps implemented here are just the steps that actually touch the LCPLL
7850
 * register. Callers should take care of disabling all the display engine
7851
 * functions, doing the mode unset, fixing interrupts, etc.
7852
 */
4560 Serge 7853
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4104 Serge 7854
		       bool switch_to_fclk, bool allow_power_down)
7855
{
7856
	uint32_t val;
7857
 
7858
	assert_can_disable_lcpll(dev_priv);
7859
 
7860
	val = I915_READ(LCPLL_CTL);
7861
 
7862
	if (switch_to_fclk) {
7863
		val |= LCPLL_CD_SOURCE_FCLK;
7864
		I915_WRITE(LCPLL_CTL, val);
7865
 
7866
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
7867
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
7868
			DRM_ERROR("Switching to FCLK failed\n");
7869
 
7870
		val = I915_READ(LCPLL_CTL);
7871
	}
7872
 
7873
	val |= LCPLL_PLL_DISABLE;
7874
	I915_WRITE(LCPLL_CTL, val);
7875
	POSTING_READ(LCPLL_CTL);
7876
 
7877
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
7878
		DRM_ERROR("LCPLL still locked\n");
7879
 
5060 serge 7880
	val = hsw_read_dcomp(dev_priv);
4104 Serge 7881
	val |= D_COMP_COMP_DISABLE;
5060 serge 7882
	hsw_write_dcomp(dev_priv, val);
7883
	ndelay(100);
4104 Serge 7884
 
5060 serge 7885
	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
7886
		     1))
4104 Serge 7887
		DRM_ERROR("D_COMP RCOMP still in progress\n");
7888
 
7889
	if (allow_power_down) {
7890
		val = I915_READ(LCPLL_CTL);
7891
		val |= LCPLL_POWER_DOWN_ALLOW;
7892
		I915_WRITE(LCPLL_CTL, val);
7893
		POSTING_READ(LCPLL_CTL);
7894
	}
7895
}
7896
 
7897
/*
7898
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
7899
 * source.
7900
 */
4560 Serge 7901
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4104 Serge 7902
{
7903
	uint32_t val;
7904
 
7905
	val = I915_READ(LCPLL_CTL);
7906
 
7907
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
7908
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
7909
		return;
7910
 
5060 serge 7911
	/*
7912
	 * Make sure we're not in PC8 state before disabling PC8, otherwise
7913
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
7914
	 *
7915
	 * The other problem is that hsw_restore_lcpll() is called as part of
7916
	 * the runtime PM resume sequence, so we can't just call
7917
	 * gen6_gt_force_wake_get() because that function calls
7918
	 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
7919
	 * while we are on the resume sequence. So to solve this problem we have
7920
	 * to call special forcewake code that doesn't touch runtime PM and
7921
	 * doesn't enable the forcewake delayed work.
7922
	 */
5354 serge 7923
	spin_lock_irq(&dev_priv->uncore.lock);
5060 serge 7924
	if (dev_priv->uncore.forcewake_count++ == 0)
7925
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
5354 serge 7926
	spin_unlock_irq(&dev_priv->uncore.lock);
4104 Serge 7927
 
7928
	if (val & LCPLL_POWER_DOWN_ALLOW) {
7929
		val &= ~LCPLL_POWER_DOWN_ALLOW;
7930
		I915_WRITE(LCPLL_CTL, val);
7931
		POSTING_READ(LCPLL_CTL);
7932
	}
7933
 
5060 serge 7934
	val = hsw_read_dcomp(dev_priv);
4104 Serge 7935
	val |= D_COMP_COMP_FORCE;
7936
	val &= ~D_COMP_COMP_DISABLE;
5060 serge 7937
	hsw_write_dcomp(dev_priv, val);
4104 Serge 7938
 
7939
	val = I915_READ(LCPLL_CTL);
7940
	val &= ~LCPLL_PLL_DISABLE;
7941
	I915_WRITE(LCPLL_CTL, val);
7942
 
7943
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
7944
		DRM_ERROR("LCPLL not locked yet\n");
7945
 
7946
	if (val & LCPLL_CD_SOURCE_FCLK) {
7947
		val = I915_READ(LCPLL_CTL);
7948
		val &= ~LCPLL_CD_SOURCE_FCLK;
7949
		I915_WRITE(LCPLL_CTL, val);
7950
 
7951
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
7952
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
7953
			DRM_ERROR("Switching back to LCPLL failed\n");
7954
	}
7955
 
5060 serge 7956
	/* See the big comment above. */
5354 serge 7957
	spin_lock_irq(&dev_priv->uncore.lock);
5060 serge 7958
	if (--dev_priv->uncore.forcewake_count == 0)
7959
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
5354 serge 7960
	spin_unlock_irq(&dev_priv->uncore.lock);
4104 Serge 7961
}
7962
 
5060 serge 7963
/*
7964
 * Package states C8 and deeper are really deep PC states that can only be
7965
 * reached when all the devices on the system allow it, so even if the graphics
7966
 * device allows PC8+, it doesn't mean the system will actually get to these
7967
 * states. Our driver only allows PC8+ when going into runtime PM.
7968
 *
7969
 * The requirements for PC8+ are that all the outputs are disabled, the power
7970
 * well is disabled and most interrupts are disabled, and these are also
7971
 * requirements for runtime PM. When these conditions are met, we manually do
7972
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
7973
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
7974
 * hang the machine.
7975
 *
7976
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
7977
 * the state of some registers, so when we come back from PC8+ we need to
7978
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
7979
 * need to take care of the registers kept by RC6. Notice that this happens even
7980
 * if we don't put the device in PCI D3 state (which is what currently happens
7981
 * because of the runtime PM support).
7982
 *
7983
 * For more, read "Display Sequences for Package C8" on the hardware
7984
 * documentation.
7985
 */
7986
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4104 Serge 7987
{
7988
	struct drm_device *dev = dev_priv->dev;
7989
	uint32_t val;
7990
 
7991
	DRM_DEBUG_KMS("Enabling package C8+\n");
7992
 
7993
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7994
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
7995
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7996
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7997
	}
7998
 
7999
	lpt_disable_clkout_dp(dev);
8000
	hsw_disable_lcpll(dev_priv, true, true);
8001
}
8002
 
5060 serge 8003
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4104 Serge 8004
{
8005
	struct drm_device *dev = dev_priv->dev;
8006
	uint32_t val;
8007
 
8008
	DRM_DEBUG_KMS("Disabling package C8+\n");
8009
 
8010
	hsw_restore_lcpll(dev_priv);
8011
	lpt_init_pch_refclk(dev);
8012
 
8013
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
8014
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
8015
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
8016
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8017
	}
8018
 
8019
	intel_prepare_ddi(dev);
8020
}
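/*
 * Illustrative sketch, not part of the driver: hsw_enable_pc8() and
 * hsw_disable_pc8() are meant to be called as a pair around a period in
 * which the display is completely idle, e.g. from hypothetical runtime-PM
 * hooks like the ones below.  The hook names and call sites are assumptions;
 * only the pairing and ordering are being illustrated.
 */
static int __maybe_unused example_display_runtime_suspend(struct drm_i915_private *dev_priv)
{
	/* All pipes, encoders and interrupts must already be disabled. */
	hsw_enable_pc8(dev_priv);
	return 0;
}

static int __maybe_unused example_display_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* Restores LCPLL, the PCH refclk and the DDI buffer translations. */
	hsw_disable_pc8(dev_priv);
	return 0;
}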
8021
 
5354 serge 8022
static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
4104 Serge 8023
{
5354 serge 8024
	if (!intel_ddi_pll_select(crtc))
8025
		return -EINVAL;
8026
 
8027
	crtc->lowfreq_avail = false;
8028
 
8029
	return 0;
4104 Serge 8030
}
8031
 
5354 serge 8032
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
8033
				enum port port,
8034
				struct intel_crtc_config *pipe_config)
4104 Serge 8035
{
5354 serge 8036
	u32 temp;
8037
 
8038
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
8039
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
8040
 
8041
	switch (pipe_config->ddi_pll_sel) {
8042
	case SKL_DPLL1:
8043
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
8044
		break;
8045
	case SKL_DPLL2:
8046
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
8047
		break;
8048
	case SKL_DPLL3:
8049
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
8050
		break;
8051
	}
4104 Serge 8052
}
8053
 
5354 serge 8054
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
8055
				enum port port,
8056
				struct intel_crtc_config *pipe_config)
4104 Serge 8057
{
5354 serge 8058
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
4104 Serge 8059
 
5354 serge 8060
	switch (pipe_config->ddi_pll_sel) {
8061
	case PORT_CLK_SEL_WRPLL1:
8062
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
8063
		break;
8064
	case PORT_CLK_SEL_WRPLL2:
8065
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
8066
		break;
8067
	}
4104 Serge 8068
}
8069
 
5060 serge 8070
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
8071
				       struct intel_crtc_config *pipe_config)
4104 Serge 8072
{
5060 serge 8073
	struct drm_device *dev = crtc->base.dev;
4104 Serge 8074
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 8075
	struct intel_shared_dpll *pll;
8076
	enum port port;
8077
	uint32_t tmp;
4104 Serge 8078
 
5060 serge 8079
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
4560 Serge 8080
 
5060 serge 8081
	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
4104 Serge 8082
 
5354 serge 8083
	if (IS_SKYLAKE(dev))
8084
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
8085
	else
8086
		haswell_get_ddi_pll(dev_priv, port, pipe_config);
4104 Serge 8087
 
5060 serge 8088
	if (pipe_config->shared_dpll >= 0) {
8089
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
4560 Serge 8090
 
5060 serge 8091
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
8092
					   &pipe_config->dpll_hw_state));
4104 Serge 8093
	}
8094
 
4560 Serge 8095
	/*
5060 serge 8096
	 * Haswell has only FDI/PCH transcoder A, which is connected to
8097
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
8098
	 * the PCH transcoder is on.
4560 Serge 8099
	 */
5354 serge 8100
	if (INTEL_INFO(dev)->gen < 9 &&
8101
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
5060 serge 8102
		pipe_config->has_pch_encoder = true;
4560 Serge 8103
 
5060 serge 8104
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
8105
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8106
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
3480 Serge 8107
 
5060 serge 8108
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
3480 Serge 8109
	}
4560 Serge 8110
}
8111
 
3746 Serge 8112
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
8113
				    struct intel_crtc_config *pipe_config)
8114
{
8115
	struct drm_device *dev = crtc->base.dev;
8116
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 8117
	enum intel_display_power_domain pfit_domain;
3746 Serge 8118
	uint32_t tmp;
8119
 
5354 serge 8120
	if (!intel_display_power_is_enabled(dev_priv,
5060 serge 8121
					 POWER_DOMAIN_PIPE(crtc->pipe)))
8122
		return false;
8123
 
4104 Serge 8124
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8125
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8126
 
8127
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
8128
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
8129
		enum pipe trans_edp_pipe;
8130
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
8131
		default:
8132
			WARN(1, "unknown pipe linked to edp transcoder\n");
8133
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
8134
		case TRANS_DDI_EDP_INPUT_A_ON:
8135
			trans_edp_pipe = PIPE_A;
8136
			break;
8137
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
8138
			trans_edp_pipe = PIPE_B;
8139
			break;
8140
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
8141
			trans_edp_pipe = PIPE_C;
8142
			break;
8143
		}
8144
 
8145
		if (trans_edp_pipe == crtc->pipe)
8146
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
8147
	}
8148
 
5354 serge 8149
	if (!intel_display_power_is_enabled(dev_priv,
4104 Serge 8150
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
8151
		return false;
8152
 
8153
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
3746 Serge 8154
	if (!(tmp & PIPECONF_ENABLE))
8155
		return false;
8156
 
5060 serge 8157
	haswell_get_ddi_port_state(crtc, pipe_config);
3746 Serge 8158
 
4104 Serge 8159
	intel_get_pipe_timings(crtc, pipe_config);
8160
 
8161
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
5354 serge 8162
	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
8163
		if (IS_SKYLAKE(dev))
8164
			skylake_get_pfit_config(crtc, pipe_config);
8165
		else
4104 Serge 8166
			ironlake_get_pfit_config(crtc, pipe_config);
5354 serge 8167
	}
4104 Serge 8168
 
4560 Serge 8169
	if (IS_HASWELL(dev))
4104 Serge 8170
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
8171
				   (I915_READ(IPS_CTL) & IPS_ENABLE);
8172
 
5354 serge 8173
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
8174
		pipe_config->pixel_multiplier =
8175
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
8176
	} else {
4104 Serge 8177
		pipe_config->pixel_multiplier = 1;
4560 Serge 8178
	}
8179
 
2342 Serge 8180
	return true;
8181
}
8182
 
5354 serge 8183
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
2342 Serge 8184
{
5354 serge 8185
	struct drm_device *dev = crtc->dev;
8186
	struct drm_i915_private *dev_priv = dev->dev_private;
8187
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8188
	uint32_t cntl = 0, size = 0;
2342 Serge 8189
 
5354 serge 8190
	if (base) {
8191
		unsigned int width = intel_crtc->cursor_width;
8192
		unsigned int height = intel_crtc->cursor_height;
8193
		unsigned int stride = roundup_pow_of_two(width) * 4;
2342 Serge 8194
 
5354 serge 8195
		switch (stride) {
8196
		default:
8197
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
8198
				  width, stride);
8199
			stride = 256;
8200
			/* fallthrough */
8201
		case 256:
8202
		case 512:
8203
		case 1024:
8204
		case 2048:
8205
			break;
4560 Serge 8206
		}
3031 serge 8207
 
5354 serge 8208
		cntl |= CURSOR_ENABLE |
8209
			CURSOR_GAMMA_ENABLE |
8210
			CURSOR_FORMAT_ARGB |
8211
			CURSOR_STRIDE(stride);
3031 serge 8212
 
5354 serge 8213
		size = (height << 12) | width;
2342 Serge 8214
	}
8215
 
5354 serge 8216
	if (intel_crtc->cursor_cntl != 0 &&
8217
	    (intel_crtc->cursor_base != base ||
8218
	     intel_crtc->cursor_size != size ||
8219
	     intel_crtc->cursor_cntl != cntl)) {
8220
		/* On these chipsets we can only modify the base/size/stride
8221
		 * whilst the cursor is disabled.
3031 serge 8222
		 */
5060 serge 8223
		I915_WRITE(_CURACNTR, 0);
8224
		POSTING_READ(_CURACNTR);
8225
		intel_crtc->cursor_cntl = 0;
8226
	}
8227
 
5354 serge 8228
	if (intel_crtc->cursor_base != base) {
3031 serge 8229
		I915_WRITE(_CURABASE, base);
5354 serge 8230
		intel_crtc->cursor_base = base;
5060 serge 8231
	}
2327 Serge 8232
 
5354 serge 8233
	if (intel_crtc->cursor_size != size) {
8234
		I915_WRITE(CURSIZE, size);
8235
		intel_crtc->cursor_size = size;
8236
	}
8237
 
5060 serge 8238
	if (intel_crtc->cursor_cntl != cntl) {
3031 serge 8239
		I915_WRITE(_CURACNTR, cntl);
5060 serge 8240
		POSTING_READ(_CURACNTR);
8241
		intel_crtc->cursor_cntl = cntl;
8242
	}
3031 serge 8243
}
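/*
 * Illustrative helper, a sketch rather than driver code: it packs an
 * i845/i865 cursor size the same way i845_update_cursor() does above.
 * For a 64x64 ARGB cursor this gives stride = roundup_pow_of_two(64) * 4
 * = 256 bytes and size = (64 << 12) | 64 = 0x40040.
 */
static inline uint32_t __maybe_unused i845_cursor_size_example(unsigned int width,
							       unsigned int height)
{
	/* CURSIZE packs the height in bits 12+ and the width in the low bits. */
	return (height << 12) | width;
}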
2327 Serge 8244
 
3031 serge 8245
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8246
{
8247
	struct drm_device *dev = crtc->dev;
8248
	struct drm_i915_private *dev_priv = dev->dev_private;
8249
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8250
	int pipe = intel_crtc->pipe;
5060 serge 8251
	uint32_t cntl;
2327 Serge 8252
 
5060 serge 8253
	cntl = 0;
3031 serge 8254
	if (base) {
5060 serge 8255
		cntl = MCURSOR_GAMMA_ENABLE;
8256
		switch (intel_crtc->cursor_width) {
8257
			case 64:
8258
				cntl |= CURSOR_MODE_64_ARGB_AX;
8259
				break;
8260
			case 128:
8261
				cntl |= CURSOR_MODE_128_ARGB_AX;
8262
				break;
8263
			case 256:
8264
				cntl |= CURSOR_MODE_256_ARGB_AX;
8265
				break;
8266
			default:
8267
				WARN_ON(1);
8268
				return;
8269
			}
3031 serge 8270
		cntl |= pipe << 28; /* Connect to correct pipe */
2327 Serge 8271
 
5060 serge 8272
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3480 Serge 8273
			cntl |= CURSOR_PIPE_CSC_ENABLE;
5354 serge 8274
	}
5060 serge 8275
 
5354 serge 8276
	if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
8277
		cntl |= CURSOR_ROTATE_180;
8278
 
5060 serge 8279
	if (intel_crtc->cursor_cntl != cntl) {
8280
		I915_WRITE(CURCNTR(pipe), cntl);
8281
		POSTING_READ(CURCNTR(pipe));
8282
		intel_crtc->cursor_cntl = cntl;
4104 Serge 8283
	}
2327 Serge 8284
 
3031 serge 8285
	/* and commit changes on next vblank */
5060 serge 8286
	I915_WRITE(CURBASE(pipe), base);
8287
	POSTING_READ(CURBASE(pipe));
5354 serge 8288
 
8289
	intel_crtc->cursor_base = base;
3031 serge 8290
}
2327 Serge 8291
 
3031 serge 8292
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
5060 serge 8293
void intel_crtc_update_cursor(struct drm_crtc *crtc,
3031 serge 8294
				     bool on)
8295
{
8296
	struct drm_device *dev = crtc->dev;
8297
	struct drm_i915_private *dev_priv = dev->dev_private;
8298
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8299
	int pipe = intel_crtc->pipe;
5060 serge 8300
	int x = crtc->cursor_x;
8301
	int y = crtc->cursor_y;
4560 Serge 8302
	u32 base = 0, pos = 0;
2327 Serge 8303
 
4560 Serge 8304
	if (on)
8305
		base = intel_crtc->cursor_addr;
2327 Serge 8306
 
4560 Serge 8307
	if (x >= intel_crtc->config.pipe_src_w)
3031 serge 8308
		base = 0;
2327 Serge 8309
 
4560 Serge 8310
	if (y >= intel_crtc->config.pipe_src_h)
3031 serge 8311
		base = 0;
2327 Serge 8312
 
3031 serge 8313
	if (x < 0) {
4560 Serge 8314
		if (x + intel_crtc->cursor_width <= 0)
3031 serge 8315
			base = 0;
2327 Serge 8316
 
3031 serge 8317
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
8318
		x = -x;
8319
	}
8320
	pos |= x << CURSOR_X_SHIFT;
2327 Serge 8321
 
3031 serge 8322
	if (y < 0) {
4560 Serge 8323
		if (y + intel_crtc->cursor_height <= 0)
3031 serge 8324
			base = 0;
2327 Serge 8325
 
3031 serge 8326
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
8327
		y = -y;
8328
	}
8329
	pos |= y << CURSOR_Y_SHIFT;
2327 Serge 8330
 
5060 serge 8331
	if (base == 0 && intel_crtc->cursor_base == 0)
3031 serge 8332
		return;
2327 Serge 8333
 
5060 serge 8334
	I915_WRITE(CURPOS(pipe), pos);
8335
 
5354 serge 8336
	/* ILK+ do this automagically */
8337
	if (HAS_GMCH_DISPLAY(dev) &&
8338
		to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
8339
		base += (intel_crtc->cursor_height *
8340
			intel_crtc->cursor_width - 1) * 4;
8341
	}
8342
 
8343
	if (IS_845G(dev) || IS_I865G(dev))
5060 serge 8344
		i845_update_cursor(crtc, base);
8345
	else
4560 Serge 8346
		i9xx_update_cursor(crtc, base);
3031 serge 8347
}
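/*
 * Illustrative sketch (assumes the CURPOS bit layout used above): the cursor
 * position registers use a sign/magnitude encoding per axis, which is what
 * the "pos |= CURSOR_POS_SIGN ...; x = -x;" dance implements.  A cursor at
 * (-12, 5) is therefore programmed with (0x8000 | 12) in the X field and 5
 * in the Y field.
 */
static inline u32 __maybe_unused cursor_pos_encode_axis(int coord, unsigned int shift)
{
	u32 field = 0;

	if (coord < 0) {
		field |= CURSOR_POS_SIGN;
		coord = -coord;
	}
	return (field | coord) << shift;
}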
2327 Serge 8348
 
5354 serge 8349
static bool cursor_size_ok(struct drm_device *dev,
8350
			   uint32_t width, uint32_t height)
8351
{
8352
	if (width == 0 || height == 0)
8353
		return false;
8354
 
8355
	/*
8356
	 * 845g/865g are special in that they are only limited by
8357
	 * the width of their cursors, the height is arbitrary up to
8358
	 * the precision of the register. Everything else requires
8359
	 * square cursors, limited to a few power-of-two sizes.
5060 serge 8360
	 */
5354 serge 8361
	if (IS_845G(dev) || IS_I865G(dev)) {
8362
		if ((width & 63) != 0)
8363
			return false;
8364
 
8365
		if (width > (IS_845G(dev) ? 64 : 512))
8366
			return false;
8367
 
8368
		if (height > 1023)
8369
			return false;
8370
	} else {
8371
		switch (width | height) {
8372
		case 256:
8373
		case 128:
8374
			if (IS_GEN2(dev))
8375
				return false;
8376
		case 64:
8377
			break;
8378
		default:
8379
			return false;
8380
		}
8381
	}
8382
 
8383
	return true;
8384
}
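/*
 * Worked examples for cursor_size_ok() (assumed platforms and sizes, purely
 * illustrative):
 *
 *   845G/865G: 64x200  -> ok   (width is a multiple of 64, height <= 1023)
 *              100x100 -> bad  (width not a multiple of 64)
 *   gen2:      only 64x64 is accepted
 *   others:    64x64, 128x128, 256x256 -> ok (square, power-of-two)
 *              64x128  -> bad  (width | height == 192, not a valid size)
 */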
8385
 
5060 serge 8386
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8387
				     struct drm_i915_gem_object *obj,
3031 serge 8388
				 uint32_t width, uint32_t height)
8389
{
8390
	struct drm_device *dev = crtc->dev;
5354 serge 8391
	struct drm_i915_private *dev_priv = to_i915(dev);
3031 serge 8392
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 8393
	enum pipe pipe = intel_crtc->pipe;
8394
	unsigned old_width;
3031 serge 8395
	uint32_t addr;
8396
	int ret;
2327 Serge 8397
 
3031 serge 8398
	/* if we want to turn off the cursor ignore width and height */
5060 serge 8399
	if (!obj) {
3031 serge 8400
		DRM_DEBUG_KMS("cursor off\n");
8401
		addr = 0;
8402
		mutex_lock(&dev->struct_mutex);
8403
		goto finish;
8404
	}
2327 Serge 8405
 
3031 serge 8406
	/* we only need to pin inside GTT if cursor is non-phy */
8407
	mutex_lock(&dev->struct_mutex);
5060 serge 8408
	if (!INTEL_INFO(dev)->cursor_needs_physical) {
3746 Serge 8409
		unsigned alignment;
8410
 
5097 serge 8411
		/*
8412
		 * Global gtt pte registers are special registers which actually
8413
		 * forward writes to a chunk of system memory. Which means that
8414
		 * there is no risk that the register values disappear as soon
8415
		 * as we call intel_runtime_pm_put(), so it is correct to wrap
8416
		 * only the pin/unpin/fence and not more.
8417
		 */
8418
		intel_runtime_pm_get(dev_priv);
8419
 
3746 Serge 8420
		/* Note that the w/a also requires 2 PTE of padding following
8421
		 * the bo. We currently fill all unused PTE with the shadow
8422
		 * page and so we should always have valid PTE following the
8423
		 * cursor preventing the VT-d warning.
8424
		 */
8425
		alignment = 0;
8426
		if (need_vtd_wa(dev))
8427
			alignment = 64*1024;
8428
 
8429
		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
3031 serge 8430
		if (ret) {
5060 serge 8431
			DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
5097 serge 8432
			intel_runtime_pm_put(dev_priv);
3031 serge 8433
			goto fail_locked;
8434
		}
2327 Serge 8435
 
3031 serge 8436
		ret = i915_gem_object_put_fence(obj);
8437
		if (ret) {
5060 serge 8438
			DRM_DEBUG_KMS("failed to release fence for cursor");
5097 serge 8439
			intel_runtime_pm_put(dev_priv);
3031 serge 8440
			goto fail_unpin;
8441
		}
2327 Serge 8442
 
4104 Serge 8443
		addr = i915_gem_obj_ggtt_offset(obj);
5097 serge 8444
 
8445
		intel_runtime_pm_put(dev_priv);
3031 serge 8446
	} else {
5354 serge 8447
		int align = IS_I830(dev) ? 16 * 1024 : 256;
8448
		ret = 1;//i915_gem_object_attach_phys(obj, align);
8449
		if (ret) {
8450
			DRM_DEBUG_KMS("failed to attach phys object\n");
8451
			goto fail_locked;
8452
		}
8453
		addr = obj->phys_handle->busaddr;
3031 serge 8454
	}
2327 Serge 8455
 
3031 serge 8456
 finish:
8457
	if (intel_crtc->cursor_bo) {
5060 serge 8458
		if (!INTEL_INFO(dev)->cursor_needs_physical)
4104 Serge 8459
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
3031 serge 8460
	}
2327 Serge 8461
 
5060 serge 8462
	i915_gem_track_fb(intel_crtc->cursor_bo, obj,
8463
			  INTEL_FRONTBUFFER_CURSOR(pipe));
3031 serge 8464
	mutex_unlock(&dev->struct_mutex);
2327 Serge 8465
 
5060 serge 8466
	old_width = intel_crtc->cursor_width;
8467
 
3031 serge 8468
	intel_crtc->cursor_addr = addr;
8469
	intel_crtc->cursor_bo = obj;
8470
	intel_crtc->cursor_width = width;
8471
	intel_crtc->cursor_height = height;
2327 Serge 8472
 
5060 serge 8473
	if (intel_crtc->active) {
8474
		if (old_width != width)
8475
			intel_update_watermarks(crtc);
4104 Serge 8476
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
5354 serge 8477
 
8478
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
5060 serge 8479
	}
2327 Serge 8480
 
3031 serge 8481
	return 0;
8482
fail_unpin:
4104 Serge 8483
	i915_gem_object_unpin_from_display_plane(obj);
3031 serge 8484
fail_locked:
8485
	mutex_unlock(&dev->struct_mutex);
8486
	return ret;
8487
}
2327 Serge 8488
 
2330 Serge 8489
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8490
				 u16 *blue, uint32_t start, uint32_t size)
8491
{
8492
	int end = (start + size > 256) ? 256 : start + size, i;
8493
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 8494
 
2330 Serge 8495
	for (i = start; i < end; i++) {
8496
		intel_crtc->lut_r[i] = red[i] >> 8;
8497
		intel_crtc->lut_g[i] = green[i] >> 8;
8498
		intel_crtc->lut_b[i] = blue[i] >> 8;
8499
	}
2327 Serge 8500
 
2330 Serge 8501
	intel_crtc_load_lut(crtc);
8502
}
2327 Serge 8503
 
2330 Serge 8504
/* VESA 640x480x72Hz mode to set on the pipe */
8505
static struct drm_display_mode load_detect_mode = {
8506
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
8507
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
8508
};
2327 Serge 8509
 
4560 Serge 8510
struct drm_framebuffer *
5060 serge 8511
__intel_framebuffer_create(struct drm_device *dev,
3031 serge 8512
			 struct drm_mode_fb_cmd2 *mode_cmd,
8513
			 struct drm_i915_gem_object *obj)
8514
{
8515
	struct intel_framebuffer *intel_fb;
8516
	int ret;
2327 Serge 8517
 
3031 serge 8518
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8519
	if (!intel_fb) {
5354 serge 8520
		drm_gem_object_unreference(&obj->base);
3031 serge 8521
		return ERR_PTR(-ENOMEM);
8522
	}
2327 Serge 8523
 
3031 serge 8524
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
4560 Serge 8525
	if (ret)
8526
		goto err;
8527
 
8528
	return &intel_fb->base;
8529
err:
5354 serge 8530
	drm_gem_object_unreference(&obj->base);
3031 serge 8531
	kfree(intel_fb);
4560 Serge 8532
 
3031 serge 8533
	return ERR_PTR(ret);
8534
}
2327 Serge 8535
 
5060 serge 8536
static struct drm_framebuffer *
8537
intel_framebuffer_create(struct drm_device *dev,
8538
			 struct drm_mode_fb_cmd2 *mode_cmd,
8539
			 struct drm_i915_gem_object *obj)
8540
{
8541
	struct drm_framebuffer *fb;
8542
	int ret;
8543
 
8544
	ret = i915_mutex_lock_interruptible(dev);
8545
	if (ret)
8546
		return ERR_PTR(ret);
8547
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
8548
	mutex_unlock(&dev->struct_mutex);
8549
 
8550
	return fb;
8551
}
8552
 
2330 Serge 8553
static u32
8554
intel_framebuffer_pitch_for_width(int width, int bpp)
8555
{
8556
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
8557
	return ALIGN(pitch, 64);
8558
}
2327 Serge 8559
 
2330 Serge 8560
static u32
8561
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8562
{
8563
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5060 serge 8564
	return PAGE_ALIGN(pitch * mode->vdisplay);
2330 Serge 8565
}
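/*
 * Worked example (illustrative mode, not taken from the code): for a
 * 1920x1080 mode at 32 bpp,
 *
 *   pitch = ALIGN(DIV_ROUND_UP(1920 * 32, 8), 64) = ALIGN(7680, 64) = 7680
 *   size  = PAGE_ALIGN(7680 * 1080)               = 8294400 bytes
 *
 * i.e. exactly 2025 4 KiB pages, which is the object size requested by
 * intel_framebuffer_create_for_mode() below.
 */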
2327 Serge 8566
 
2330 Serge 8567
static struct drm_framebuffer *
8568
intel_framebuffer_create_for_mode(struct drm_device *dev,
8569
				  struct drm_display_mode *mode,
8570
				  int depth, int bpp)
8571
{
8572
	struct drm_i915_gem_object *obj;
3243 Serge 8573
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2327 Serge 8574
 
5060 serge 8575
	obj = i915_gem_alloc_object(dev,
8576
				    intel_framebuffer_size_for_mode(mode, bpp));
8577
	if (obj == NULL)
8578
		return ERR_PTR(-ENOMEM);
8579
 
8580
	mode_cmd.width = mode->hdisplay;
8581
	mode_cmd.height = mode->vdisplay;
8582
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
8583
								bpp);
8584
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
8585
 
8586
	return intel_framebuffer_create(dev, &mode_cmd, obj);
2330 Serge 8587
}
2327 Serge 8588
 
2330 Serge 8589
static struct drm_framebuffer *
8590
mode_fits_in_fbdev(struct drm_device *dev,
8591
		   struct drm_display_mode *mode)
8592
{
4560 Serge 8593
#ifdef CONFIG_DRM_I915_FBDEV
2330 Serge 8594
	struct drm_i915_private *dev_priv = dev->dev_private;
8595
	struct drm_i915_gem_object *obj;
8596
	struct drm_framebuffer *fb;
2327 Serge 8597
 
5060 serge 8598
	if (!dev_priv->fbdev)
4280 Serge 8599
		return NULL;
2327 Serge 8600
 
5060 serge 8601
	if (!dev_priv->fbdev->fb)
2330 Serge 8602
		return NULL;
2327 Serge 8603
 
5060 serge 8604
	obj = dev_priv->fbdev->fb->obj;
8605
	BUG_ON(!obj);
8606
 
8607
	fb = &dev_priv->fbdev->fb->base;
3031 serge 8608
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
8609
							       fb->bits_per_pixel))
4280 Serge 8610
		return NULL;
2327 Serge 8611
 
3031 serge 8612
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
8613
		return NULL;
8614
 
4280 Serge 8615
	return fb;
4560 Serge 8616
#else
8617
	return NULL;
8618
#endif
2330 Serge 8619
}
2327 Serge 8620
 
3031 serge 8621
bool intel_get_load_detect_pipe(struct drm_connector *connector,
2330 Serge 8622
				struct drm_display_mode *mode,
5060 serge 8623
				struct intel_load_detect_pipe *old,
8624
				struct drm_modeset_acquire_ctx *ctx)
2330 Serge 8625
{
8626
	struct intel_crtc *intel_crtc;
3031 serge 8627
	struct intel_encoder *intel_encoder =
8628
		intel_attached_encoder(connector);
2330 Serge 8629
	struct drm_crtc *possible_crtc;
8630
	struct drm_encoder *encoder = &intel_encoder->base;
8631
	struct drm_crtc *crtc = NULL;
8632
	struct drm_device *dev = encoder->dev;
3031 serge 8633
	struct drm_framebuffer *fb;
5060 serge 8634
	struct drm_mode_config *config = &dev->mode_config;
8635
	int ret, i = -1;
2327 Serge 8636
 
2330 Serge 8637
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5060 serge 8638
		      connector->base.id, connector->name,
8639
		      encoder->base.id, encoder->name);
2327 Serge 8640
 
5060 serge 8641
retry:
8642
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
8643
	if (ret)
8644
		goto fail_unlock;
8645
 
2330 Serge 8646
	/*
8647
	 * Algorithm gets a little messy:
8648
	 *
8649
	 *   - if the connector already has an assigned crtc, use it (but make
8650
	 *     sure it's on first)
8651
	 *
8652
	 *   - try to find the first unused crtc that can drive this connector,
8653
	 *     and use that if we find one
8654
	 */
2327 Serge 8655
 
2330 Serge 8656
	/* See if we already have a CRTC for this connector */
8657
	if (encoder->crtc) {
8658
		crtc = encoder->crtc;
2327 Serge 8659
 
5060 serge 8660
		ret = drm_modeset_lock(&crtc->mutex, ctx);
8661
		if (ret)
8662
			goto fail_unlock;
5354 serge 8663
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
8664
		if (ret)
8665
			goto fail_unlock;
3480 Serge 8666
 
3031 serge 8667
		old->dpms_mode = connector->dpms;
2330 Serge 8668
		old->load_detect_temp = false;
2327 Serge 8669
 
2330 Serge 8670
		/* Make sure the crtc and connector are running */
3031 serge 8671
		if (connector->dpms != DRM_MODE_DPMS_ON)
8672
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
2327 Serge 8673
 
2330 Serge 8674
		return true;
8675
	}
2327 Serge 8676
 
2330 Serge 8677
	/* Find an unused one (if possible) */
5060 serge 8678
	for_each_crtc(dev, possible_crtc) {
2330 Serge 8679
		i++;
8680
		if (!(encoder->possible_crtcs & (1 << i)))
8681
			continue;
5060 serge 8682
		if (possible_crtc->enabled)
8683
			continue;
8684
		/* This can occur when applying the pipe A quirk on resume. */
8685
		if (to_intel_crtc(possible_crtc)->new_enabled)
8686
			continue;
8687
 
2330 Serge 8688
		crtc = possible_crtc;
8689
		break;
8690
	}
2327 Serge 8691
 
2330 Serge 8692
	/*
8693
	 * If we didn't find an unused CRTC, don't use any.
8694
	 */
8695
	if (!crtc) {
8696
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
5060 serge 8697
		goto fail_unlock;
2330 Serge 8698
	}
2327 Serge 8699
 
5060 serge 8700
	ret = drm_modeset_lock(&crtc->mutex, ctx);
8701
	if (ret)
8702
		goto fail_unlock;
5354 serge 8703
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
8704
	if (ret)
8705
		goto fail_unlock;
3031 serge 8706
	intel_encoder->new_crtc = to_intel_crtc(crtc);
8707
	to_intel_connector(connector)->new_encoder = intel_encoder;
2327 Serge 8708
 
2330 Serge 8709
	intel_crtc = to_intel_crtc(crtc);
5060 serge 8710
	intel_crtc->new_enabled = true;
8711
	intel_crtc->new_config = &intel_crtc->config;
3031 serge 8712
	old->dpms_mode = connector->dpms;
2330 Serge 8713
	old->load_detect_temp = true;
8714
	old->release_fb = NULL;
2327 Serge 8715
 
2330 Serge 8716
	if (!mode)
8717
		mode = &load_detect_mode;
2327 Serge 8718
 
2330 Serge 8719
	/* We need a framebuffer large enough to accommodate all accesses
8720
	 * that the plane may generate whilst we perform load detection.
8721
	 * We cannot rely on the fbcon either being present (we get called
8722
	 * during its initialisation to detect all boot displays, or it may
8723
	 * not even exist) or on it being large enough to satisfy the
8724
	 * requested mode.
8725
	 */
3031 serge 8726
	fb = mode_fits_in_fbdev(dev, mode);
8727
	if (fb == NULL) {
2330 Serge 8728
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
3031 serge 8729
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
8730
		old->release_fb = fb;
2330 Serge 8731
	} else
8732
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
3031 serge 8733
	if (IS_ERR(fb)) {
2330 Serge 8734
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5060 serge 8735
		goto fail;
2330 Serge 8736
	}
2327 Serge 8737
 
3480 Serge 8738
	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
2330 Serge 8739
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
8740
		if (old->release_fb)
8741
			old->release_fb->funcs->destroy(old->release_fb);
5060 serge 8742
		goto fail;
2330 Serge 8743
	}
2327 Serge 8744
 
2330 Serge 8745
	/* let the connector get through one full cycle before testing */
8746
	intel_wait_for_vblank(dev, intel_crtc->pipe);
8747
	return true;
5060 serge 8748
 
8749
 fail:
8750
	intel_crtc->new_enabled = crtc->enabled;
8751
	if (intel_crtc->new_enabled)
8752
		intel_crtc->new_config = &intel_crtc->config;
8753
	else
8754
		intel_crtc->new_config = NULL;
8755
fail_unlock:
8756
	if (ret == -EDEADLK) {
8757
		drm_modeset_backoff(ctx);
8758
		goto retry;
8759
	}
8760
 
8761
	return false;
2330 Serge 8762
}
2327 Serge 8763
 
3031 serge 8764
void intel_release_load_detect_pipe(struct drm_connector *connector,
2330 Serge 8765
				    struct intel_load_detect_pipe *old)
8766
{
3031 serge 8767
	struct intel_encoder *intel_encoder =
8768
		intel_attached_encoder(connector);
2330 Serge 8769
	struct drm_encoder *encoder = &intel_encoder->base;
3480 Serge 8770
	struct drm_crtc *crtc = encoder->crtc;
5060 serge 8771
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 8772
 
2330 Serge 8773
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5060 serge 8774
		      connector->base.id, connector->name,
8775
		      encoder->base.id, encoder->name);
2327 Serge 8776
 
2330 Serge 8777
	if (old->load_detect_temp) {
3031 serge 8778
		to_intel_connector(connector)->new_encoder = NULL;
8779
		intel_encoder->new_crtc = NULL;
5060 serge 8780
		intel_crtc->new_enabled = false;
8781
		intel_crtc->new_config = NULL;
3031 serge 8782
		intel_set_mode(crtc, NULL, 0, 0, NULL);
8783
 
3480 Serge 8784
		if (old->release_fb) {
8785
			drm_framebuffer_unregister_private(old->release_fb);
8786
			drm_framebuffer_unreference(old->release_fb);
8787
		}
2327 Serge 8788
 
2330 Serge 8789
		return;
8790
	}
2327 Serge 8791
 
2330 Serge 8792
	/* Switch crtc and encoder back off if necessary */
3031 serge 8793
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
8794
		connector->funcs->dpms(connector, old->dpms_mode);
2330 Serge 8795
}
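/*
 * Usage sketch (an assumed caller, loosely modelled on analog connector
 * detection; not a verbatim copy of one): the two helpers above are meant
 * to bracket a forced modeset on an otherwise unused pipe while the caller
 * probes the hardware.
 */
static bool __maybe_unused example_force_load_detect(struct drm_connector *connector,
						     struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_load_detect_pipe tmp;
	bool detected = false;

	if (intel_get_load_detect_pipe(connector, NULL, &tmp, ctx)) {
		/* ... sample the load-detect status registers here ... */
		detected = true; /* placeholder for the real probe result */
		intel_release_load_detect_pipe(connector, &tmp);
	}

	return detected;
}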
2327 Serge 8796
 
4560 Serge 8797
static int i9xx_pll_refclk(struct drm_device *dev,
8798
			   const struct intel_crtc_config *pipe_config)
8799
{
8800
	struct drm_i915_private *dev_priv = dev->dev_private;
8801
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8802
 
8803
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8804
		return dev_priv->vbt.lvds_ssc_freq;
8805
	else if (HAS_PCH_SPLIT(dev))
8806
		return 120000;
8807
	else if (!IS_GEN2(dev))
8808
		return 96000;
8809
	else
8810
		return 48000;
8811
}
8812
 
2330 Serge 8813
/* Returns the clock of the currently programmed mode of the given pipe. */
4104 Serge 8814
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8815
				struct intel_crtc_config *pipe_config)
2330 Serge 8816
{
4104 Serge 8817
	struct drm_device *dev = crtc->base.dev;
2330 Serge 8818
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 8819
	int pipe = pipe_config->cpu_transcoder;
4560 Serge 8820
	u32 dpll = pipe_config->dpll_hw_state.dpll;
2330 Serge 8821
	u32 fp;
8822
	intel_clock_t clock;
4560 Serge 8823
	int refclk = i9xx_pll_refclk(dev, pipe_config);
2327 Serge 8824
 
2330 Serge 8825
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
4560 Serge 8826
		fp = pipe_config->dpll_hw_state.fp0;
2330 Serge 8827
	else
4560 Serge 8828
		fp = pipe_config->dpll_hw_state.fp1;
2327 Serge 8829
 
2330 Serge 8830
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8831
	if (IS_PINEVIEW(dev)) {
8832
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8833
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8834
	} else {
8835
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8836
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8837
	}
2327 Serge 8838
 
2330 Serge 8839
	if (!IS_GEN2(dev)) {
8840
		if (IS_PINEVIEW(dev))
8841
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8842
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8843
		else
8844
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8845
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
2327 Serge 8846
 
2330 Serge 8847
		switch (dpll & DPLL_MODE_MASK) {
8848
		case DPLLB_MODE_DAC_SERIAL:
8849
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8850
				5 : 10;
8851
			break;
8852
		case DPLLB_MODE_LVDS:
8853
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8854
				7 : 14;
8855
			break;
8856
		default:
8857
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8858
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
4104 Serge 8859
			return;
2330 Serge 8860
		}
2327 Serge 8861
 
4104 Serge 8862
		if (IS_PINEVIEW(dev))
4560 Serge 8863
			pineview_clock(refclk, &clock);
4104 Serge 8864
		else
4560 Serge 8865
			i9xx_clock(refclk, &clock);
2330 Serge 8866
	} else {
4560 Serge 8867
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8868
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
2327 Serge 8869
 
2330 Serge 8870
		if (is_lvds) {
8871
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8872
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
4560 Serge 8873
 
8874
			if (lvds & LVDS_CLKB_POWER_UP)
8875
				clock.p2 = 7;
8876
			else
2330 Serge 8877
				clock.p2 = 14;
8878
		} else {
8879
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
8880
				clock.p1 = 2;
8881
			else {
8882
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8883
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8884
			}
8885
			if (dpll & PLL_P2_DIVIDE_BY_4)
8886
				clock.p2 = 4;
8887
			else
8888
				clock.p2 = 2;
4560 Serge 8889
		}
2327 Serge 8890
 
4560 Serge 8891
		i9xx_clock(refclk, &clock);
2330 Serge 8892
	}
2327 Serge 8893
 
4560 Serge 8894
	/*
8895
	 * This value includes pixel_multiplier. We will use
8896
	 * port_clock to compute adjusted_mode.crtc_clock in the
8897
	 * encoder's get_config() function.
8898
	 */
8899
	pipe_config->port_clock = clock.dot;
4104 Serge 8900
}
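/*
 * Worked example (illustrative register values; assumes the usual i9xx PLL
 * relation used by i9xx_clock(): m = 5*(m1+2) + (m2+2), p = p1*p2,
 * vco = refclk * m / (n+2), dot = vco / p):
 *
 *   refclk = 96000 kHz, m1 = 12, m2 = 8, n = 3, p1 = 2, p2 = 10
 *   m   = 5*14 + 10      = 80
 *   vco = 96000 * 80 / 5 = 1536000 kHz
 *   dot = 1536000 / 20   = 76800 kHz
 *
 * which is the value stored in pipe_config->port_clock above.
 */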
8901
 
4560 Serge 8902
int intel_dotclock_calculate(int link_freq,
8903
			     const struct intel_link_m_n *m_n)
4104 Serge 8904
{
8905
	/*
8906
	 * The calculation for the data clock is:
4560 Serge 8907
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4104 Serge 8908
	 * But we want to avoid losing precision if possible, so:
4560 Serge 8909
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4104 Serge 8910
	 *
8911
	 * and the link clock is simpler:
4560 Serge 8912
	 * link_clock = (m * link_clock) / n
2330 Serge 8913
	 */
2327 Serge 8914
 
4560 Serge 8915
	if (!m_n->link_n)
8916
		return 0;
4104 Serge 8917
 
4560 Serge 8918
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8919
}
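/*
 * Worked example (illustrative M/N values): with a 270000 kHz link clock and
 * link_m/link_n = 11/20, intel_dotclock_calculate() returns
 *
 *   270000 * 11 / 20 = 148500 kHz
 *
 * i.e. the 1080p60 pixel clock.  Doing the multiply in 64 bits first (the
 * div_u64() above) is what avoids overflow for the much larger M/N values
 * the hardware actually programs.
 */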
4104 Serge 8920
 
4560 Serge 8921
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
8922
				   struct intel_crtc_config *pipe_config)
8923
{
8924
	struct drm_device *dev = crtc->base.dev;
4104 Serge 8925
 
4560 Serge 8926
	/* read out port_clock from the DPLL */
8927
	i9xx_crtc_clock_get(crtc, pipe_config);
4104 Serge 8928
 
4560 Serge 8929
	/*
8930
	 * This value does not include pixel_multiplier.
8931
	 * We will check that port_clock and adjusted_mode.crtc_clock
8932
	 * agree once we know their relationship in the encoder's
8933
	 * get_config() function.
8934
	 */
8935
	pipe_config->adjusted_mode.crtc_clock =
8936
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
8937
					 &pipe_config->fdi_m_n);
2330 Serge 8938
}
2327 Serge 8939
 
2330 Serge 8940
/** Returns the currently programmed mode of the given pipe. */
8941
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8942
					     struct drm_crtc *crtc)
8943
{
8944
	struct drm_i915_private *dev_priv = dev->dev_private;
8945
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746 Serge 8946
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
2330 Serge 8947
	struct drm_display_mode *mode;
4104 Serge 8948
	struct intel_crtc_config pipe_config;
3243 Serge 8949
	int htot = I915_READ(HTOTAL(cpu_transcoder));
8950
	int hsync = I915_READ(HSYNC(cpu_transcoder));
8951
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
8952
	int vsync = I915_READ(VSYNC(cpu_transcoder));
4560 Serge 8953
	enum pipe pipe = intel_crtc->pipe;
2327 Serge 8954
 
2330 Serge 8955
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8956
	if (!mode)
8957
		return NULL;
8958
 
4104 Serge 8959
	/*
8960
	 * Construct a pipe_config sufficient for getting the clock info
8961
	 * back out of crtc_clock_get.
8962
	 *
8963
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
8964
	 * to use a real value here instead.
8965
	 */
4560 Serge 8966
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
4104 Serge 8967
	pipe_config.pixel_multiplier = 1;
4560 Serge 8968
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
8969
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
8970
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
4104 Serge 8971
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
8972
 
4560 Serge 8973
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
2330 Serge 8974
	mode->hdisplay = (htot & 0xffff) + 1;
8975
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
8976
	mode->hsync_start = (hsync & 0xffff) + 1;
8977
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
8978
	mode->vdisplay = (vtot & 0xffff) + 1;
8979
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
8980
	mode->vsync_start = (vsync & 0xffff) + 1;
8981
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
8982
 
8983
	drm_mode_set_name(mode);
8984
 
8985
	return mode;
8986
}
8987
 
3031 serge 8988
static void intel_decrease_pllclock(struct drm_crtc *crtc)
8989
{
8990
	struct drm_device *dev = crtc->dev;
5060 serge 8991
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 8992
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 8993
 
5060 serge 8994
	if (!HAS_GMCH_DISPLAY(dev))
3031 serge 8995
		return;
2327 Serge 8996
 
3031 serge 8997
	if (!dev_priv->lvds_downclock_avail)
8998
		return;
2327 Serge 8999
 
3031 serge 9000
	/*
9001
	 * Since this is called by a timer, we should never get here in
9002
	 * the manual case.
9003
	 */
9004
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
9005
		int pipe = intel_crtc->pipe;
9006
		int dpll_reg = DPLL(pipe);
9007
		int dpll;
2327 Serge 9008
 
3031 serge 9009
		DRM_DEBUG_DRIVER("downclocking LVDS\n");
2327 Serge 9010
 
3031 serge 9011
		assert_panel_unlocked(dev_priv, pipe);
2327 Serge 9012
 
3031 serge 9013
		dpll = I915_READ(dpll_reg);
9014
		dpll |= DISPLAY_RATE_SELECT_FPA1;
9015
		I915_WRITE(dpll_reg, dpll);
9016
		intel_wait_for_vblank(dev, pipe);
9017
		dpll = I915_READ(dpll_reg);
9018
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
9019
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
9020
	}
2327 Serge 9021
 
3031 serge 9022
}
2327 Serge 9023
 
3031 serge 9024
void intel_mark_busy(struct drm_device *dev)
9025
{
4104 Serge 9026
	struct drm_i915_private *dev_priv = dev->dev_private;
9027
 
5060 serge 9028
	if (dev_priv->mm.busy)
9029
		return;
9030
 
9031
	intel_runtime_pm_get(dev_priv);
4104 Serge 9032
	i915_update_gfx_val(dev_priv);
5060 serge 9033
	dev_priv->mm.busy = true;
3031 serge 9034
}
2327 Serge 9035
 
3031 serge 9036
void intel_mark_idle(struct drm_device *dev)
9037
{
4104 Serge 9038
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9039
	struct drm_crtc *crtc;
2327 Serge 9040
 
5060 serge 9041
	if (!dev_priv->mm.busy)
3031 serge 9042
		return;
2327 Serge 9043
 
5060 serge 9044
	dev_priv->mm.busy = false;
9045
 
9046
	if (!i915.powersave)
9047
		goto out;
9048
 
9049
	for_each_crtc(dev, crtc) {
9050
		if (!crtc->primary->fb)
3031 serge 9051
			continue;
2327 Serge 9052
 
3480 Serge 9053
		intel_decrease_pllclock(crtc);
3031 serge 9054
	}
4560 Serge 9055
 
5060 serge 9056
	if (INTEL_INFO(dev)->gen >= 6)
4560 Serge 9057
		gen6_rps_idle(dev->dev_private);
5060 serge 9058
 
9059
out:
9060
	intel_runtime_pm_put(dev_priv);
3031 serge 9061
}
2327 Serge 9062
 
2330 Serge 9063
static void intel_crtc_destroy(struct drm_crtc *crtc)
9064
{
9065
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9066
	struct drm_device *dev = crtc->dev;
9067
	struct intel_unpin_work *work;
2327 Serge 9068
 
5354 serge 9069
	spin_lock_irq(&dev->event_lock);
2330 Serge 9070
	work = intel_crtc->unpin_work;
9071
	intel_crtc->unpin_work = NULL;
5354 serge 9072
	spin_unlock_irq(&dev->event_lock);
2327 Serge 9073
 
2330 Serge 9074
	if (work) {
4293 Serge 9075
		cancel_work_sync(&work->work);
2330 Serge 9076
		kfree(work);
9077
	}
2327 Serge 9078
 
2330 Serge 9079
	drm_crtc_cleanup(crtc);
2327 Serge 9080
 
2330 Serge 9081
	kfree(intel_crtc);
9082
}
2327 Serge 9083
 
3031 serge 9084
#if 0
9085
static void intel_unpin_work_fn(struct work_struct *__work)
9086
{
9087
	struct intel_unpin_work *work =
9088
		container_of(__work, struct intel_unpin_work, work);
3243 Serge 9089
	struct drm_device *dev = work->crtc->dev;
5060 serge 9090
	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
2327 Serge 9091
 
3243 Serge 9092
	mutex_lock(&dev->struct_mutex);
3031 serge 9093
	intel_unpin_fb_obj(work->old_fb_obj);
9094
	drm_gem_object_unreference(&work->pending_flip_obj->base);
9095
	drm_gem_object_unreference(&work->old_fb_obj->base);
2327 Serge 9096
 
3243 Serge 9097
	intel_update_fbc(dev);
9098
	mutex_unlock(&dev->struct_mutex);
9099
 
5354 serge 9100
	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9101
 
3243 Serge 9102
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
9103
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
9104
 
3031 serge 9105
	kfree(work);
9106
}
2327 Serge 9107
 
3031 serge 9108
static void do_intel_finish_page_flip(struct drm_device *dev,
9109
				      struct drm_crtc *crtc)
9110
{
9111
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9112
	struct intel_unpin_work *work;
9113
	unsigned long flags;
2327 Serge 9114
 
3031 serge 9115
	/* Ignore early vblank irqs */
9116
	if (intel_crtc == NULL)
9117
		return;
2327 Serge 9118
 
5354 serge 9119
	/*
9120
	 * This is called both by irq handlers and the reset code (to complete
9121
	 * lost pageflips) so needs the full irqsave spinlocks.
9122
	 */
3031 serge 9123
	spin_lock_irqsave(&dev->event_lock, flags);
9124
	work = intel_crtc->unpin_work;
3243 Serge 9125
 
9126
	/* Ensure we don't miss a work->pending update ... */
9127
	smp_rmb();
9128
 
9129
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
3031 serge 9130
		spin_unlock_irqrestore(&dev->event_lock, flags);
9131
		return;
9132
	}
2327 Serge 9133
 
5354 serge 9134
	page_flip_completed(intel_crtc);
3243 Serge 9135
 
3031 serge 9136
	spin_unlock_irqrestore(&dev->event_lock, flags);
9137
}
2327 Serge 9138
 
3031 serge 9139
void intel_finish_page_flip(struct drm_device *dev, int pipe)
9140
{
5060 serge 9141
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9142
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2327 Serge 9143
 
3031 serge 9144
	do_intel_finish_page_flip(dev, crtc);
9145
}
2327 Serge 9146
 
3031 serge 9147
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
9148
{
5060 serge 9149
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9150
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
2327 Serge 9151
 
3031 serge 9152
	do_intel_finish_page_flip(dev, crtc);
9153
}
2327 Serge 9154
 
5060 serge 9155
/* Is 'a' after or equal to 'b'? */
9156
static bool g4x_flip_count_after_eq(u32 a, u32 b)
9157
{
9158
	return !((a - b) & 0x80000000);
9159
}
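/*
 * Illustrative examples (assumed counter values): the unsigned subtraction
 * above makes the comparison safe across 32-bit wraparound, e.g.
 *
 *   g4x_flip_count_after_eq(5, 3)          -> true
 *   g4x_flip_count_after_eq(3, 5)          -> false
 *   g4x_flip_count_after_eq(2, 0xfffffffe) -> true  (counter has wrapped)
 */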
9160
 
9161
static bool page_flip_finished(struct intel_crtc *crtc)
9162
{
9163
	struct drm_device *dev = crtc->base.dev;
9164
	struct drm_i915_private *dev_priv = dev->dev_private;
9165
 
5354 serge 9166
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
9167
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
9168
		return true;
9169
 
5060 serge 9170
	/*
9171
	 * The relevant registers don't exist on pre-ctg.
9172
	 * As the flip done interrupt doesn't trigger for mmio
9173
	 * flips on gmch platforms, a flip count check isn't
9174
	 * really needed there. But since ctg has the registers,
9175
	 * include it in the check anyway.
9176
	 */
9177
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
9178
		return true;
9179
 
9180
	/*
9181
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
9182
	 * used the same base address. In that case the mmio flip might
9183
	 * have completed, but the CS hasn't even executed the flip yet.
9184
	 *
9185
	 * A flip count check isn't enough as the CS might have updated
9186
	 * the base address just after start of vblank, but before we
9187
	 * managed to process the interrupt. This means we'd complete the
9188
	 * CS flip too soon.
9189
	 *
9190
	 * Combining both checks should get us a good enough result. It may
9191
	 * still happen that the CS flip has been executed, but has not
9192
	 * yet actually completed. But in case the base address is the same
9193
	 * anyway, we don't really care.
9194
	 */
9195
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
9196
		crtc->unpin_work->gtt_offset &&
9197
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
9198
				    crtc->unpin_work->flip_count);
9199
}
9200
 
3031 serge 9201
void intel_prepare_page_flip(struct drm_device *dev, int plane)
9202
{
5060 serge 9203
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9204
	struct intel_crtc *intel_crtc =
9205
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
9206
	unsigned long flags;
2327 Serge 9207
 
5354 serge 9208
 
9209
	/*
9210
	 * This is called both by irq handlers and the reset code (to complete
9211
	 * lost pageflips) so needs the full irqsave spinlocks.
9212
	 *
9213
	 * NB: An MMIO update of the plane base pointer will also
3243 Serge 9214
	 * generate a page-flip completion irq, i.e. every modeset
9215
	 * is also accompanied by a spurious intel_prepare_page_flip().
9216
	 */
3031 serge 9217
	spin_lock_irqsave(&dev->event_lock, flags);
5060 serge 9218
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
3243 Serge 9219
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
3031 serge 9220
	spin_unlock_irqrestore(&dev->event_lock, flags);
9221
}
2327 Serge 9222
 
5060 serge 9223
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
3243 Serge 9224
{
9225
	/* Ensure that the work item is consistent when activating it ... */
9226
	smp_wmb();
9227
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
9228
	/* and that it is marked active as soon as the irq could fire. */
9229
	smp_wmb();
9230
}
9231
 
3031 serge 9232
static int intel_gen2_queue_flip(struct drm_device *dev,
9233
				 struct drm_crtc *crtc,
9234
				 struct drm_framebuffer *fb,
4104 Serge 9235
				 struct drm_i915_gem_object *obj,
5060 serge 9236
				 struct intel_engine_cs *ring,
4104 Serge 9237
				 uint32_t flags)
3031 serge 9238
{
9239
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9240
	u32 flip_mask;
9241
	int ret;
2327 Serge 9242
 
3031 serge 9243
	ret = intel_ring_begin(ring, 6);
9244
	if (ret)
5060 serge 9245
		return ret;
2327 Serge 9246
 
3031 serge 9247
	/* Can't queue multiple flips, so wait for the previous
9248
	 * one to finish before executing the next.
9249
	 */
9250
	if (intel_crtc->plane)
9251
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9252
	else
9253
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9254
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9255
	intel_ring_emit(ring, MI_NOOP);
9256
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9257
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9258
	intel_ring_emit(ring, fb->pitches[0]);
5060 serge 9259
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
3031 serge 9260
	intel_ring_emit(ring, 0); /* aux display base address, unused */
3243 Serge 9261
 
9262
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9263
	__intel_ring_advance(ring);
3031 serge 9264
	return 0;
9265
}
2327 Serge 9266
 
3031 serge 9267
static int intel_gen3_queue_flip(struct drm_device *dev,
9268
				 struct drm_crtc *crtc,
9269
				 struct drm_framebuffer *fb,
4104 Serge 9270
				 struct drm_i915_gem_object *obj,
5060 serge 9271
				 struct intel_engine_cs *ring,
4104 Serge 9272
				 uint32_t flags)
3031 serge 9273
{
9274
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9275
	u32 flip_mask;
9276
	int ret;
2327 Serge 9277
 
3031 serge 9278
	ret = intel_ring_begin(ring, 6);
9279
	if (ret)
5060 serge 9280
		return ret;
2327 Serge 9281
 
3031 serge 9282
	if (intel_crtc->plane)
9283
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9284
	else
9285
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9286
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9287
	intel_ring_emit(ring, MI_NOOP);
9288
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
9289
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9290
	intel_ring_emit(ring, fb->pitches[0]);
5060 serge 9291
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
3031 serge 9292
	intel_ring_emit(ring, MI_NOOP);
2327 Serge 9293
 
3243 Serge 9294
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9295
	__intel_ring_advance(ring);
3031 serge 9296
	return 0;
9297
}
2327 Serge 9298
 
3031 serge 9299
static int intel_gen4_queue_flip(struct drm_device *dev,
9300
				 struct drm_crtc *crtc,
9301
				 struct drm_framebuffer *fb,
4104 Serge 9302
				 struct drm_i915_gem_object *obj,
5060 serge 9303
				 struct intel_engine_cs *ring,
4104 Serge 9304
				 uint32_t flags)
3031 serge 9305
{
9306
	struct drm_i915_private *dev_priv = dev->dev_private;
9307
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9308
	uint32_t pf, pipesrc;
9309
	int ret;
2327 Serge 9310
 
3031 serge 9311
	ret = intel_ring_begin(ring, 4);
9312
	if (ret)
5060 serge 9313
		return ret;
2327 Serge 9314
 
3031 serge 9315
	/* i965+ uses the linear or tiled offsets from the
9316
	 * Display Registers (which do not change across a page-flip)
9317
	 * so we need only reprogram the base address.
9318
	 */
9319
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9320
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9321
	intel_ring_emit(ring, fb->pitches[0]);
5060 serge 9322
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
3031 serge 9323
			obj->tiling_mode);
2327 Serge 9324
 
3031 serge 9325
	/* XXX Enabling the panel-fitter across page-flip is so far
9326
	 * untested on non-native modes, so ignore it for now.
9327
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
9328
	 */
9329
	pf = 0;
9330
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9331
	intel_ring_emit(ring, pf | pipesrc);
3243 Serge 9332
 
9333
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9334
	__intel_ring_advance(ring);
3031 serge 9335
	return 0;
9336
}
2327 Serge 9337
 
3031 serge 9338
static int intel_gen6_queue_flip(struct drm_device *dev,
9339
				 struct drm_crtc *crtc,
9340
				 struct drm_framebuffer *fb,
4104 Serge 9341
				 struct drm_i915_gem_object *obj,
5060 serge 9342
				 struct intel_engine_cs *ring,
4104 Serge 9343
				 uint32_t flags)
3031 serge 9344
{
9345
	struct drm_i915_private *dev_priv = dev->dev_private;
9346
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9347
	uint32_t pf, pipesrc;
9348
	int ret;
2327 Serge 9349
 
3031 serge 9350
	ret = intel_ring_begin(ring, 4);
9351
	if (ret)
5060 serge 9352
		return ret;
2327 Serge 9353
 
3031 serge 9354
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9355
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9356
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
5060 serge 9357
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
2327 Serge 9358
 
3031 serge 9359
	/* Contrary to the suggestions in the documentation,
9360
	 * "Enable Panel Fitter" does not seem to be required when page
9361
	 * flipping with a non-native mode, and worse causes a normal
9362
	 * modeset to fail.
9363
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
9364
	 */
9365
	pf = 0;
9366
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9367
	intel_ring_emit(ring, pf | pipesrc);
3243 Serge 9368
 
9369
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9370
	__intel_ring_advance(ring);
3031 serge 9371
	return 0;
9372
}
2327 Serge 9373
 
3031 serge 9374
static int intel_gen7_queue_flip(struct drm_device *dev,
9375
				 struct drm_crtc *crtc,
9376
				 struct drm_framebuffer *fb,
4104 Serge 9377
				 struct drm_i915_gem_object *obj,
5060 serge 9378
				 struct intel_engine_cs *ring,
4104 Serge 9379
				 uint32_t flags)
3031 serge 9380
{
9381
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9382
	uint32_t plane_bit = 0;
4104 Serge 9383
	int len, ret;
2327 Serge 9384
 
5060 serge 9385
	switch (intel_crtc->plane) {
3031 serge 9386
	case PLANE_A:
9387
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
9388
		break;
9389
	case PLANE_B:
9390
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
9391
		break;
9392
	case PLANE_C:
9393
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
9394
		break;
9395
	default:
9396
		WARN_ONCE(1, "unknown plane in flip command\n");
5060 serge 9397
		return -ENODEV;
3031 serge 9398
	}
2327 Serge 9399
 
4104 Serge 9400
	len = 4;
5060 serge 9401
	if (ring->id == RCS) {
4104 Serge 9402
		len += 6;
5060 serge 9403
		/*
9404
		 * On Gen 8, SRM is now taking an extra dword to accommodate
9405
		 * 48-bit addresses, and we need a NOOP for the batch size to
9406
		 * stay even.
9407
		 */
9408
		if (IS_GEN8(dev))
9409
			len += 2;
9410
	}
4104 Serge 9411
 
5060 serge 9412
	/*
9413
	 * BSpec MI_DISPLAY_FLIP for IVB:
9414
	 * "The full packet must be contained within the same cache line."
9415
	 *
9416
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
9417
	 * cacheline, if we ever start emitting more commands before
9418
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
9419
	 * then do the cacheline alignment, and finally emit the
9420
	 * MI_DISPLAY_FLIP.
9421
	 */
9422
	ret = intel_ring_cacheline_align(ring);
9423
	if (ret)
9424
		return ret;
9425
 
4104 Serge 9426
	ret = intel_ring_begin(ring, len);
3031 serge 9427
	if (ret)
5060 serge 9428
		return ret;
2327 Serge 9429
 
4104 Serge 9430
	/* Unmask the flip-done completion message. Note that the bspec says that
9431
	 * we should do this for both the BCS and RCS, and that we must not unmask
9432
	 * more than one flip event at any time (or ensure that one flip message
9433
	 * can be sent by waiting for flip-done prior to queueing new flips).
9434
	 * Experimentation says that BCS works despite DERRMR masking all
9435
	 * flip-done completion events and that unmasking all planes at once
9436
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
9437
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
9438
	 */
9439
	if (ring->id == RCS) {
9440
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9441
		intel_ring_emit(ring, DERRMR);
9442
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9443
					DERRMR_PIPEB_PRI_FLIP_DONE |
9444
					DERRMR_PIPEC_PRI_FLIP_DONE));
5060 serge 9445
		if (IS_GEN8(dev))
9446
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9447
					      MI_SRM_LRM_GLOBAL_GTT);
9448
		else
4560 Serge 9449
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
9450
					      MI_SRM_LRM_GLOBAL_GTT);
4104 Serge 9451
		intel_ring_emit(ring, DERRMR);
9452
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
5060 serge 9453
		if (IS_GEN8(dev)) {
9454
			intel_ring_emit(ring, 0);
9455
			intel_ring_emit(ring, MI_NOOP);
9456
		}
4104 Serge 9457
	}
9458
 
3031 serge 9459
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
9460
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
5060 serge 9461
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
3031 serge 9462
	intel_ring_emit(ring, (MI_NOOP));
3243 Serge 9463
 
9464
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9465
	__intel_ring_advance(ring);
3031 serge 9466
	return 0;
9467
}
2327 Serge 9468
 
3031 serge 9469
static int intel_default_queue_flip(struct drm_device *dev,
9470
				    struct drm_crtc *crtc,
9471
				    struct drm_framebuffer *fb,
4104 Serge 9472
				    struct drm_i915_gem_object *obj,
5060 serge 9473
				    struct intel_engine_cs *ring,
4104 Serge 9474
				    uint32_t flags)
3031 serge 9475
{
9476
	return -ENODEV;
9477
}
2327 Serge 9478
 
3031 serge 9479
static int intel_crtc_page_flip(struct drm_crtc *crtc,
9480
				struct drm_framebuffer *fb,
4104 Serge 9481
				struct drm_pending_vblank_event *event,
9482
				uint32_t page_flip_flags)
3031 serge 9483
{
9484
	struct drm_device *dev = crtc->dev;
9485
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 9486
	struct drm_framebuffer *old_fb = crtc->primary->fb;
9487
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3031 serge 9488
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 9489
	enum pipe pipe = intel_crtc->pipe;
3031 serge 9490
	struct intel_unpin_work *work;
5060 serge 9491
	struct intel_engine_cs *ring;
3031 serge 9492
	int ret;
2327 Serge 9493
 
5060 serge 9494
	/*
9495
	 * drm_mode_page_flip_ioctl() should already catch this, but double
9496
	 * check to be safe.  In the future we may enable pageflipping from
9497
	 * a disabled primary plane.
9498
	 */
9499
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
9500
		return -EBUSY;
9501
 
3031 serge 9502
	/* Can't change pixel format via MI display flips. */
5060 serge 9503
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
3031 serge 9504
		return -EINVAL;
2327 Serge 9505
 
3031 serge 9506
	/*
9507
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
9508
	 * Note that pitch changes could also affect these registers.
9509
	 */
9510
	if (INTEL_INFO(dev)->gen > 3 &&
5060 serge 9511
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
9512
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
3031 serge 9513
		return -EINVAL;
2327 Serge 9514
 
5354 serge 9515
	if (i915_terminally_wedged(&dev_priv->gpu_error))
9516
		goto out_hang;
9517
 
4560 Serge 9518
	work = kzalloc(sizeof(*work), GFP_KERNEL);
3031 serge 9519
	if (work == NULL)
9520
		return -ENOMEM;
2327 Serge 9521
 
3031 serge 9522
	work->event = event;
3243 Serge 9523
	work->crtc = crtc;
5060 serge 9524
	work->old_fb_obj = intel_fb_obj(old_fb);
3031 serge 9525
	INIT_WORK(&work->work, intel_unpin_work_fn);
2327 Serge 9526
 
5060 serge 9527
	ret = drm_crtc_vblank_get(crtc);
3031 serge 9528
	if (ret)
9529
		goto free_work;
2327 Serge 9530
 
3031 serge 9531
	/* We borrow the event spin lock for protecting unpin_work */
5354 serge 9532
	spin_lock_irq(&dev->event_lock);
3031 serge 9533
	if (intel_crtc->unpin_work) {
5354 serge 9534
		/* Before declaring the flip queue wedged, check if
9535
		 * the hardware completed the operation behind our backs.
9536
		 */
9537
		if (__intel_pageflip_stall_check(dev, crtc)) {
9538
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
9539
			page_flip_completed(intel_crtc);
9540
		} else {
9541
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9542
			spin_unlock_irq(&dev->event_lock);
9543
 
9544
			drm_crtc_vblank_put(crtc);
3031 serge 9545
			kfree(work);
9546
			return -EBUSY;
9547
		}
5354 serge 9548
	}
3031 serge 9549
	intel_crtc->unpin_work = work;
5354 serge 9550
	spin_unlock_irq(&dev->event_lock);
2327 Serge 9551
 
3243 Serge 9552
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9553
		flush_workqueue(dev_priv->wq);
9554
 
3031 serge 9555
	ret = i915_mutex_lock_interruptible(dev);
9556
	if (ret)
9557
		goto cleanup;
2327 Serge 9558
 
3031 serge 9559
	/* Reference the objects for the scheduled work. */
9560
	drm_gem_object_reference(&work->old_fb_obj->base);
9561
	drm_gem_object_reference(&obj->base);
2327 Serge 9562
 
5060 serge 9563
	crtc->primary->fb = fb;
2327 Serge 9564
 
3031 serge 9565
	work->pending_flip_obj = obj;
2327 Serge 9566
 
3243 Serge 9567
	atomic_inc(&intel_crtc->unpin_work_count);
3480 Serge 9568
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3031 serge 9569
 
5060 serge 9570
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9571
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
9572
 
9573
	if (IS_VALLEYVIEW(dev)) {
9574
		ring = &dev_priv->ring[BCS];
9575
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
9576
			/* vlv: DISPLAY_FLIP fails to change tiling */
9577
			ring = NULL;
9578
	} else if (IS_IVYBRIDGE(dev)) {
9579
		ring = &dev_priv->ring[BCS];
9580
	} else if (INTEL_INFO(dev)->gen >= 7) {
9581
		ring = obj->ring;
9582
		if (ring == NULL || ring->id != RCS)
9583
			ring = &dev_priv->ring[BCS];
9584
	} else {
9585
		ring = &dev_priv->ring[RCS];
9586
	}
9587
 
5354 serge 9588
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
3031 serge 9589
	if (ret)
9590
		goto cleanup_pending;
9591
 
5060 serge 9592
	work->gtt_offset =
9593
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9594
 
5354 serge 9595
	if (use_mmio_flip(ring, obj)) {
5060 serge 9596
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
9597
					    page_flip_flags);
5354 serge 9598
		if (ret)
9599
			goto cleanup_unpin;
9600
 
9601
		work->flip_queued_seqno = obj->last_write_seqno;
9602
		work->flip_queued_ring = obj->ring;
9603
	} else {
5060 serge 9604
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9605
				page_flip_flags);
9606
		if (ret)
9607
			goto cleanup_unpin;
9608
 
5354 serge 9609
		work->flip_queued_seqno = intel_ring_get_seqno(ring);
9610
		work->flip_queued_ring = ring;
9611
	}
9612
 
9613
	work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
9614
	work->enable_stall_check = true;
9615
 
5060 serge 9616
	i915_gem_track_fb(work->old_fb_obj, obj,
9617
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
9618
 
3031 serge 9619
	intel_disable_fbc(dev);
5060 serge 9620
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
3031 serge 9621
	mutex_unlock(&dev->struct_mutex);
9622
 
9623
	trace_i915_flip_request(intel_crtc->plane, obj);
9624
 
9625
	return 0;
9626
 
5060 serge 9627
cleanup_unpin:
9628
	intel_unpin_fb_obj(obj);
3031 serge 9629
cleanup_pending:
3243 Serge 9630
	atomic_dec(&intel_crtc->unpin_work_count);
5060 serge 9631
	crtc->primary->fb = old_fb;
3031 serge 9632
	drm_gem_object_unreference(&work->old_fb_obj->base);
9633
	drm_gem_object_unreference(&obj->base);
9634
	mutex_unlock(&dev->struct_mutex);
9635
 
9636
cleanup:
5354 serge 9637
	spin_lock_irq(&dev->event_lock);
3031 serge 9638
	intel_crtc->unpin_work = NULL;
5354 serge 9639
	spin_unlock_irq(&dev->event_lock);
3031 serge 9640
 
5060 serge 9641
	drm_crtc_vblank_put(crtc);
3031 serge 9642
free_work:
9643
	kfree(work);
9644
 
5060 serge 9645
	if (ret == -EIO) {
9646
out_hang:
5354 serge 9647
//       intel_crtc_wait_for_pending_flips(crtc);
5060 serge 9648
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
5354 serge 9649
		if (ret == 0 && event) {
9650
			spin_lock_irq(&dev->event_lock);
5060 serge 9651
			drm_send_vblank_event(dev, pipe, event);
5354 serge 9652
			spin_unlock_irq(&dev->event_lock);
9653
		}
5060 serge 9654
	}
3031 serge 9655
	return ret;
9656
}
9657
#endif
9658
 
9659
static struct drm_crtc_helper_funcs intel_helper_funcs = {
9660
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
9661
	.load_lut = intel_crtc_load_lut,
9662
};
9663
 
9664
/**
9665
 * intel_modeset_update_staged_output_state
9666
 *
9667
 * Updates the staged output configuration state, e.g. after we've read out the
9668
 * current hw state.
9669
 */
9670
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9671
{
5060 serge 9672
	struct intel_crtc *crtc;
3031 serge 9673
	struct intel_encoder *encoder;
9674
	struct intel_connector *connector;
9675
 
9676
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9677
			    base.head) {
9678
		connector->new_encoder =
9679
			to_intel_encoder(connector->base.encoder);
9680
	}
9681
 
5354 serge 9682
	for_each_intel_encoder(dev, encoder) {
3031 serge 9683
		encoder->new_crtc =
9684
			to_intel_crtc(encoder->base.crtc);
9685
	}
5060 serge 9686
 
9687
	for_each_intel_crtc(dev, crtc) {
9688
		crtc->new_enabled = crtc->base.enabled;
9689
 
9690
		if (crtc->new_enabled)
9691
			crtc->new_config = &crtc->config;
9692
		else
9693
			crtc->new_config = NULL;
9694
	}
3031 serge 9695
}
9696
 
9697
/**
9698
 * intel_modeset_commit_output_state
9699
 *
9700
 * This function copies the stage display pipe configuration to the real one.
9701
 */
9702
static void intel_modeset_commit_output_state(struct drm_device *dev)
9703
{
5060 serge 9704
	struct intel_crtc *crtc;
3031 serge 9705
	struct intel_encoder *encoder;
9706
	struct intel_connector *connector;
9707
 
9708
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9709
			    base.head) {
9710
		connector->base.encoder = &connector->new_encoder->base;
9711
	}
9712
 
5354 serge 9713
	for_each_intel_encoder(dev, encoder) {
3031 serge 9714
		encoder->base.crtc = &encoder->new_crtc->base;
9715
	}
5060 serge 9716
 
9717
	for_each_intel_crtc(dev, crtc) {
9718
		crtc->base.enabled = crtc->new_enabled;
9719
	}
3031 serge 9720
}
9721
 
4104 Serge 9722
static void
5060 serge 9723
connected_sink_compute_bpp(struct intel_connector *connector,
4104 Serge 9724
			   struct intel_crtc_config *pipe_config)
9725
{
9726
	int bpp = pipe_config->pipe_bpp;
9727
 
9728
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
9729
		connector->base.base.id,
5060 serge 9730
		connector->base.name);
4104 Serge 9731
 
9732
	/* Don't use an invalid EDID bpc value */
9733
	if (connector->base.display_info.bpc &&
9734
	    connector->base.display_info.bpc * 3 < bpp) {
9735
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9736
			      bpp, connector->base.display_info.bpc*3);
9737
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9738
	}
9739
 
9740
	/* Clamp bpp to 8 on screens without EDID 1.4 */
9741
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
9742
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9743
			      bpp);
9744
		pipe_config->pipe_bpp = 24;
9745
	}
9746
}
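/*
 * Standalone sketch of the sink bpp clamping above (hypothetical helper and
 * values, not driver code, guarded out so it never builds here): pipe_bpp is
 * limited to 3 * the EDID-reported bpc, and capped at 24 when the sink
 * reports no bpc at all.
 */
#if 0
#include <stdio.h>

static int clamp_sink_bpp(int pipe_bpp, int sink_bpc)
{
	if (sink_bpc && sink_bpc * 3 < pipe_bpp)
		return sink_bpc * 3;	/* e.g. 30 -> 18 for a 6 bpc panel */
	if (sink_bpc == 0 && pipe_bpp > 24)
		return 24;		/* no EDID 1.4 bpc info: cap at 8 bpc */
	return pipe_bpp;
}

int main(void)
{
	printf("%d %d %d\n",
	       clamp_sink_bpp(30, 6),	/* 18 */
	       clamp_sink_bpp(30, 0),	/* 24 */
	       clamp_sink_bpp(24, 8));	/* 24 */
	return 0;
}
#endif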
9747
 
3746 Serge 9748
static int
4104 Serge 9749
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
3746 Serge 9750
		    struct drm_framebuffer *fb,
9751
		    struct intel_crtc_config *pipe_config)
9752
{
4104 Serge 9753
	struct drm_device *dev = crtc->base.dev;
9754
	struct intel_connector *connector;
3746 Serge 9755
	int bpp;
9756
 
9757
	switch (fb->pixel_format) {
9758
	case DRM_FORMAT_C8:
9759
		bpp = 8*3; /* since we go through a colormap */
9760
		break;
9761
	case DRM_FORMAT_XRGB1555:
9762
	case DRM_FORMAT_ARGB1555:
9763
		/* checked in intel_framebuffer_init already */
9764
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
9765
			return -EINVAL;
9766
	case DRM_FORMAT_RGB565:
9767
		bpp = 6*3; /* min is 18bpp */
9768
		break;
9769
	case DRM_FORMAT_XBGR8888:
9770
	case DRM_FORMAT_ABGR8888:
9771
		/* checked in intel_framebuffer_init already */
9772
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9773
			return -EINVAL;
9774
	case DRM_FORMAT_XRGB8888:
9775
	case DRM_FORMAT_ARGB8888:
9776
		bpp = 8*3;
9777
		break;
9778
	case DRM_FORMAT_XRGB2101010:
9779
	case DRM_FORMAT_ARGB2101010:
9780
	case DRM_FORMAT_XBGR2101010:
9781
	case DRM_FORMAT_ABGR2101010:
9782
		/* checked in intel_framebuffer_init already */
9783
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9784
			return -EINVAL;
9785
		bpp = 10*3;
9786
		break;
9787
	/* TODO: gen4+ supports 16 bpc floating point, too. */
9788
	default:
9789
		DRM_DEBUG_KMS("unsupported depth\n");
9790
		return -EINVAL;
9791
	}
9792
 
9793
	pipe_config->pipe_bpp = bpp;
9794
 
9795
	/* Clamp display bpp to EDID value */
9796
	list_for_each_entry(connector, &dev->mode_config.connector_list,
4104 Serge 9797
			    base.head) {
9798
		if (!connector->new_encoder ||
9799
		    connector->new_encoder->new_crtc != crtc)
3746 Serge 9800
			continue;
9801
 
4104 Serge 9802
		connected_sink_compute_bpp(connector, pipe_config);
3746 Serge 9803
	}
9804
 
9805
	return bpp;
9806
}
9807
 
4560 Serge 9808
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
9809
{
9810
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
9811
			"type: 0x%x flags: 0x%x\n",
9812
		mode->crtc_clock,
9813
		mode->crtc_hdisplay, mode->crtc_hsync_start,
9814
		mode->crtc_hsync_end, mode->crtc_htotal,
9815
		mode->crtc_vdisplay, mode->crtc_vsync_start,
9816
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
9817
}
9818
 
4104 Serge 9819
static void intel_dump_pipe_config(struct intel_crtc *crtc,
9820
				   struct intel_crtc_config *pipe_config,
9821
				   const char *context)
9822
{
9823
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
9824
		      context, pipe_name(crtc->pipe));
9825
 
9826
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
9827
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
9828
		      pipe_config->pipe_bpp, pipe_config->dither);
9829
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9830
		      pipe_config->has_pch_encoder,
9831
		      pipe_config->fdi_lanes,
9832
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
9833
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
9834
		      pipe_config->fdi_m_n.tu);
4560 Serge 9835
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9836
		      pipe_config->has_dp_encoder,
9837
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
9838
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
9839
		      pipe_config->dp_m_n.tu);
5354 serge 9840
 
9841
	DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
9842
		      pipe_config->has_dp_encoder,
9843
		      pipe_config->dp_m2_n2.gmch_m,
9844
		      pipe_config->dp_m2_n2.gmch_n,
9845
		      pipe_config->dp_m2_n2.link_m,
9846
		      pipe_config->dp_m2_n2.link_n,
9847
		      pipe_config->dp_m2_n2.tu);
9848
 
9849
	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
9850
		      pipe_config->has_audio,
9851
		      pipe_config->has_infoframe);
9852
 
4104 Serge 9853
	DRM_DEBUG_KMS("requested mode:\n");
9854
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9855
	DRM_DEBUG_KMS("adjusted mode:\n");
9856
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
4560 Serge 9857
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
9858
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
9859
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
9860
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
4104 Serge 9861
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9862
		      pipe_config->gmch_pfit.control,
9863
		      pipe_config->gmch_pfit.pgm_ratios,
9864
		      pipe_config->gmch_pfit.lvds_border_bits);
9865
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
9866
		      pipe_config->pch_pfit.pos,
9867
		      pipe_config->pch_pfit.size,
9868
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
9869
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
4560 Serge 9870
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
4104 Serge 9871
}
9872
 
5060 serge 9873
static bool encoders_cloneable(const struct intel_encoder *a,
9874
			       const struct intel_encoder *b)
4104 Serge 9875
{
5060 serge 9876
	/* masks could be asymmetric, so check both ways */
9877
	return a == b || (a->cloneable & (1 << b->type) &&
9878
			  b->cloneable & (1 << a->type));
9879
}
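/*
 * Standalone sketch of the symmetric cloneable check above (hypothetical
 * encoder types, not driver code, guarded out so it never builds here): each
 * encoder carries a bitmask of encoder types it may share a pipe with, and
 * both directions have to agree before two encoders count as cloneable.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct enc { int type; unsigned int cloneable; };

static bool cloneable(const struct enc *a, const struct enc *b)
{
	return a == b || ((a->cloneable & (1u << b->type)) &&
			  (b->cloneable & (1u << a->type)));
}

int main(void)
{
	struct enc analog = { .type = 0, .cloneable = 1u << 1 };	/* accepts type 1 */
	struct enc tv     = { .type = 1, .cloneable = 0 };		/* accepts nothing */

	/* asymmetric masks: analog would accept tv, but tv refuses analog */
	printf("%d\n", cloneable(&analog, &tv));	/* 0 */
	return 0;
}
#endif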
9880
 
9881
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9882
					 struct intel_encoder *encoder)
9883
{
9884
	struct drm_device *dev = crtc->base.dev;
9885
	struct intel_encoder *source_encoder;
9886
 
5354 serge 9887
	for_each_intel_encoder(dev, source_encoder) {
5060 serge 9888
		if (source_encoder->new_crtc != crtc)
9889
			continue;
9890
 
9891
		if (!encoders_cloneable(encoder, source_encoder))
9892
			return false;
9893
	}
9894
 
9895
	return true;
9896
}
9897
 
9898
static bool check_encoder_cloning(struct intel_crtc *crtc)
9899
{
9900
	struct drm_device *dev = crtc->base.dev;
4104 Serge 9901
	struct intel_encoder *encoder;
9902
 
5354 serge 9903
	for_each_intel_encoder(dev, encoder) {
5060 serge 9904
		if (encoder->new_crtc != crtc)
4104 Serge 9905
			continue;
9906
 
5060 serge 9907
		if (!check_single_encoder_cloning(crtc, encoder))
9908
			return false;
4104 Serge 9909
	}
9910
 
5060 serge 9911
	return true;
4104 Serge 9912
}
9913
 
5354 serge 9914
static bool check_digital_port_conflicts(struct drm_device *dev)
9915
{
9916
	struct intel_connector *connector;
9917
	unsigned int used_ports = 0;
9918
 
9919
	/*
9920
	 * Walk the connector list instead of the encoder
9921
	 * list to detect the problem on ddi platforms
9922
	 * where there's just one encoder per digital port.
9923
	 */
9924
	list_for_each_entry(connector,
9925
			    &dev->mode_config.connector_list, base.head) {
9926
		struct intel_encoder *encoder = connector->new_encoder;
9927
 
9928
		if (!encoder)
9929
			continue;
9930
 
9931
		WARN_ON(!encoder->new_crtc);
9932
 
9933
		switch (encoder->type) {
9934
			unsigned int port_mask;
9935
		case INTEL_OUTPUT_UNKNOWN:
9936
			if (WARN_ON(!HAS_DDI(dev)))
9937
				break;
9938
		case INTEL_OUTPUT_DISPLAYPORT:
9939
		case INTEL_OUTPUT_HDMI:
9940
		case INTEL_OUTPUT_EDP:
9941
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
9942
 
9943
			/* the same port mustn't appear more than once */
9944
			if (used_ports & port_mask)
9945
				return false;
9946
 
9947
			used_ports |= port_mask;
9948
		default:
9949
			break;
9950
		}
9951
	}
9952
 
9953
	return true;
9954
}
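/*
 * Standalone sketch of the duplicate-port detection above (hypothetical port
 * numbers, not driver code, guarded out so it never builds here): every
 * digital connector contributes a (1 << port) bit, and seeing a bit twice
 * means two connectors were routed to the same port.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool ports_unique(const int *ports, int n)
{
	unsigned int used_ports = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned int port_mask = 1u << ports[i];

		if (used_ports & port_mask)
			return false;	/* same port claimed twice */
		used_ports |= port_mask;
	}
	return true;
}

int main(void)
{
	int good[] = { 1, 2, 3 };	/* e.g. ports B, C and D */
	int bad[]  = { 1, 2, 2 };

	printf("%d %d\n", ports_unique(good, 3), ports_unique(bad, 3));	/* 1 0 */
	return 0;
}
#endif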
9955
 
3746 Serge 9956
static struct intel_crtc_config *
9957
intel_modeset_pipe_config(struct drm_crtc *crtc,
9958
			  struct drm_framebuffer *fb,
3031 serge 9959
			    struct drm_display_mode *mode)
9960
{
9961
	struct drm_device *dev = crtc->dev;
9962
	struct intel_encoder *encoder;
3746 Serge 9963
	struct intel_crtc_config *pipe_config;
4104 Serge 9964
	int plane_bpp, ret = -EINVAL;
9965
	bool retry = true;
3031 serge 9966
 
5060 serge 9967
	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
4104 Serge 9968
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
9969
		return ERR_PTR(-EINVAL);
9970
	}
9971
 
5354 serge 9972
	if (!check_digital_port_conflicts(dev)) {
9973
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
9974
		return ERR_PTR(-EINVAL);
9975
	}
9976
 
3746 Serge 9977
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
9978
	if (!pipe_config)
3031 serge 9979
		return ERR_PTR(-ENOMEM);
9980
 
3746 Serge 9981
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
9982
	drm_mode_copy(&pipe_config->requested_mode, mode);
4560 Serge 9983
 
4104 Serge 9984
	pipe_config->cpu_transcoder =
9985
		(enum transcoder) to_intel_crtc(crtc)->pipe;
9986
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
3746 Serge 9987
 
4104 Serge 9988
	/*
9989
	 * Sanitize sync polarity flags based on requested ones. If neither
9990
	 * positive or negative polarity is requested, treat this as meaning
9991
	 * negative polarity.
9992
	 */
9993
	if (!(pipe_config->adjusted_mode.flags &
9994
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9995
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9996
 
9997
	if (!(pipe_config->adjusted_mode.flags &
9998
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9999
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
10000
 
10001
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
10002
	 * plane pixel format and any sink constraints into account. Returns the
10003
	 * source plane bpp so that dithering can be selected on mismatches
10004
	 * after encoders and crtc also have had their say. */
10005
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
10006
					      fb, pipe_config);
3746 Serge 10007
	if (plane_bpp < 0)
10008
		goto fail;
10009
 
4560 Serge 10010
	/*
10011
	 * Determine the real pipe dimensions. Note that stereo modes can
10012
	 * increase the actual pipe size due to the frame doubling and
10013
	 * insertion of additional space for blanks between the frame. This
10014
	 * is stored in the crtc timings. We use the requested mode to do this
10015
	 * computation to clearly distinguish it from the adjusted mode, which
10016
	 * can be changed by the connectors in the below retry loop.
10017
	 */
10018
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
10019
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
10020
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
10021
 
4104 Serge 10022
encoder_retry:
10023
	/* Ensure the port clock defaults are reset when retrying. */
10024
	pipe_config->port_clock = 0;
10025
	pipe_config->pixel_multiplier = 1;
10026
 
10027
	/* Fill in default crtc timings, allow encoders to overwrite them. */
4560 Serge 10028
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
4104 Serge 10029
 
3031 serge 10030
	/* Pass our mode to the connectors and the CRTC to give them a chance to
10031
	 * adjust it according to limitations or connector properties, and also
10032
	 * a chance to reject the mode entirely.
2330 Serge 10033
	 */
5354 serge 10034
	for_each_intel_encoder(dev, encoder) {
2327 Serge 10035
 
3031 serge 10036
		if (&encoder->new_crtc->base != crtc)
10037
			continue;
3746 Serge 10038
 
10039
		if (!(encoder->compute_config(encoder, pipe_config))) {
10040
			DRM_DEBUG_KMS("Encoder config failure\n");
10041
			goto fail;
10042
		}
10043
	}
10044
 
4104 Serge 10045
	/* Set default port clock if not overwritten by the encoder. Needs to be
10046
	 * done afterwards in case the encoder adjusts the mode. */
10047
	if (!pipe_config->port_clock)
4560 Serge 10048
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
10049
			* pipe_config->pixel_multiplier;
2327 Serge 10050
 
4104 Serge 10051
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
10052
	if (ret < 0) {
3031 serge 10053
		DRM_DEBUG_KMS("CRTC fixup failed\n");
10054
		goto fail;
10055
	}
2327 Serge 10056
 
4104 Serge 10057
	if (ret == RETRY) {
10058
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
10059
			ret = -EINVAL;
10060
			goto fail;
10061
		}
10062
 
10063
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
10064
		retry = false;
10065
		goto encoder_retry;
10066
	}
10067
 
3746 Serge 10068
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
10069
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
10070
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
10071
 
10072
	return pipe_config;
3031 serge 10073
fail:
3746 Serge 10074
	kfree(pipe_config);
4104 Serge 10075
	return ERR_PTR(ret);
3031 serge 10076
}
2327 Serge 10077
 
3031 serge 10078
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
10079
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
10080
static void
10081
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
10082
			     unsigned *prepare_pipes, unsigned *disable_pipes)
10083
{
10084
	struct intel_crtc *intel_crtc;
10085
	struct drm_device *dev = crtc->dev;
10086
	struct intel_encoder *encoder;
10087
	struct intel_connector *connector;
10088
	struct drm_crtc *tmp_crtc;
10089
 
10090
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
10091
 
10092
	/* Check which crtcs have changed outputs connected to them, these need
10093
	 * to be part of the prepare_pipes mask. We don't (yet) support global
10094
	 * modeset across multiple crtcs, so modeset_pipes will only have one
10095
	 * bit set at most. */
10096
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10097
			    base.head) {
10098
		if (connector->base.encoder == &connector->new_encoder->base)
10099
			continue;
10100
 
10101
		if (connector->base.encoder) {
10102
			tmp_crtc = connector->base.encoder->crtc;
10103
 
10104
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10105
		}
10106
 
10107
		if (connector->new_encoder)
10108
			*prepare_pipes |=
10109
				1 << connector->new_encoder->new_crtc->pipe;
10110
	}
10111
 
5354 serge 10112
	for_each_intel_encoder(dev, encoder) {
3031 serge 10113
		if (encoder->base.crtc == &encoder->new_crtc->base)
10114
			continue;
10115
 
10116
		if (encoder->base.crtc) {
10117
			tmp_crtc = encoder->base.crtc;
10118
 
10119
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10120
		}
10121
 
10122
		if (encoder->new_crtc)
10123
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
10124
	}
10125
 
5060 serge 10126
	/* Check for pipes that will be enabled/disabled ... */
10127
	for_each_intel_crtc(dev, intel_crtc) {
10128
		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
3031 serge 10129
			continue;
10130
 
5060 serge 10131
		if (!intel_crtc->new_enabled)
3031 serge 10132
			*disable_pipes |= 1 << intel_crtc->pipe;
5060 serge 10133
		else
10134
			*prepare_pipes |= 1 << intel_crtc->pipe;
3031 serge 10135
	}
10136
 
10137
 
10138
	/* set_mode is also used to update properties on live display pipes. */
10139
	intel_crtc = to_intel_crtc(crtc);
5060 serge 10140
	if (intel_crtc->new_enabled)
3031 serge 10141
		*prepare_pipes |= 1 << intel_crtc->pipe;
10142
 
3746 Serge 10143
	/*
10144
	 * For simplicity do a full modeset on any pipe where the output routing
10145
	 * changed. We could be more clever, but that would require us to be
10146
	 * more careful with calling the relevant encoder->mode_set functions.
10147
	 */
3031 serge 10148
	if (*prepare_pipes)
10149
		*modeset_pipes = *prepare_pipes;
10150
 
10151
	/* ... and mask these out. */
10152
	*modeset_pipes &= ~(*disable_pipes);
10153
	*prepare_pipes &= ~(*disable_pipes);
3746 Serge 10154
 
10155
	/*
10156
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
10157
	 * obeys this rule, but the modeset restore mode of
10158
	 * intel_modeset_setup_hw_state does not.
10159
	 */
10160
	*modeset_pipes &= 1 << intel_crtc->pipe;
10161
	*prepare_pipes &= 1 << intel_crtc->pipe;
4104 Serge 10162
 
10163
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
10164
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
2330 Serge 10165
}
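/*
 * Standalone sketch of the pipe bitmask bookkeeping above (hypothetical pipe
 * numbers, not driver code, guarded out so it never builds here): pipes are
 * tracked as (1 << pipe) bits, every modeset pipe starts out as a prepare
 * pipe, and pipes being disabled are masked out of both sets.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int prepare_pipes = (1u << 0) | (1u << 1);	/* pipes A and B change routing */
	unsigned int disable_pipes = 1u << 1;			/* pipe B loses all outputs */
	unsigned int modeset_pipes = prepare_pipes;		/* full modeset on every touched pipe */

	modeset_pipes &= ~disable_pipes;
	prepare_pipes &= ~disable_pipes;

	printf("modeset: %x, prepare: %x, disable: %x\n",
	       modeset_pipes, prepare_pipes, disable_pipes);	/* 1, 1, 2 */
	return 0;
}
#endif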
2327 Serge 10166
 
3031 serge 10167
static bool intel_crtc_in_use(struct drm_crtc *crtc)
2330 Serge 10168
{
3031 serge 10169
	struct drm_encoder *encoder;
2330 Serge 10170
	struct drm_device *dev = crtc->dev;
2327 Serge 10171
 
3031 serge 10172
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10173
		if (encoder->crtc == crtc)
10174
			return true;
10175
 
10176
	return false;
10177
}
10178
 
10179
static void
10180
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10181
{
5354 serge 10182
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10183
	struct intel_encoder *intel_encoder;
10184
	struct intel_crtc *intel_crtc;
10185
	struct drm_connector *connector;
10186
 
5354 serge 10187
	intel_shared_dpll_commit(dev_priv);
10188
 
10189
	for_each_intel_encoder(dev, intel_encoder) {
3031 serge 10190
		if (!intel_encoder->base.crtc)
10191
			continue;
10192
 
10193
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
10194
 
10195
		if (prepare_pipes & (1 << intel_crtc->pipe))
10196
			intel_encoder->connectors_active = false;
10197
	}
10198
 
10199
	intel_modeset_commit_output_state(dev);
10200
 
5060 serge 10201
	/* Double check state. */
10202
	for_each_intel_crtc(dev, intel_crtc) {
10203
		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
10204
		WARN_ON(intel_crtc->new_config &&
10205
			intel_crtc->new_config != &intel_crtc->config);
10206
		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
3031 serge 10207
	}
10208
 
10209
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10210
		if (!connector->encoder || !connector->encoder->crtc)
10211
			continue;
10212
 
10213
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
10214
 
10215
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
10216
			struct drm_property *dpms_property =
10217
				dev->mode_config.dpms_property;
10218
 
10219
			connector->dpms = DRM_MODE_DPMS_ON;
3243 Serge 10220
			drm_object_property_set_value(&connector->base,
3031 serge 10221
							 dpms_property,
10222
							 DRM_MODE_DPMS_ON);
10223
 
10224
			intel_encoder = to_intel_encoder(connector->encoder);
10225
			intel_encoder->connectors_active = true;
10226
		}
10227
	}
10228
 
10229
}
10230
 
4560 Serge 10231
static bool intel_fuzzy_clock_check(int clock1, int clock2)
4104 Serge 10232
{
4560 Serge 10233
	int diff;
4104 Serge 10234
 
10235
	if (clock1 == clock2)
10236
		return true;
10237
 
10238
	if (!clock1 || !clock2)
10239
		return false;
10240
 
10241
	diff = abs(clock1 - clock2);
10242
 
10243
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
10244
		return true;
10245
 
10246
	return false;
10247
}
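/*
 * Standalone sketch of the fuzzy clock comparison above (hypothetical clock
 * values in kHz, not driver code, guarded out so it never builds here): two
 * clocks are treated as equal when their difference stays below 5% of their
 * sum, which the test expresses as (diff + c1 + c2) * 100 / (c1 + c2) < 105.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;
	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);
	return (diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105;
}

int main(void)
{
	printf("%d\n", fuzzy_clock_check(100000, 104000));	/* 1: diff 4000 < 5% of 204000 */
	printf("%d\n", fuzzy_clock_check(100000, 112000));	/* 0: diff 12000 > 5% of 212000 */
	return 0;
}
#endif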
10248
 
3031 serge 10249
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
10250
	list_for_each_entry((intel_crtc), \
10251
			    &(dev)->mode_config.crtc_list, \
10252
			    base.head) \
4104 Serge 10253
		if (mask & (1 <<(intel_crtc)->pipe))
3031 serge 10254
 
3746 Serge 10255
static bool
4104 Serge 10256
intel_pipe_config_compare(struct drm_device *dev,
10257
			  struct intel_crtc_config *current_config,
3746 Serge 10258
			  struct intel_crtc_config *pipe_config)
10259
{
4104 Serge 10260
#define PIPE_CONF_CHECK_X(name)	\
10261
	if (current_config->name != pipe_config->name) { \
10262
		DRM_ERROR("mismatch in " #name " " \
10263
			  "(expected 0x%08x, found 0x%08x)\n", \
10264
			  current_config->name, \
10265
			  pipe_config->name); \
10266
		return false; \
3746 Serge 10267
	}
10268
 
4104 Serge 10269
#define PIPE_CONF_CHECK_I(name)	\
10270
	if (current_config->name != pipe_config->name) { \
10271
		DRM_ERROR("mismatch in " #name " " \
10272
			  "(expected %i, found %i)\n", \
10273
			  current_config->name, \
10274
			  pipe_config->name); \
10275
		return false; \
10276
	}
10277
 
5354 serge 10278
/* This is required for BDW+ where there is only one set of registers for
10279
 * switching between high and low RR.
10280
 * This macro can be used whenever a comparison has to be made between one
10281
 * hw state and multiple sw state variables.
10282
 */
10283
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
10284
	if ((current_config->name != pipe_config->name) && \
10285
		(current_config->alt_name != pipe_config->name)) { \
10286
			DRM_ERROR("mismatch in " #name " " \
10287
				  "(expected %i or %i, found %i)\n", \
10288
				  current_config->name, \
10289
				  current_config->alt_name, \
10290
				  pipe_config->name); \
10291
			return false; \
10292
	}
10293
 
4104 Serge 10294
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
10295
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
10296
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
10297
			  "(expected %i, found %i)\n", \
10298
			  current_config->name & (mask), \
10299
			  pipe_config->name & (mask)); \
10300
		return false; \
10301
	}
10302
 
4560 Serge 10303
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
10304
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10305
		DRM_ERROR("mismatch in " #name " " \
10306
			  "(expected %i, found %i)\n", \
10307
			  current_config->name, \
10308
			  pipe_config->name); \
10309
		return false; \
10310
	}
10311
 
4104 Serge 10312
#define PIPE_CONF_QUIRK(quirk)	\
10313
	((current_config->quirks | pipe_config->quirks) & (quirk))
10314
 
10315
	PIPE_CONF_CHECK_I(cpu_transcoder);
10316
 
10317
	PIPE_CONF_CHECK_I(has_pch_encoder);
10318
	PIPE_CONF_CHECK_I(fdi_lanes);
10319
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
10320
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
10321
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
10322
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
10323
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
10324
 
4560 Serge 10325
	PIPE_CONF_CHECK_I(has_dp_encoder);
5354 serge 10326
 
10327
	if (INTEL_INFO(dev)->gen < 8) {
4560 Serge 10328
		PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
10329
		PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
10330
		PIPE_CONF_CHECK_I(dp_m_n.link_m);
10331
		PIPE_CONF_CHECK_I(dp_m_n.link_n);
10332
		PIPE_CONF_CHECK_I(dp_m_n.tu);
10333
 
5354 serge 10334
		if (current_config->has_drrs) {
10335
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
10336
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
10337
			PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
10338
			PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
10339
			PIPE_CONF_CHECK_I(dp_m2_n2.tu);
10340
		}
10341
	} else {
10342
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
10343
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
10344
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
10345
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
10346
		PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
10347
	}
10348
 
4104 Serge 10349
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10350
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
10351
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
10352
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
10353
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
10354
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
10355
 
10356
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
10357
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
10358
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
10359
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
10360
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
10361
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
10362
 
10363
	PIPE_CONF_CHECK_I(pixel_multiplier);
5060 serge 10364
	PIPE_CONF_CHECK_I(has_hdmi_sink);
10365
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10366
	    IS_VALLEYVIEW(dev))
10367
		PIPE_CONF_CHECK_I(limited_color_range);
5354 serge 10368
	PIPE_CONF_CHECK_I(has_infoframe);
4104 Serge 10369
 
5060 serge 10370
	PIPE_CONF_CHECK_I(has_audio);
10371
 
4104 Serge 10372
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10373
			      DRM_MODE_FLAG_INTERLACE);
10374
 
10375
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
10376
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10377
				      DRM_MODE_FLAG_PHSYNC);
10378
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10379
				      DRM_MODE_FLAG_NHSYNC);
10380
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10381
				      DRM_MODE_FLAG_PVSYNC);
10382
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10383
				      DRM_MODE_FLAG_NVSYNC);
10384
	}
10385
 
4560 Serge 10386
	PIPE_CONF_CHECK_I(pipe_src_w);
10387
	PIPE_CONF_CHECK_I(pipe_src_h);
4104 Serge 10388
 
5060 serge 10389
	/*
10390
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
10391
	 * screen. Since we don't yet re-compute the pipe config when moving
10392
	 * just the lvds port away to another pipe the sw tracking won't match.
10393
	 *
10394
	 * Proper atomic modesets with recomputed global state will fix this.
10395
	 * Until then just don't check gmch state for inherited modes.
10396
	 */
10397
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
4104 Serge 10398
		PIPE_CONF_CHECK_I(gmch_pfit.control);
10399
		/* pfit ratios are autocomputed by the hw on gen4+ */
10400
		if (INTEL_INFO(dev)->gen < 4)
10401
			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
10402
		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
5060 serge 10403
	}
10404
 
4104 Serge 10405
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
10406
	if (current_config->pch_pfit.enabled) {
10407
		PIPE_CONF_CHECK_I(pch_pfit.pos);
10408
		PIPE_CONF_CHECK_I(pch_pfit.size);
10409
	}
10410
 
4560 Serge 10411
	/* BDW+ don't expose a synchronous way to read the state */
10412
	if (IS_HASWELL(dev))
4104 Serge 10413
		PIPE_CONF_CHECK_I(ips_enabled);
10414
 
4560 Serge 10415
	PIPE_CONF_CHECK_I(double_wide);
10416
 
5060 serge 10417
	PIPE_CONF_CHECK_X(ddi_pll_sel);
10418
 
4104 Serge 10419
	PIPE_CONF_CHECK_I(shared_dpll);
10420
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10421
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10422
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10423
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5060 serge 10424
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
5354 serge 10425
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
10426
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
10427
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
4104 Serge 10428
 
4280 Serge 10429
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10430
		PIPE_CONF_CHECK_I(pipe_bpp);
10431
 
4560 Serge 10432
	PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
10433
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10434
 
4104 Serge 10435
#undef PIPE_CONF_CHECK_X
10436
#undef PIPE_CONF_CHECK_I
5354 serge 10437
#undef PIPE_CONF_CHECK_I_ALT
4104 Serge 10438
#undef PIPE_CONF_CHECK_FLAGS
4560 Serge 10439
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
4104 Serge 10440
#undef PIPE_CONF_QUIRK
10441
 
3746 Serge 10442
	return true;
10443
}
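/*
 * Standalone sketch of the PIPE_CONF_CHECK_* pattern above (hypothetical
 * struct and fields, not driver code, guarded out so it never builds here):
 * each macro stringifies the member name so a mismatch reports exactly which
 * field differed, then bails out of the comparison early.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct cfg { int pipe_bpp; int port_clock; };

#define CFG_CHECK_I(name) \
	if (cur->name != cand->name) { \
		fprintf(stderr, "mismatch in " #name " (expected %i, found %i)\n", \
			cur->name, cand->name); \
		return false; \
	}

static bool cfg_compare(const struct cfg *cur, const struct cfg *cand)
{
	CFG_CHECK_I(pipe_bpp);
	CFG_CHECK_I(port_clock);
	return true;
}

int main(void)
{
	struct cfg sw = { 24, 270000 }, hw = { 18, 270000 };

	return cfg_compare(&sw, &hw) ? 0 : 1;	/* reports "mismatch in pipe_bpp" */
}
#endif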
10444
 
5354 serge 10445
static void check_wm_state(struct drm_device *dev)
10446
{
10447
	struct drm_i915_private *dev_priv = dev->dev_private;
10448
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
10449
	struct intel_crtc *intel_crtc;
10450
	int plane;
10451
 
10452
	if (INTEL_INFO(dev)->gen < 9)
10453
		return;
10454
 
10455
	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
10456
	sw_ddb = &dev_priv->wm.skl_hw.ddb;
10457
 
10458
	for_each_intel_crtc(dev, intel_crtc) {
10459
		struct skl_ddb_entry *hw_entry, *sw_entry;
10460
		const enum pipe pipe = intel_crtc->pipe;
10461
 
10462
		if (!intel_crtc->active)
10463
			continue;
10464
 
10465
		/* planes */
10466
		for_each_plane(pipe, plane) {
10467
			hw_entry = &hw_ddb.plane[pipe][plane];
10468
			sw_entry = &sw_ddb->plane[pipe][plane];
10469
 
10470
			if (skl_ddb_entry_equal(hw_entry, sw_entry))
10471
				continue;
10472
 
10473
			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
10474
				  "(expected (%u,%u), found (%u,%u))\n",
10475
				  pipe_name(pipe), plane + 1,
10476
				  sw_entry->start, sw_entry->end,
10477
				  hw_entry->start, hw_entry->end);
10478
		}
10479
 
10480
		/* cursor */
10481
		hw_entry = &hw_ddb.cursor[pipe];
10482
		sw_entry = &sw_ddb->cursor[pipe];
10483
 
10484
		if (skl_ddb_entry_equal(hw_entry, sw_entry))
10485
			continue;
10486
 
10487
		DRM_ERROR("mismatch in DDB state pipe %c cursor "
10488
			  "(expected (%u,%u), found (%u,%u))\n",
10489
			  pipe_name(pipe),
10490
			  sw_entry->start, sw_entry->end,
10491
			  hw_entry->start, hw_entry->end);
10492
	}
10493
}
10494
 
4104 Serge 10495
static void
10496
check_connector_state(struct drm_device *dev)
3031 serge 10497
{
10498
	struct intel_connector *connector;
10499
 
10500
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10501
			    base.head) {
10502
		/* This also checks the encoder/connector hw state with the
10503
		 * ->get_hw_state callbacks. */
10504
		intel_connector_check_state(connector);
10505
 
10506
		WARN(&connector->new_encoder->base != connector->base.encoder,
10507
		     "connector's staged encoder doesn't match current encoder\n");
10508
	}
4104 Serge 10509
}
3031 serge 10510
 
4104 Serge 10511
static void
10512
check_encoder_state(struct drm_device *dev)
10513
{
10514
	struct intel_encoder *encoder;
10515
	struct intel_connector *connector;
10516
 
5354 serge 10517
	for_each_intel_encoder(dev, encoder) {
3031 serge 10518
		bool enabled = false;
10519
		bool active = false;
10520
		enum pipe pipe, tracked_pipe;
10521
 
10522
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
10523
			      encoder->base.base.id,
5060 serge 10524
			      encoder->base.name);
3031 serge 10525
 
10526
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
10527
		     "encoder's staged crtc doesn't match current crtc\n");
10528
		WARN(encoder->connectors_active && !encoder->base.crtc,
10529
		     "encoder's active_connectors set, but no crtc\n");
10530
 
10531
		list_for_each_entry(connector, &dev->mode_config.connector_list,
10532
				    base.head) {
10533
			if (connector->base.encoder != &encoder->base)
10534
				continue;
10535
			enabled = true;
10536
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
10537
				active = true;
10538
		}
5060 serge 10539
		/*
10540
		 * for MST connectors, if we unplug, the connector goes away
10541
		 * but the encoder is still connected to a crtc
10542
		 * until a modeset happens in response to the hotplug.
10543
		 */
10544
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
10545
			continue;
10546
 
3031 serge 10547
		WARN(!!encoder->base.crtc != enabled,
10548
		     "encoder's enabled state mismatch "
10549
		     "(expected %i, found %i)\n",
10550
		     !!encoder->base.crtc, enabled);
10551
		WARN(active && !encoder->base.crtc,
10552
		     "active encoder with no crtc\n");
10553
 
10554
		WARN(encoder->connectors_active != active,
10555
		     "encoder's computed active state doesn't match tracked active state "
10556
		     "(expected %i, found %i)\n", active, encoder->connectors_active);
10557
 
10558
		active = encoder->get_hw_state(encoder, &pipe);
10559
		WARN(active != encoder->connectors_active,
10560
		     "encoder's hw state doesn't match sw tracking "
10561
		     "(expected %i, found %i)\n",
10562
		     encoder->connectors_active, active);
10563
 
10564
		if (!encoder->base.crtc)
10565
			continue;
10566
 
10567
		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
10568
		WARN(active && pipe != tracked_pipe,
10569
		     "active encoder's pipe doesn't match "
10570
		     "(expected %i, found %i)\n",
10571
		     tracked_pipe, pipe);
10572
 
10573
	}
4104 Serge 10574
}
3031 serge 10575
 
4104 Serge 10576
static void
10577
check_crtc_state(struct drm_device *dev)
10578
{
5060 serge 10579
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 10580
	struct intel_crtc *crtc;
10581
	struct intel_encoder *encoder;
10582
	struct intel_crtc_config pipe_config;
10583
 
5060 serge 10584
	for_each_intel_crtc(dev, crtc) {
3031 serge 10585
		bool enabled = false;
10586
		bool active = false;
10587
 
4104 Serge 10588
		memset(&pipe_config, 0, sizeof(pipe_config));
10589
 
3031 serge 10590
		DRM_DEBUG_KMS("[CRTC:%d]\n",
10591
			      crtc->base.base.id);
10592
 
10593
		WARN(crtc->active && !crtc->base.enabled,
10594
		     "active crtc, but not enabled in sw tracking\n");
10595
 
5354 serge 10596
		for_each_intel_encoder(dev, encoder) {
3031 serge 10597
			if (encoder->base.crtc != &crtc->base)
10598
				continue;
10599
			enabled = true;
10600
			if (encoder->connectors_active)
10601
				active = true;
10602
		}
4104 Serge 10603
 
3031 serge 10604
		WARN(active != crtc->active,
10605
		     "crtc's computed active state doesn't match tracked active state "
10606
		     "(expected %i, found %i)\n", active, crtc->active);
10607
		WARN(enabled != crtc->base.enabled,
10608
		     "crtc's computed enabled state doesn't match tracked enabled state "
10609
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
10610
 
3746 Serge 10611
		active = dev_priv->display.get_pipe_config(crtc,
10612
							   &pipe_config);
10613
 
5354 serge 10614
		/* hw state is inconsistent with the pipe quirk */
10615
		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
10616
		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
3746 Serge 10617
			active = crtc->active;
10618
 
5354 serge 10619
		for_each_intel_encoder(dev, encoder) {
4104 Serge 10620
			enum pipe pipe;
10621
			if (encoder->base.crtc != &crtc->base)
10622
				continue;
4560 Serge 10623
			if (encoder->get_hw_state(encoder, &pipe))
4104 Serge 10624
				encoder->get_config(encoder, &pipe_config);
10625
		}
10626
 
3746 Serge 10627
		WARN(crtc->active != active,
10628
		     "crtc active state doesn't match with hw state "
10629
		     "(expected %i, found %i)\n", crtc->active, active);
10630
 
4104 Serge 10631
		if (active &&
10632
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
10633
			WARN(1, "pipe state doesn't match!\n");
10634
			intel_dump_pipe_config(crtc, &pipe_config,
10635
					       "[hw state]");
10636
			intel_dump_pipe_config(crtc, &crtc->config,
10637
					       "[sw state]");
10638
		}
3031 serge 10639
	}
10640
}
10641
 
4104 Serge 10642
static void
10643
check_shared_dpll_state(struct drm_device *dev)
10644
{
5060 serge 10645
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 10646
	struct intel_crtc *crtc;
10647
	struct intel_dpll_hw_state dpll_hw_state;
10648
	int i;
10649
 
10650
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10651
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10652
		int enabled_crtcs = 0, active_crtcs = 0;
10653
		bool active;
10654
 
10655
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
10656
 
10657
		DRM_DEBUG_KMS("%s\n", pll->name);
10658
 
10659
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
10660
 
5354 serge 10661
		WARN(pll->active > hweight32(pll->config.crtc_mask),
4104 Serge 10662
		     "more active pll users than references: %i vs %i\n",
5354 serge 10663
		     pll->active, hweight32(pll->config.crtc_mask));
4104 Serge 10664
		WARN(pll->active && !pll->on,
10665
		     "pll in active use but not on in sw tracking\n");
10666
		WARN(pll->on && !pll->active,
10667
		     "pll is on but not in use in sw tracking\n");
10668
		WARN(pll->on != active,
10669
		     "pll on state mismatch (expected %i, found %i)\n",
10670
		     pll->on, active);
10671
 
5060 serge 10672
		for_each_intel_crtc(dev, crtc) {
4104 Serge 10673
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
10674
				enabled_crtcs++;
10675
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10676
				active_crtcs++;
10677
		}
10678
		WARN(pll->active != active_crtcs,
10679
		     "pll active crtcs mismatch (expected %i, found %i)\n",
10680
		     pll->active, active_crtcs);
5354 serge 10681
		WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
4104 Serge 10682
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
5354 serge 10683
		     hweight32(pll->config.crtc_mask), enabled_crtcs);
4104 Serge 10684
 
5354 serge 10685
		WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
4104 Serge 10686
				       sizeof(dpll_hw_state)),
10687
		     "pll hw state mismatch\n");
10688
	}
10689
}
10690
 
10691
void
10692
intel_modeset_check_state(struct drm_device *dev)
10693
{
5354 serge 10694
	check_wm_state(dev);
4104 Serge 10695
	check_connector_state(dev);
10696
	check_encoder_state(dev);
10697
	check_crtc_state(dev);
10698
	check_shared_dpll_state(dev);
10699
}
10700
 
4560 Serge 10701
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
10702
				     int dotclock)
10703
{
10704
	/*
10705
	 * FDI already provided one idea for the dotclock.
10706
	 * Yell if the encoder disagrees.
10707
	 */
10708
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
10709
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10710
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
10711
}
10712
 
5060 serge 10713
static void update_scanline_offset(struct intel_crtc *crtc)
10714
{
10715
	struct drm_device *dev = crtc->base.dev;
10716
 
10717
	/*
10718
	 * The scanline counter increments at the leading edge of hsync.
10719
	 *
10720
	 * On most platforms it starts counting from vtotal-1 on the
10721
	 * first active line. That means the scanline counter value is
10722
	 * always one less than what we would expect. Ie. just after
10723
	 * start of vblank, which also occurs at start of hsync (on the
10724
	 * last active line), the scanline counter will read vblank_start-1.
10725
	 *
10726
	 * On gen2 the scanline counter starts counting from 1 instead
10727
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10728
	 * to keep the value positive), instead of adding one.
10729
	 *
10730
	 * On HSW+ the behaviour of the scanline counter depends on the output
10731
	 * type. For DP ports it behaves like most other platforms, but on HDMI
10732
	 * there's an extra 1 line difference. So we need to add two instead of
10733
	 * one to the value.
10734
	 */
10735
	if (IS_GEN2(dev)) {
10736
		const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
10737
		int vtotal;
10738
 
10739
		vtotal = mode->crtc_vtotal;
10740
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
10741
			vtotal /= 2;
10742
 
10743
		crtc->scanline_offset = vtotal - 1;
10744
	} else if (HAS_DDI(dev) &&
5354 serge 10745
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
5060 serge 10746
		crtc->scanline_offset = 2;
10747
	} else
10748
		crtc->scanline_offset = 1;
10749
}
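/*
 * Standalone sketch of the scanline offset rules above (hypothetical helper
 * and timings, not driver code, guarded out so it never builds here): gen2
 * counters start at 1, so the offset is vtotal - 1 (with vtotal halved for
 * interlaced modes); HSW+ DDI HDMI needs 2; every other case uses 1.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static int scanline_offset(bool gen2, bool ddi_hdmi, int vtotal, bool interlaced)
{
	if (gen2)
		return (interlaced ? vtotal / 2 : vtotal) - 1;
	if (ddi_hdmi)
		return 2;
	return 1;
}

int main(void)
{
	printf("%d\n", scanline_offset(true, false, 525, false));	/* 524 */
	printf("%d\n", scanline_offset(false, true, 1125, false));	/* 2 */
	printf("%d\n", scanline_offset(false, false, 1125, false));	/* 1 */
	return 0;
}
#endif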
10750
 
5354 serge 10751
static struct intel_crtc_config *
10752
intel_modeset_compute_config(struct drm_crtc *crtc,
10753
			     struct drm_display_mode *mode,
10754
			     struct drm_framebuffer *fb,
10755
			     unsigned *modeset_pipes,
10756
			     unsigned *prepare_pipes,
10757
			     unsigned *disable_pipes)
10758
{
10759
	struct intel_crtc_config *pipe_config = NULL;
10760
 
10761
	intel_modeset_affected_pipes(crtc, modeset_pipes,
10762
				     prepare_pipes, disable_pipes);
10763
 
10764
	if ((*modeset_pipes) == 0)
10765
		goto out;
10766
 
10767
	/*
10768
	 * Note this needs changes when we start tracking multiple modes
10769
	 * and crtcs.  At that point we'll need to compute the whole config
10770
	 * (i.e. one pipe_config for each crtc) rather than just the one
10771
	 * for this crtc.
10772
	 */
10773
	pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
10774
	if (IS_ERR(pipe_config)) {
10775
		goto out;
10776
	}
10777
	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
10778
			       "[modeset]");
10779
 
10780
out:
10781
	return pipe_config;
10782
}
10783
 
3746 Serge 10784
static int __intel_set_mode(struct drm_crtc *crtc,
3031 serge 10785
		    struct drm_display_mode *mode,
5354 serge 10786
			    int x, int y, struct drm_framebuffer *fb,
10787
			    struct intel_crtc_config *pipe_config,
10788
			    unsigned modeset_pipes,
10789
			    unsigned prepare_pipes,
10790
			    unsigned disable_pipes)
3031 serge 10791
{
10792
	struct drm_device *dev = crtc->dev;
5060 serge 10793
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 10794
	struct drm_display_mode *saved_mode;
3031 serge 10795
	struct intel_crtc *intel_crtc;
3480 Serge 10796
	int ret = 0;
3031 serge 10797
 
4560 Serge 10798
	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
3480 Serge 10799
	if (!saved_mode)
10800
		return -ENOMEM;
10801
 
10802
	*saved_mode = crtc->mode;
3031 serge 10803
 
5354 serge 10804
	if (modeset_pipes)
5060 serge 10805
		to_intel_crtc(crtc)->new_config = pipe_config;
3031 serge 10806
 
4560 Serge 10807
	/*
10808
	 * See if the config requires any additional preparation, e.g.
10809
	 * to adjust global state with pipes off.  We need to do this
10810
	 * here so we can get the modeset_pipe updated config for the new
10811
	 * mode set on this crtc.  For other crtcs we need to use the
10812
	 * adjusted_mode bits in the crtc directly.
10813
	 */
10814
	if (IS_VALLEYVIEW(dev)) {
5060 serge 10815
		valleyview_modeset_global_pipes(dev, &prepare_pipes);
4560 Serge 10816
 
10817
		/* may have added more to prepare_pipes than we should */
10818
		prepare_pipes &= ~disable_pipes;
10819
	}
10820
 
5354 serge 10821
	if (dev_priv->display.crtc_compute_clock) {
10822
		unsigned clear_pipes = modeset_pipes | disable_pipes;
10823
 
10824
		ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
10825
		if (ret)
10826
			goto done;
10827
 
10828
		for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
10829
			ret = dev_priv->display.crtc_compute_clock(intel_crtc);
10830
			if (ret) {
10831
				intel_shared_dpll_abort_config(dev_priv);
10832
				goto done;
10833
			}
10834
		}
10835
	}
10836
 
3746 Serge 10837
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
10838
		intel_crtc_disable(&intel_crtc->base);
10839
 
3031 serge 10840
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10841
		if (intel_crtc->base.enabled)
10842
			dev_priv->display.crtc_disable(&intel_crtc->base);
10843
	}
10844
 
10845
	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
10846
	 * to set it here already despite that we pass it down the callchain.
5354 serge 10847
	 *
10848
	 * Note we'll need to fix this up when we start tracking multiple
10849
	 * pipes; here we assume a single modeset_pipe and only track the
10850
	 * single crtc and mode.
2330 Serge 10851
	 */
3746 Serge 10852
	if (modeset_pipes) {
3031 serge 10853
		crtc->mode = *mode;
3746 Serge 10854
		/* mode_set/enable/disable functions rely on a correct pipe
10855
		 * config. */
10856
		to_intel_crtc(crtc)->config = *pipe_config;
5060 serge 10857
		to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
4560 Serge 10858
 
10859
		/*
10860
		 * Calculate and store various constants which
10861
		 * are later needed by vblank and swap-completion
10862
		 * timestamping. They are derived from true hwmode.
10863
		 */
10864
		drm_calc_timestamping_constants(crtc,
10865
						&pipe_config->adjusted_mode);
3746 Serge 10866
	}
2327 Serge 10867
 
3031 serge 10868
	/* Only after disabling all output pipelines that will be changed can we
10869
	 * update the output configuration. */
10870
	intel_modeset_update_state(dev, prepare_pipes);
10871
 
5354 serge 10872
	modeset_update_crtc_power_domains(dev);
3243 Serge 10873
 
3031 serge 10874
	/* Set up the DPLL and any encoders state that needs to adjust or depend
10875
	 * on the DPLL.
2330 Serge 10876
	 */
3031 serge 10877
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
5060 serge 10878
		struct drm_framebuffer *old_fb = crtc->primary->fb;
10879
		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
10880
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10881
 
10882
		mutex_lock(&dev->struct_mutex);
5354 serge 10883
		ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
5060 serge 10884
		if (ret != 0) {
10885
			DRM_ERROR("pin & fence failed\n");
10886
			mutex_unlock(&dev->struct_mutex);
10887
			goto done;
10888
		}
10889
		if (old_fb)
10890
			intel_unpin_fb_obj(old_obj);
10891
		i915_gem_track_fb(old_obj, obj,
10892
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
10893
		mutex_unlock(&dev->struct_mutex);
10894
 
10895
		crtc->primary->fb = fb;
10896
		crtc->x = x;
10897
		crtc->y = y;
3031 serge 10898
	}
10899
 
10900
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
5060 serge 10901
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10902
		update_scanline_offset(intel_crtc);
10903
 
3031 serge 10904
		dev_priv->display.crtc_enable(&intel_crtc->base);
5060 serge 10905
	}
3031 serge 10906
 
10907
	/* FIXME: add subpixel order */
10908
done:
4560 Serge 10909
	if (ret && crtc->enabled)
3480 Serge 10910
		crtc->mode = *saved_mode;
3031 serge 10911
 
3746 Serge 10912
	kfree(pipe_config);
3480 Serge 10913
	kfree(saved_mode);
3031 serge 10914
	return ret;
2330 Serge 10915
}
2327 Serge 10916
 
5354 serge 10917
static int intel_set_mode_pipes(struct drm_crtc *crtc,
3746 Serge 10918
				struct drm_display_mode *mode,
5354 serge 10919
				int x, int y, struct drm_framebuffer *fb,
10920
				struct intel_crtc_config *pipe_config,
10921
				unsigned modeset_pipes,
10922
				unsigned prepare_pipes,
10923
				unsigned disable_pipes)
3746 Serge 10924
{
10925
	int ret;
10926
 
5354 serge 10927
	ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
10928
			       prepare_pipes, disable_pipes);
3746 Serge 10929
 
10930
	if (ret == 0)
10931
		intel_modeset_check_state(crtc->dev);
10932
 
10933
	return ret;
10934
}
10935
 
5354 serge 10936
static int intel_set_mode(struct drm_crtc *crtc,
10937
			  struct drm_display_mode *mode,
10938
			  int x, int y, struct drm_framebuffer *fb)
10939
{
10940
	struct intel_crtc_config *pipe_config;
10941
	unsigned modeset_pipes, prepare_pipes, disable_pipes;
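	/*
	 * Rough sketch of the contract, as these masks are used here:
	 * disable_pipes are CRTCs that end up off, prepare_pipes are CRTCs
	 * that must be disabled and re-enabled around the update, and
	 * modeset_pipes is the subset of prepare_pipes that gets a full
	 * mode programmed.
	 */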
10942
 
10943
	pipe_config = intel_modeset_compute_config(crtc, mode, fb,
10944
						   &modeset_pipes,
10945
						   &prepare_pipes,
10946
						   &disable_pipes);
10947
 
10948
	if (IS_ERR(pipe_config))
10949
		return PTR_ERR(pipe_config);
10950
 
10951
	return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
10952
				    modeset_pipes, prepare_pipes,
10953
				    disable_pipes);
10954
}
10955
 
3480 Serge 10956
void intel_crtc_restore_mode(struct drm_crtc *crtc)
10957
{
5060 serge 10958
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
3480 Serge 10959
}
10960
 
3031 serge 10961
#undef for_each_intel_crtc_masked
2327 Serge 10962
 
3031 serge 10963
static void intel_set_config_free(struct intel_set_config *config)
10964
{
10965
	if (!config)
10966
		return;
10967
 
10968
	kfree(config->save_connector_encoders);
10969
	kfree(config->save_encoder_crtcs);
5060 serge 10970
	kfree(config->save_crtc_enabled);
3031 serge 10971
	kfree(config);
10972
}
10973
 
10974
static int intel_set_config_save_state(struct drm_device *dev,
10975
				       struct intel_set_config *config)
10976
{
5060 serge 10977
	struct drm_crtc *crtc;
3031 serge 10978
	struct drm_encoder *encoder;
10979
	struct drm_connector *connector;
10980
	int count;
10981
 
5060 serge 10982
	config->save_crtc_enabled =
10983
		kcalloc(dev->mode_config.num_crtc,
10984
			sizeof(bool), GFP_KERNEL);
10985
	if (!config->save_crtc_enabled)
10986
		return -ENOMEM;
10987
 
3031 serge 10988
	config->save_encoder_crtcs =
10989
		kcalloc(dev->mode_config.num_encoder,
10990
			sizeof(struct drm_crtc *), GFP_KERNEL);
10991
	if (!config->save_encoder_crtcs)
10992
		return -ENOMEM;
10993
 
10994
	config->save_connector_encoders =
10995
		kcalloc(dev->mode_config.num_connector,
10996
			sizeof(struct drm_encoder *), GFP_KERNEL);
10997
	if (!config->save_connector_encoders)
10998
		return -ENOMEM;
10999
 
11000
	/* Copy data. Note that driver private data is not affected.
11001
	 * Should anything bad happen only the expected state is
11002
	 * restored, not the driver's personal bookkeeping.
11003
	 */
11004
	count = 0;
5060 serge 11005
	for_each_crtc(dev, crtc) {
11006
		config->save_crtc_enabled[count++] = crtc->enabled;
11007
	}
11008
 
11009
	count = 0;
3031 serge 11010
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
11011
		config->save_encoder_crtcs[count++] = encoder->crtc;
11012
	}
11013
 
11014
	count = 0;
11015
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
11016
		config->save_connector_encoders[count++] = connector->encoder;
11017
	}
11018
 
11019
	return 0;
11020
}
11021
 
11022
static void intel_set_config_restore_state(struct drm_device *dev,
11023
					   struct intel_set_config *config)
11024
{
5060 serge 11025
	struct intel_crtc *crtc;
3031 serge 11026
	struct intel_encoder *encoder;
11027
	struct intel_connector *connector;
11028
	int count;
11029
 
11030
	count = 0;
5060 serge 11031
	for_each_intel_crtc(dev, crtc) {
11032
		crtc->new_enabled = config->save_crtc_enabled[count++];
11033
 
11034
		if (crtc->new_enabled)
11035
			crtc->new_config = &crtc->config;
11036
		else
11037
			crtc->new_config = NULL;
11038
	}
11039
 
11040
	count = 0;
5354 serge 11041
	for_each_intel_encoder(dev, encoder) {
3031 serge 11042
		encoder->new_crtc =
11043
			to_intel_crtc(config->save_encoder_crtcs[count++]);
11044
	}
11045
 
11046
	count = 0;
11047
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
11048
		connector->new_encoder =
11049
			to_intel_encoder(config->save_connector_encoders[count++]);
11050
	}
11051
}
11052
 
3746 Serge 11053
static bool
4104 Serge 11054
is_crtc_connector_off(struct drm_mode_set *set)
3746 Serge 11055
{
11056
	int i;
11057
 
4104 Serge 11058
	if (set->num_connectors == 0)
11059
		return false;
11060
 
11061
	if (WARN_ON(set->connectors == NULL))
11062
		return false;
11063
 
11064
	for (i = 0; i < set->num_connectors; i++)
11065
		if (set->connectors[i]->encoder &&
11066
		    set->connectors[i]->encoder->crtc == set->crtc &&
11067
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
3746 Serge 11068
			return true;
11069
 
11070
	return false;
11071
}
11072
 
3031 serge 11073
static void
11074
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
11075
				      struct intel_set_config *config)
11076
{
11077
 
11078
	/* We should be able to check here if the fb has the same properties
11079
	 * and then just flip_or_move it */
4104 Serge 11080
	if (is_crtc_connector_off(set)) {
3746 Serge 11081
		config->mode_changed = true;
5060 serge 11082
	} else if (set->crtc->primary->fb != set->fb) {
11083
		/*
11084
		 * If we have no fb, we can only flip as long as the crtc is
11085
		 * active, otherwise we need a full mode set.  The crtc may
11086
		 * be active if we've only disabled the primary plane, or
11087
		 * in fastboot situations.
11088
		 */
11089
		if (set->crtc->primary->fb == NULL) {
4104 Serge 11090
			struct intel_crtc *intel_crtc =
11091
				to_intel_crtc(set->crtc);
11092
 
5060 serge 11093
			if (intel_crtc->active) {
4104 Serge 11094
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
11095
				config->fb_changed = true;
11096
			} else {
11097
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
3031 serge 11098
				config->mode_changed = true;
4104 Serge 11099
			}
3031 serge 11100
		} else if (set->fb == NULL) {
11101
			config->mode_changed = true;
3746 Serge 11102
		} else if (set->fb->pixel_format !=
5060 serge 11103
			   set->crtc->primary->fb->pixel_format) {
3031 serge 11104
			config->mode_changed = true;
3746 Serge 11105
		} else {
3031 serge 11106
			config->fb_changed = true;
11107
		}
3746 Serge 11108
	}
3031 serge 11109
 
11110
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
11111
		config->fb_changed = true;
11112
 
11113
	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
11114
		DRM_DEBUG_KMS("modes are different, full mode set\n");
11115
		drm_mode_debug_printmodeline(&set->crtc->mode);
11116
		drm_mode_debug_printmodeline(set->mode);
11117
		config->mode_changed = true;
11118
	}
4104 Serge 11119
 
11120
	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
11121
			set->crtc->base.id, config->mode_changed, config->fb_changed);
3031 serge 11122
}
11123
 
11124
static int
11125
intel_modeset_stage_output_state(struct drm_device *dev,
11126
				 struct drm_mode_set *set,
11127
				 struct intel_set_config *config)
11128
{
11129
	struct intel_connector *connector;
11130
	struct intel_encoder *encoder;
5060 serge 11131
	struct intel_crtc *crtc;
4104 Serge 11132
	int ro;
3031 serge 11133
 
3480 Serge 11134
	/* The upper layers ensure that we either disable a crtc or have a list
3031 serge 11135
	 * of connectors. For paranoia, double-check this. */
11136
	WARN_ON(!set->fb && (set->num_connectors != 0));
11137
	WARN_ON(set->fb && (set->num_connectors == 0));
11138
 
11139
	list_for_each_entry(connector, &dev->mode_config.connector_list,
11140
			    base.head) {
11141
		/* Otherwise traverse passed in connector list and get encoders
11142
		 * for them. */
11143
		for (ro = 0; ro < set->num_connectors; ro++) {
11144
			if (set->connectors[ro] == &connector->base) {
5060 serge 11145
				connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
3031 serge 11146
				break;
11147
			}
11148
		}
11149
 
11150
		/* If we disable the crtc, disable all its connectors. Also, if
11151
		 * the connector is on the changing crtc but not on the new
11152
		 * connector list, disable it. */
11153
		if ((!set->fb || ro == set->num_connectors) &&
11154
		    connector->base.encoder &&
11155
		    connector->base.encoder->crtc == set->crtc) {
11156
			connector->new_encoder = NULL;
11157
 
11158
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
11159
				connector->base.base.id,
5060 serge 11160
				connector->base.name);
3031 serge 11161
		}
11162
 
11163
 
11164
		if (&connector->new_encoder->base != connector->base.encoder) {
11165
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
11166
			config->mode_changed = true;
11167
		}
11168
	}
11169
	/* connector->new_encoder is now updated for all connectors. */
11170
 
11171
	/* Update crtc of enabled connectors. */
11172
	list_for_each_entry(connector, &dev->mode_config.connector_list,
11173
			    base.head) {
5060 serge 11174
		struct drm_crtc *new_crtc;
11175
 
3031 serge 11176
		if (!connector->new_encoder)
11177
			continue;
11178
 
11179
		new_crtc = connector->new_encoder->base.crtc;
11180
 
11181
		for (ro = 0; ro < set->num_connectors; ro++) {
11182
			if (set->connectors[ro] == &connector->base)
11183
				new_crtc = set->crtc;
11184
		}
11185
 
11186
		/* Make sure the new CRTC will work with the encoder */
4560 Serge 11187
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
3031 serge 11188
					   new_crtc)) {
11189
			return -EINVAL;
11190
		}
5060 serge 11191
		connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
3031 serge 11192
 
11193
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
11194
			connector->base.base.id,
5060 serge 11195
			connector->base.name,
3031 serge 11196
			new_crtc->base.id);
11197
	}
11198
 
11199
	/* Check for any encoders that needs to be disabled. */
5354 serge 11200
	for_each_intel_encoder(dev, encoder) {
4560 Serge 11201
		int num_connectors = 0;
3031 serge 11202
		list_for_each_entry(connector,
11203
				    &dev->mode_config.connector_list,
11204
				    base.head) {
11205
			if (connector->new_encoder == encoder) {
11206
				WARN_ON(!connector->new_encoder->new_crtc);
4560 Serge 11207
				num_connectors++;
3031 serge 11208
			}
11209
		}
4560 Serge 11210
 
11211
		if (num_connectors == 0)
3031 serge 11212
			encoder->new_crtc = NULL;
4560 Serge 11213
		else if (num_connectors > 1)
11214
			return -EINVAL;
11215
 
3031 serge 11216
		/* Only now check for crtc changes so we don't miss encoders
11217
		 * that will be disabled. */
11218
		if (&encoder->new_crtc->base != encoder->base.crtc) {
11219
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
11220
			config->mode_changed = true;
11221
		}
11222
	}
11223
	/* Now we've also updated encoder->new_crtc for all encoders. */
5060 serge 11224
	list_for_each_entry(connector, &dev->mode_config.connector_list,
11225
			    base.head) {
11226
		if (connector->new_encoder)
11227
			if (connector->new_encoder != connector->encoder)
11228
				connector->encoder = connector->new_encoder;
11229
	}
11230
	for_each_intel_crtc(dev, crtc) {
11231
		crtc->new_enabled = false;
3031 serge 11232
 
5354 serge 11233
		for_each_intel_encoder(dev, encoder) {
5060 serge 11234
			if (encoder->new_crtc == crtc) {
11235
				crtc->new_enabled = true;
11236
				break;
11237
			}
11238
		}
11239
 
11240
		if (crtc->new_enabled != crtc->base.enabled) {
11241
			DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
11242
				      crtc->new_enabled ? "en" : "dis");
11243
			config->mode_changed = true;
11244
		}
11245
 
11246
		if (crtc->new_enabled)
11247
			crtc->new_config = &crtc->config;
11248
		else
11249
			crtc->new_config = NULL;
11250
	}
11251
 
3031 serge 11252
	return 0;
11253
}
11254
 
5060 serge 11255
static void disable_crtc_nofb(struct intel_crtc *crtc)
11256
{
11257
	struct drm_device *dev = crtc->base.dev;
11258
	struct intel_encoder *encoder;
11259
	struct intel_connector *connector;
11260
 
11261
	DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
11262
		      pipe_name(crtc->pipe));
11263
 
11264
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
11265
		if (connector->new_encoder &&
11266
		    connector->new_encoder->new_crtc == crtc)
11267
			connector->new_encoder = NULL;
11268
	}
11269
 
5354 serge 11270
	for_each_intel_encoder(dev, encoder) {
5060 serge 11271
		if (encoder->new_crtc == crtc)
11272
			encoder->new_crtc = NULL;
11273
	}
11274
 
11275
	crtc->new_enabled = false;
11276
	crtc->new_config = NULL;
11277
}
11278
 
3031 serge 11279
static int intel_crtc_set_config(struct drm_mode_set *set)
11280
{
11281
	struct drm_device *dev;
11282
	struct drm_mode_set save_set;
11283
	struct intel_set_config *config;
5354 serge 11284
	struct intel_crtc_config *pipe_config;
11285
	unsigned modeset_pipes, prepare_pipes, disable_pipes;
3031 serge 11286
	int ret;
11287
 
11288
	BUG_ON(!set);
11289
	BUG_ON(!set->crtc);
11290
	BUG_ON(!set->crtc->helper_private);
11291
 
3480 Serge 11292
	/* Enforce sane interface api - has been abused by the fb helper. */
11293
	BUG_ON(!set->mode && set->fb);
11294
	BUG_ON(set->fb && set->num_connectors == 0);
3031 serge 11295
 
11296
	if (set->fb) {
11297
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
11298
				set->crtc->base.id, set->fb->base.id,
11299
				(int)set->num_connectors, set->x, set->y);
11300
	} else {
11301
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
11302
	}
11303
 
11304
	dev = set->crtc->dev;
11305
 
11306
	ret = -ENOMEM;
11307
	config = kzalloc(sizeof(*config), GFP_KERNEL);
11308
	if (!config)
11309
		goto out_config;
11310
 
11311
	ret = intel_set_config_save_state(dev, config);
11312
	if (ret)
11313
		goto out_config;
11314
 
11315
	save_set.crtc = set->crtc;
11316
	save_set.mode = &set->crtc->mode;
11317
	save_set.x = set->crtc->x;
11318
	save_set.y = set->crtc->y;
5060 serge 11319
	save_set.fb = set->crtc->primary->fb;
3031 serge 11320
 
11321
	/* Compute whether we need a full modeset, only an fb base update or no
11322
	 * change at all. In the future we might also check whether only the
11323
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
11324
	 * such cases. */
11325
	intel_set_config_compute_mode_changes(set, config);
11326
 
11327
	ret = intel_modeset_stage_output_state(dev, set, config);
11328
	if (ret)
11329
		goto fail;
11330
 
5354 serge 11331
	pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
11332
						   set->fb,
11333
						   &modeset_pipes,
11334
						   &prepare_pipes,
11335
						   &disable_pipes);
11336
	if (IS_ERR(pipe_config)) {
11337
		ret = PTR_ERR(pipe_config);
11338
		goto fail;
11339
	} else if (pipe_config) {
11340
		if (pipe_config->has_audio !=
11341
		    to_intel_crtc(set->crtc)->config.has_audio)
11342
			config->mode_changed = true;
11343
 
11344
		/*
11345
		 * Note we have an issue here with infoframes: current code
11346
		 * only updates them on the full mode set path per hw
11347
		 * requirements.  So here we should be checking for any
11348
		 * required changes and forcing a mode set.
11349
		 */
11350
	}
11351
 
11352
	/* set_mode will free it in the mode_changed case */
11353
	if (!config->mode_changed)
11354
		kfree(pipe_config);
11355
 
11356
	intel_update_pipe_size(to_intel_crtc(set->crtc));
11357
 
3031 serge 11358
	if (config->mode_changed) {
5354 serge 11359
		ret = intel_set_mode_pipes(set->crtc, set->mode,
11360
					   set->x, set->y, set->fb, pipe_config,
11361
					   modeset_pipes, prepare_pipes,
11362
					   disable_pipes);
3031 serge 11363
	} else if (config->fb_changed) {
5060 serge 11364
		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
3746 Serge 11365
 
5354 serge 11366
//       intel_crtc_wait_for_pending_flips(set->crtc);
5060 serge 11367
 
3031 serge 11368
		ret = intel_pipe_set_base(set->crtc,
11369
					  set->x, set->y, set->fb);
5060 serge 11370
 
4560 Serge 11371
		/*
5060 serge 11372
		 * We need to make sure the primary plane is re-enabled if it
11373
		 * has previously been turned off.
11374
		 */
11375
		if (!intel_crtc->primary_enabled && ret == 0) {
11376
			WARN_ON(!intel_crtc->active);
5354 serge 11377
			intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
5060 serge 11378
		}
11379
 
11380
		/*
4560 Serge 11381
		 * In the fastboot case this may be our only check of the
11382
		 * state after boot.  It would be better to only do it on
11383
		 * the first update, but we don't have a nice way of doing that
11384
		 * (and really, set_config isn't used much for high freq page
11385
		 * flipping, so increasing its cost here shouldn't be a big
11386
		 * deal).
11387
		 */
5060 serge 11388
		if (i915.fastboot && ret == 0)
4560 Serge 11389
			intel_modeset_check_state(set->crtc->dev);
3031 serge 11390
	}
11391
 
3746 Serge 11392
	if (ret) {
4104 Serge 11393
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
3746 Serge 11394
			  set->crtc->base.id, ret);
3031 serge 11395
fail:
11396
		intel_set_config_restore_state(dev, config);
11397
 
5060 serge 11398
		/*
11399
		 * HACK: if the pipe was on, but we didn't have a framebuffer,
11400
		 * force the pipe off to avoid oopsing in the modeset code
11401
		 * due to fb==NULL. This should only happen during boot since
11402
		 * we don't yet reconstruct the FB from the hardware state.
11403
		 */
11404
		if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
11405
			disable_crtc_nofb(to_intel_crtc(save_set.crtc));
11406
 
3031 serge 11407
		/* Try to restore the config */
11408
		if (config->mode_changed &&
3480 Serge 11409
		    intel_set_mode(save_set.crtc, save_set.mode,
3031 serge 11410
				   save_set.x, save_set.y, save_set.fb))
11411
			DRM_ERROR("failed to restore config after modeset failure\n");
3746 Serge 11412
	}
3031 serge 11413
 
11414
out_config:
11415
	intel_set_config_free(config);
11416
	return ret;
11417
}
11418
 
2330 Serge 11419
static const struct drm_crtc_funcs intel_crtc_funcs = {
11420
	.gamma_set = intel_crtc_gamma_set,
3031 serge 11421
	.set_config = intel_crtc_set_config,
2330 Serge 11422
	.destroy = intel_crtc_destroy,
11423
//	.page_flip = intel_crtc_page_flip,
11424
};
2327 Serge 11425
 
4104 Serge 11426
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
11427
				      struct intel_shared_dpll *pll,
11428
				      struct intel_dpll_hw_state *hw_state)
3031 serge 11429
{
4104 Serge 11430
	uint32_t val;
3031 serge 11431
 
5354 serge 11432
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
5060 serge 11433
		return false;
11434
 
4104 Serge 11435
	val = I915_READ(PCH_DPLL(pll->id));
11436
	hw_state->dpll = val;
11437
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
11438
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
11439
 
11440
	return val & DPLL_VCO_ENABLE;
11441
}
11442
 
11443
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
11444
				  struct intel_shared_dpll *pll)
11445
{
5354 serge 11446
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
11447
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
4104 Serge 11448
}
11449
 
11450
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
11451
				struct intel_shared_dpll *pll)
11452
{
11453
	/* PCH refclock must be enabled first */
4560 Serge 11454
	ibx_assert_pch_refclk_enabled(dev_priv);
4104 Serge 11455
 
5354 serge 11456
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
4104 Serge 11457
 
11458
	/* Wait for the clocks to stabilize. */
11459
	POSTING_READ(PCH_DPLL(pll->id));
11460
	udelay(150);
11461
 
11462
	/* The pixel multiplier can only be updated once the
11463
	 * DPLL is enabled and the clocks are stable.
11464
	 *
11465
	 * So write it again.
11466
	 */
5354 serge 11467
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
4104 Serge 11468
	POSTING_READ(PCH_DPLL(pll->id));
11469
	udelay(200);
11470
}
11471
 
11472
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
11473
				 struct intel_shared_dpll *pll)
11474
{
11475
	struct drm_device *dev = dev_priv->dev;
11476
	struct intel_crtc *crtc;
11477
 
11478
	/* Make sure no transcoder is still depending on us. */
5060 serge 11479
	for_each_intel_crtc(dev, crtc) {
4104 Serge 11480
		if (intel_crtc_to_shared_dpll(crtc) == pll)
11481
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
3031 serge 11482
	}
11483
 
4104 Serge 11484
	I915_WRITE(PCH_DPLL(pll->id), 0);
11485
	POSTING_READ(PCH_DPLL(pll->id));
11486
	udelay(200);
11487
}
11488
 
11489
static char *ibx_pch_dpll_names[] = {
11490
	"PCH DPLL A",
11491
	"PCH DPLL B",
11492
};
11493
 
11494
static void ibx_pch_dpll_init(struct drm_device *dev)
11495
{
11496
	struct drm_i915_private *dev_priv = dev->dev_private;
11497
	int i;
11498
 
11499
	dev_priv->num_shared_dpll = 2;
11500
 
11501
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11502
		dev_priv->shared_dplls[i].id = i;
11503
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
11504
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
11505
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
11506
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
11507
		dev_priv->shared_dplls[i].get_hw_state =
11508
			ibx_pch_dpll_get_hw_state;
3031 serge 11509
	}
11510
}
11511
 
4104 Serge 11512
static void intel_shared_dpll_init(struct drm_device *dev)
11513
{
11514
	struct drm_i915_private *dev_priv = dev->dev_private;
11515
 
5060 serge 11516
	if (HAS_DDI(dev))
11517
		intel_ddi_pll_init(dev);
11518
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4104 Serge 11519
		ibx_pch_dpll_init(dev);
11520
	else
11521
		dev_priv->num_shared_dpll = 0;
11522
 
11523
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
11524
}
11525
 
5060 serge 11526
static int
11527
intel_primary_plane_disable(struct drm_plane *plane)
11528
{
11529
	struct drm_device *dev = plane->dev;
11530
	struct intel_crtc *intel_crtc;
11531
 
11532
	if (!plane->fb)
11533
		return 0;
11534
 
11535
	BUG_ON(!plane->crtc);
11536
 
11537
	intel_crtc = to_intel_crtc(plane->crtc);
11538
 
11539
	/*
11540
	 * Even though we checked plane->fb above, it's still possible that
11541
	 * the primary plane has been implicitly disabled because the crtc
11542
	 * coordinates given weren't visible, or because we detected
11543
	 * that it was 100% covered by a sprite plane.  Or, the CRTC may be
11544
	 * off and we've set a fb, but haven't actually turned on the CRTC yet.
11545
	 * In either case, we need to unpin the FB and let the fb pointer get
11546
	 * updated, but otherwise we don't need to touch the hardware.
11547
	 */
11548
	if (!intel_crtc->primary_enabled)
11549
		goto disable_unpin;
11550
 
5354 serge 11551
//   intel_crtc_wait_for_pending_flips(plane->crtc);
11552
	intel_disable_primary_hw_plane(plane, plane->crtc);
11553
 
5060 serge 11554
disable_unpin:
11555
	mutex_lock(&dev->struct_mutex);
11556
	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
11557
			  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11558
	intel_unpin_fb_obj(intel_fb_obj(plane->fb));
11559
	mutex_unlock(&dev->struct_mutex);
11560
	plane->fb = NULL;
11561
 
11562
	return 0;
11563
}
11564
 
11565
static int
5354 serge 11566
intel_check_primary_plane(struct drm_plane *plane,
11567
			  struct intel_plane_state *state)
5060 serge 11568
{
5354 serge 11569
	struct drm_crtc *crtc = state->crtc;
11570
	struct drm_framebuffer *fb = state->fb;
11571
	struct drm_rect *dest = &state->dst;
11572
	struct drm_rect *src = &state->src;
11573
	const struct drm_rect *clip = &state->clip;
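	/*
	 * Note: passing DRM_PLANE_HELPER_NO_SCALING as both the minimum and
	 * maximum scale makes the helper reject any update whose 16.16 source
	 * size differs from the destination size, and can_position = false
	 * keeps the primary plane covering the whole pipe.
	 */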
11574
 
11575
	return drm_plane_helper_check_update(plane, crtc, fb,
11576
					     src, dest, clip,
11577
					     DRM_PLANE_HELPER_NO_SCALING,
11578
					     DRM_PLANE_HELPER_NO_SCALING,
11579
					     false, true, &state->visible);
11580
}
11581
 
11582
static int
11583
intel_prepare_primary_plane(struct drm_plane *plane,
11584
			    struct intel_plane_state *state)
11585
{
11586
	struct drm_crtc *crtc = state->crtc;
11587
	struct drm_framebuffer *fb = state->fb;
5060 serge 11588
	struct drm_device *dev = crtc->dev;
11589
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5354 serge 11590
	enum pipe pipe = intel_crtc->pipe;
5060 serge 11591
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11592
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11593
	int ret;
11594
 
11595
 
11596
 
5354 serge 11597
	if (old_obj != obj) {
5060 serge 11598
		mutex_lock(&dev->struct_mutex);
5354 serge 11599
		ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
11600
		if (ret == 0)
11601
			i915_gem_track_fb(old_obj, obj,
11602
					  INTEL_FRONTBUFFER_PRIMARY(pipe));
11603
		mutex_unlock(&dev->struct_mutex);
11604
		if (ret != 0) {
11605
			DRM_DEBUG_KMS("pin & fence failed\n");
11606
			return ret;
11607
		}
11608
	}
5060 serge 11609
 
5354 serge 11610
	return 0;
11611
}
11612
 
11613
static void
11614
intel_commit_primary_plane(struct drm_plane *plane,
11615
			   struct intel_plane_state *state)
11616
{
11617
	struct drm_crtc *crtc = state->crtc;
11618
	struct drm_framebuffer *fb = state->fb;
11619
	struct drm_device *dev = crtc->dev;
11620
	struct drm_i915_private *dev_priv = dev->dev_private;
11621
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11622
	enum pipe pipe = intel_crtc->pipe;
11623
	struct drm_framebuffer *old_fb = plane->fb;
11624
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11625
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11626
	struct intel_plane *intel_plane = to_intel_plane(plane);
11627
	struct drm_rect *src = &state->src;
11628
 
11629
	crtc->primary->fb = fb;
11630
	crtc->x = src->x1 >> 16;
11631
	crtc->y = src->y1 >> 16;
11632
 
11633
	intel_plane->crtc_x = state->orig_dst.x1;
11634
	intel_plane->crtc_y = state->orig_dst.y1;
11635
	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
11636
	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
11637
	intel_plane->src_x = state->orig_src.x1;
11638
	intel_plane->src_y = state->orig_src.y1;
11639
	intel_plane->src_w = drm_rect_width(&state->orig_src);
11640
	intel_plane->src_h = drm_rect_height(&state->orig_src);
11641
	intel_plane->obj = obj;
11642
 
11643
	if (intel_crtc->active) {
5060 serge 11644
		/*
5354 serge 11645
		 * FBC does not work on some platforms for rotated
11646
		 * planes, so disable it when rotation is not 0 and
11647
		 * update it when rotation is set back to 0.
11648
		 *
11649
		 * FIXME: This is redundant with the fbc update done in
11650
		 * the primary plane enable function except that that
11651
		 * one is done too late. We eventually need to unify
11652
		 * this.
5060 serge 11653
		 */
5354 serge 11654
		if (intel_crtc->primary_enabled &&
11655
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11656
		    dev_priv->fbc.plane == intel_crtc->plane &&
11657
		    intel_plane->rotation != BIT(DRM_ROTATE_0)) {
11658
			intel_disable_fbc(dev);
11659
		}
5060 serge 11660
 
5354 serge 11661
		if (state->visible) {
11662
			bool was_enabled = intel_crtc->primary_enabled;
5060 serge 11663
 
5354 serge 11664
			/* FIXME: kill this fastboot hack */
11665
			intel_update_pipe_size(intel_crtc);
5060 serge 11666
 
5354 serge 11667
			intel_crtc->primary_enabled = true;
5060 serge 11668
 
5354 serge 11669
			dev_priv->display.update_primary_plane(crtc, plane->fb,
11670
					crtc->x, crtc->y);
5060 serge 11671
 
11672
			/*
5354 serge 11673
			 * BDW signals flip done immediately if the plane
11674
			 * is disabled, even if the plane enable is already
11675
			 * armed to occur at the next vblank :(
11676
			 */
11677
			if (IS_BROADWELL(dev) && !was_enabled)
11678
				intel_wait_for_vblank(dev, intel_crtc->pipe);
11679
		} else {
11680
			/*
11681
			 * If clipping results in a non-visible primary plane,
11682
			 * we'll disable the primary plane.  Note that this is
11683
			 * a bit different than what happens if userspace
11684
			 * explicitly disables the plane by passing fb=0
5060 serge 11685
			 * because plane->fb still gets set and pinned.
11686
			 */
5354 serge 11687
			intel_disable_primary_hw_plane(plane, crtc);
11688
		}
11689
 
11690
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
11691
 
5060 serge 11692
		mutex_lock(&dev->struct_mutex);
5354 serge 11693
		intel_update_fbc(dev);
11694
		mutex_unlock(&dev->struct_mutex);
11695
	}
5060 serge 11696
 
5354 serge 11697
	if (old_fb && old_fb != fb) {
11698
		if (intel_crtc->active)
11699
			intel_wait_for_vblank(dev, intel_crtc->pipe);
11700
 
11701
		mutex_lock(&dev->struct_mutex);
11702
		intel_unpin_fb_obj(old_obj);
5060 serge 11703
		mutex_unlock(&dev->struct_mutex);
11704
	}
5354 serge 11705
}
5060 serge 11706
 
5354 serge 11707
static int
11708
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11709
			     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11710
			     unsigned int crtc_w, unsigned int crtc_h,
11711
			     uint32_t src_x, uint32_t src_y,
11712
			     uint32_t src_w, uint32_t src_h)
11713
{
11714
	struct intel_plane_state state;
11715
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11716
	int ret;
5060 serge 11717
 
5354 serge 11718
	state.crtc = crtc;
11719
	state.fb = fb;
5060 serge 11720
 
5354 serge 11721
	/* sample coordinates in 16.16 fixed point */
11722
	state.src.x1 = src_x;
11723
	state.src.x2 = src_x + src_w;
11724
	state.src.y1 = src_y;
11725
	state.src.y2 = src_y + src_h;
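	/*
	 * Example: a source x offset of 100 pixels arrives here as
	 * 100 << 16 == 0x640000; intel_commit_primary_plane() recovers the
	 * integer pixel with src->x1 >> 16.
	 */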
5060 serge 11726
 
5354 serge 11727
	/* integer pixels */
11728
	state.dst.x1 = crtc_x;
11729
	state.dst.x2 = crtc_x + crtc_w;
11730
	state.dst.y1 = crtc_y;
11731
	state.dst.y2 = crtc_y + crtc_h;
5060 serge 11732
 
5354 serge 11733
	state.clip.x1 = 0;
11734
	state.clip.y1 = 0;
11735
	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
11736
	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
5060 serge 11737
 
5354 serge 11738
	state.orig_src = state.src;
11739
	state.orig_dst = state.dst;
5060 serge 11740
 
5354 serge 11741
	ret = intel_check_primary_plane(plane, &state);
5060 serge 11742
	if (ret)
11743
		return ret;
11744
 
5354 serge 11745
	ret = intel_prepare_primary_plane(plane, &state);
11746
	if (ret)
11747
		return ret;
5060 serge 11748
 
5354 serge 11749
	intel_commit_primary_plane(plane, &state);
11750
 
5060 serge 11751
	return 0;
11752
}
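 
/*
 * Illustrative sketch only (not part of the driver, hence #if 0): how a
 * hypothetical caller would drive the update_plane hook above.  The real
 * entry point is the DRM core's setplane path; the 800x600 framebuffer
 * size used below is an assumption for the example.
 */
#if 0
static int example_show_whole_fb(struct drm_plane *plane, struct drm_crtc *crtc,
				 struct drm_framebuffer *fb)
{
	/* destination in integer pixels, source in 16.16 fixed point */
	return plane->funcs->update_plane(plane, crtc, fb,
					  0, 0, 800, 600,
					  0, 0, 800 << 16, 600 << 16);
}
#endif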
11753
 
11754
/* Common destruction function for both primary and cursor planes */
11755
static void intel_plane_destroy(struct drm_plane *plane)
11756
{
11757
	struct intel_plane *intel_plane = to_intel_plane(plane);
11758
	drm_plane_cleanup(plane);
11759
	kfree(intel_plane);
11760
}
11761
 
11762
static const struct drm_plane_funcs intel_primary_plane_funcs = {
11763
	.update_plane = intel_primary_plane_setplane,
11764
	.disable_plane = intel_primary_plane_disable,
11765
	.destroy = intel_plane_destroy,
5354 serge 11766
	.set_property = intel_plane_set_property
5060 serge 11767
};
11768
 
11769
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11770
						    int pipe)
11771
{
11772
	struct intel_plane *primary;
11773
	const uint32_t *intel_primary_formats;
11774
	int num_formats;
11775
 
11776
	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
11777
	if (primary == NULL)
11778
		return NULL;
11779
 
11780
	primary->can_scale = false;
11781
	primary->max_downscale = 1;
11782
	primary->pipe = pipe;
11783
	primary->plane = pipe;
5354 serge 11784
	primary->rotation = BIT(DRM_ROTATE_0);
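	/* on gen2/3 only plane A can do FBC while the LVDS panel sits on
	 * pipe B, so swap plane and pipe here, matching intel_crtc_init() */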
5060 serge 11785
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
11786
		primary->plane = !pipe;
11787
 
11788
	if (INTEL_INFO(dev)->gen <= 3) {
11789
		intel_primary_formats = intel_primary_formats_gen2;
11790
		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
11791
	} else {
11792
		intel_primary_formats = intel_primary_formats_gen4;
11793
		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
11794
	}
11795
 
11796
	drm_universal_plane_init(dev, &primary->base, 0,
11797
				 &intel_primary_plane_funcs,
11798
				 intel_primary_formats, num_formats,
11799
				 DRM_PLANE_TYPE_PRIMARY);
5354 serge 11800
 
11801
	if (INTEL_INFO(dev)->gen >= 4) {
11802
		if (!dev->mode_config.rotation_property)
11803
			dev->mode_config.rotation_property =
11804
				drm_mode_create_rotation_property(dev,
11805
							BIT(DRM_ROTATE_0) |
11806
							BIT(DRM_ROTATE_180));
11807
		if (dev->mode_config.rotation_property)
11808
			drm_object_attach_property(&primary->base.base,
11809
				dev->mode_config.rotation_property,
11810
				primary->rotation);
11811
	}
11812
 
5060 serge 11813
	return &primary->base;
11814
}
11815
 
11816
static int
11817
intel_cursor_plane_disable(struct drm_plane *plane)
11818
{
11819
	if (!plane->fb)
11820
		return 0;
11821
 
11822
	BUG_ON(!plane->crtc);
11823
 
11824
	return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
11825
}
11826
 
11827
static int
5354 serge 11828
intel_check_cursor_plane(struct drm_plane *plane,
11829
			 struct intel_plane_state *state)
5060 serge 11830
{
5354 serge 11831
	struct drm_crtc *crtc = state->crtc;
11832
	struct drm_device *dev = crtc->dev;
11833
	struct drm_framebuffer *fb = state->fb;
11834
	struct drm_rect *dest = &state->dst;
11835
	struct drm_rect *src = &state->src;
11836
	const struct drm_rect *clip = &state->clip;
11837
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11838
	int crtc_w, crtc_h;
11839
	unsigned stride;
5060 serge 11840
	int ret;
11841
 
11842
	ret = drm_plane_helper_check_update(plane, crtc, fb,
5354 serge 11843
					    src, dest, clip,
5060 serge 11844
					    DRM_PLANE_HELPER_NO_SCALING,
11845
					    DRM_PLANE_HELPER_NO_SCALING,
5354 serge 11846
					    true, true, &state->visible);
5060 serge 11847
	if (ret)
11848
		return ret;
11849
 
5354 serge 11850
 
11851
	/* if we want to turn off the cursor ignore width and height */
11852
	if (!obj)
11853
		return 0;
11854
 
11855
	/* Check for which cursor types we support */
11856
	crtc_w = drm_rect_width(&state->orig_dst);
11857
	crtc_h = drm_rect_height(&state->orig_dst);
11858
	if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
11859
		DRM_DEBUG("Cursor dimension not supported\n");
11860
		return -EINVAL;
11861
	}
11862
 
11863
	stride = roundup_pow_of_two(crtc_w) * 4;
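	/*
	 * Example: the cursor is always ARGB8888 (4 bytes per pixel) and the
	 * pitch is rounded up to a power of two, so a 64x64 cursor needs at
	 * least 64 * 4 * 64 = 16 KiB of backing storage, and a 100-pixel-wide
	 * cursor gets a 128 * 4 = 512 byte stride.
	 */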
11864
	if (obj->base.size < stride * crtc_h) {
11865
		DRM_DEBUG_KMS("buffer is too small\n");
11866
		return -ENOMEM;
11867
	}
11868
 
11869
	if (fb == crtc->cursor->fb)
11870
		return 0;
11871
 
11872
	/* we only need to pin inside GTT if cursor is non-phy */
11873
	mutex_lock(&dev->struct_mutex);
11874
	if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
11875
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
11876
		ret = -EINVAL;
11877
	}
11878
	mutex_unlock(&dev->struct_mutex);
11879
 
11880
	return ret;
11881
}
11882
 
11883
static int
11884
intel_commit_cursor_plane(struct drm_plane *plane,
11885
			  struct intel_plane_state *state)
11886
{
11887
	struct drm_crtc *crtc = state->crtc;
11888
	struct drm_framebuffer *fb = state->fb;
11889
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11890
	struct intel_plane *intel_plane = to_intel_plane(plane);
11891
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11892
	struct drm_i915_gem_object *obj = intel_fb->obj;
11893
	int crtc_w, crtc_h;
11894
 
11895
	crtc->cursor_x = state->orig_dst.x1;
11896
	crtc->cursor_y = state->orig_dst.y1;
11897
 
11898
	intel_plane->crtc_x = state->orig_dst.x1;
11899
	intel_plane->crtc_y = state->orig_dst.y1;
11900
	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
11901
	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
11902
	intel_plane->src_x = state->orig_src.x1;
11903
	intel_plane->src_y = state->orig_src.y1;
11904
	intel_plane->src_w = drm_rect_width(&state->orig_src);
11905
	intel_plane->src_h = drm_rect_height(&state->orig_src);
11906
	intel_plane->obj = obj;
11907
 
5060 serge 11908
	if (fb != crtc->cursor->fb) {
5354 serge 11909
		crtc_w = drm_rect_width(&state->orig_dst);
11910
		crtc_h = drm_rect_height(&state->orig_dst);
5060 serge 11911
		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
11912
	} else {
5354 serge 11913
		intel_crtc_update_cursor(crtc, state->visible);
11914
 
11915
 
5060 serge 11916
		return 0;
11917
	}
11918
}
5354 serge 11919
 
11920
static int
11921
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
11922
			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11923
			  unsigned int crtc_w, unsigned int crtc_h,
11924
			  uint32_t src_x, uint32_t src_y,
11925
			  uint32_t src_w, uint32_t src_h)
11926
{
11927
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11928
	struct intel_plane_state state;
11929
	int ret;
11930
 
11931
	state.crtc = crtc;
11932
	state.fb = fb;
11933
 
11934
	/* sample coordinates in 16.16 fixed point */
11935
	state.src.x1 = src_x;
11936
	state.src.x2 = src_x + src_w;
11937
	state.src.y1 = src_y;
11938
	state.src.y2 = src_y + src_h;
11939
 
11940
	/* integer pixels */
11941
	state.dst.x1 = crtc_x;
11942
	state.dst.x2 = crtc_x + crtc_w;
11943
	state.dst.y1 = crtc_y;
11944
	state.dst.y2 = crtc_y + crtc_h;
11945
 
11946
	state.clip.x1 = 0;
11947
	state.clip.y1 = 0;
11948
	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
11949
	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
11950
 
11951
	state.orig_src = state.src;
11952
	state.orig_dst = state.dst;
11953
 
11954
	ret = intel_check_cursor_plane(plane, &state);
11955
	if (ret)
11956
		return ret;
11957
 
11958
	return intel_commit_cursor_plane(plane, &state);
11959
}
11960
 
5060 serge 11961
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
11962
	.update_plane = intel_cursor_plane_update,
11963
	.disable_plane = intel_cursor_plane_disable,
11964
	.destroy = intel_plane_destroy,
5354 serge 11965
	.set_property = intel_plane_set_property,
5060 serge 11966
};
11967
 
11968
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
11969
						   int pipe)
11970
{
11971
	struct intel_plane *cursor;
11972
 
11973
	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
11974
	if (cursor == NULL)
11975
		return NULL;
11976
 
11977
	cursor->can_scale = false;
11978
	cursor->max_downscale = 1;
11979
	cursor->pipe = pipe;
11980
	cursor->plane = pipe;
5354 serge 11981
	cursor->rotation = BIT(DRM_ROTATE_0);
5060 serge 11982
 
11983
	drm_universal_plane_init(dev, &cursor->base, 0,
11984
				 &intel_cursor_plane_funcs,
11985
				 intel_cursor_formats,
11986
				 ARRAY_SIZE(intel_cursor_formats),
11987
				 DRM_PLANE_TYPE_CURSOR);
5354 serge 11988
 
11989
	if (INTEL_INFO(dev)->gen >= 4) {
11990
		if (!dev->mode_config.rotation_property)
11991
			dev->mode_config.rotation_property =
11992
				drm_mode_create_rotation_property(dev,
11993
							BIT(DRM_ROTATE_0) |
11994
							BIT(DRM_ROTATE_180));
11995
		if (dev->mode_config.rotation_property)
11996
			drm_object_attach_property(&cursor->base.base,
11997
				dev->mode_config.rotation_property,
11998
				cursor->rotation);
11999
	}
12000
 
5060 serge 12001
	return &cursor->base;
12002
}
12003
 
2330 Serge 12004
static void intel_crtc_init(struct drm_device *dev, int pipe)
12005
{
5060 serge 12006
	struct drm_i915_private *dev_priv = dev->dev_private;
2330 Serge 12007
	struct intel_crtc *intel_crtc;
5060 serge 12008
	struct drm_plane *primary = NULL;
12009
	struct drm_plane *cursor = NULL;
12010
	int i, ret;
2327 Serge 12011
 
4560 Serge 12012
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
2330 Serge 12013
	if (intel_crtc == NULL)
12014
		return;
2327 Serge 12015
 
5060 serge 12016
	primary = intel_primary_plane_create(dev, pipe);
12017
	if (!primary)
12018
		goto fail;
2327 Serge 12019
 
5060 serge 12020
	cursor = intel_cursor_plane_create(dev, pipe);
12021
	if (!cursor)
12022
		goto fail;
12023
 
12024
	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
12025
					cursor, &intel_crtc_funcs);
12026
	if (ret)
12027
		goto fail;
12028
 
2330 Serge 12029
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
12030
	for (i = 0; i < 256; i++) {
12031
		intel_crtc->lut_r[i] = i;
12032
		intel_crtc->lut_g[i] = i;
12033
		intel_crtc->lut_b[i] = i;
12034
	}
2327 Serge 12035
 
4560 Serge 12036
	/*
12037
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
5060 serge 12038
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
4560 Serge 12039
	 */
2330 Serge 12040
	intel_crtc->pipe = pipe;
12041
	intel_crtc->plane = pipe;
4560 Serge 12042
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
2330 Serge 12043
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
12044
		intel_crtc->plane = !pipe;
12045
	}
2327 Serge 12046
 
5060 serge 12047
	intel_crtc->cursor_base = ~0;
12048
	intel_crtc->cursor_cntl = ~0;
5354 serge 12049
	intel_crtc->cursor_size = ~0;
5060 serge 12050
 
2330 Serge 12051
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
12052
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
12053
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
12054
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
2327 Serge 12055
 
2330 Serge 12056
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
5060 serge 12057
 
12058
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
12059
	return;
12060
 
12061
fail:
12062
	if (primary)
12063
		drm_plane_cleanup(primary);
12064
	if (cursor)
12065
		drm_plane_cleanup(cursor);
12066
	kfree(intel_crtc);
2330 Serge 12067
}
2327 Serge 12068
 
4560 Serge 12069
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
12070
{
12071
	struct drm_encoder *encoder = connector->base.encoder;
5060 serge 12072
	struct drm_device *dev = connector->base.dev;
4560 Serge 12073
 
5060 serge 12074
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4560 Serge 12075
 
5354 serge 12076
	if (!encoder || WARN_ON(!encoder->crtc))
4560 Serge 12077
		return INVALID_PIPE;
12078
 
12079
	return to_intel_crtc(encoder->crtc)->pipe;
12080
}
12081
 
3031 serge 12082
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
12083
				struct drm_file *file)
12084
{
12085
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
5060 serge 12086
	struct drm_crtc *drmmode_crtc;
3031 serge 12087
	struct intel_crtc *crtc;
2327 Serge 12088
 
3482 Serge 12089
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
12090
		return -ENODEV;
12091
 
5060 serge 12092
	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
2327 Serge 12093
 
5060 serge 12094
	if (!drmmode_crtc) {
3031 serge 12095
		DRM_ERROR("no such CRTC id\n");
4560 Serge 12096
		return -ENOENT;
3031 serge 12097
	}
2327 Serge 12098
 
5060 serge 12099
	crtc = to_intel_crtc(drmmode_crtc);
3031 serge 12100
	pipe_from_crtc_id->pipe = crtc->pipe;
2327 Serge 12101
 
3031 serge 12102
	return 0;
12103
}
2327 Serge 12104
 
3031 serge 12105
static int intel_encoder_clones(struct intel_encoder *encoder)
2330 Serge 12106
{
3031 serge 12107
	struct drm_device *dev = encoder->base.dev;
12108
	struct intel_encoder *source_encoder;
2330 Serge 12109
	int index_mask = 0;
12110
	int entry = 0;
2327 Serge 12111
 
5354 serge 12112
	for_each_intel_encoder(dev, source_encoder) {
5060 serge 12113
		if (encoders_cloneable(encoder, source_encoder))
2330 Serge 12114
			index_mask |= (1 << entry);
3031 serge 12115
 
2330 Serge 12116
		entry++;
12117
	}
2327 Serge 12118
 
2330 Serge 12119
	return index_mask;
12120
}
2327 Serge 12121
 
2330 Serge 12122
static bool has_edp_a(struct drm_device *dev)
12123
{
12124
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 12125
 
2330 Serge 12126
	if (!IS_MOBILE(dev))
12127
		return false;
2327 Serge 12128
 
2330 Serge 12129
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
12130
		return false;
2327 Serge 12131
 
5060 serge 12132
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
2330 Serge 12133
		return false;
2327 Serge 12134
 
2330 Serge 12135
	return true;
12136
}
2327 Serge 12137
 
4560 Serge 12138
const char *intel_output_name(int output)
12139
{
12140
	static const char *names[] = {
12141
		[INTEL_OUTPUT_UNUSED] = "Unused",
12142
		[INTEL_OUTPUT_ANALOG] = "Analog",
12143
		[INTEL_OUTPUT_DVO] = "DVO",
12144
		[INTEL_OUTPUT_SDVO] = "SDVO",
12145
		[INTEL_OUTPUT_LVDS] = "LVDS",
12146
		[INTEL_OUTPUT_TVOUT] = "TV",
12147
		[INTEL_OUTPUT_HDMI] = "HDMI",
12148
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
12149
		[INTEL_OUTPUT_EDP] = "eDP",
12150
		[INTEL_OUTPUT_DSI] = "DSI",
12151
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
12152
	};
12153
 
12154
	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
12155
		return "Invalid";
12156
 
12157
	return names[output];
12158
}
12159
 
5060 serge 12160
static bool intel_crt_present(struct drm_device *dev)
12161
{
12162
	struct drm_i915_private *dev_priv = dev->dev_private;
12163
 
5354 serge 12164
	if (INTEL_INFO(dev)->gen >= 9)
5060 serge 12165
		return false;
12166
 
5354 serge 12167
	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
12168
		return false;
12169
 
5060 serge 12170
	if (IS_CHERRYVIEW(dev))
12171
		return false;
12172
 
12173
	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
12174
		return false;
12175
 
12176
	return true;
12177
}
12178
 
2330 Serge 12179
static void intel_setup_outputs(struct drm_device *dev)
12180
{
12181
	struct drm_i915_private *dev_priv = dev->dev_private;
12182
	struct intel_encoder *encoder;
12183
	bool dpd_is_edp = false;
2327 Serge 12184
 
4104 Serge 12185
	intel_lvds_init(dev);
2327 Serge 12186
 
5060 serge 12187
	if (intel_crt_present(dev))
2330 Serge 12188
		intel_crt_init(dev);
2327 Serge 12189
 
3480 Serge 12190
	if (HAS_DDI(dev)) {
2330 Serge 12191
		int found;
2327 Serge 12192
 
3031 serge 12193
		/* Haswell uses DDI functions to detect digital outputs */
12194
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
12195
		/* DDI A only supports eDP */
12196
		if (found)
12197
			intel_ddi_init(dev, PORT_A);
12198
 
12199
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
12200
		 * register */
12201
		found = I915_READ(SFUSE_STRAP);
12202
 
12203
		if (found & SFUSE_STRAP_DDIB_DETECTED)
12204
			intel_ddi_init(dev, PORT_B);
12205
		if (found & SFUSE_STRAP_DDIC_DETECTED)
12206
			intel_ddi_init(dev, PORT_C);
12207
		if (found & SFUSE_STRAP_DDID_DETECTED)
12208
			intel_ddi_init(dev, PORT_D);
12209
	} else if (HAS_PCH_SPLIT(dev)) {
12210
		int found;
4560 Serge 12211
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
3031 serge 12212
 
3243 Serge 12213
		if (has_edp_a(dev))
12214
			intel_dp_init(dev, DP_A, PORT_A);
12215
 
3746 Serge 12216
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
2330 Serge 12217
			/* PCH SDVOB multiplex with HDMIB */
3031 serge 12218
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
2330 Serge 12219
			if (!found)
3746 Serge 12220
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
2330 Serge 12221
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
3031 serge 12222
				intel_dp_init(dev, PCH_DP_B, PORT_B);
2330 Serge 12223
		}
2327 Serge 12224
 
3746 Serge 12225
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
12226
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
2327 Serge 12227
 
3746 Serge 12228
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
12229
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
2327 Serge 12230
 
2330 Serge 12231
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
3031 serge 12232
			intel_dp_init(dev, PCH_DP_C, PORT_C);
2327 Serge 12233
 
3243 Serge 12234
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
3031 serge 12235
			intel_dp_init(dev, PCH_DP_D, PORT_D);
12236
	} else if (IS_VALLEYVIEW(dev)) {
5354 serge 12237
		/*
12238
		 * The DP_DETECTED bit is the latched state of the DDC
12239
		 * SDA pin at boot. However since eDP doesn't require DDC
12240
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
12241
		 * eDP ports may have been muxed to an alternate function.
12242
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
12243
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
12244
		 * detect eDP ports.
12245
		 */
12246
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
4560 Serge 12247
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
12248
					PORT_B);
5354 serge 12249
		if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
12250
		    intel_dp_is_edp(dev, PORT_B))
4560 Serge 12251
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
12252
 
5354 serge 12253
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
4104 Serge 12254
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
12255
					PORT_C);
5354 serge 12256
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
12257
		    intel_dp_is_edp(dev, PORT_C))
4560 Serge 12258
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
3243 Serge 12259
 
5060 serge 12260
		if (IS_CHERRYVIEW(dev)) {
5354 serge 12261
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
5060 serge 12262
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
12263
						PORT_D);
5354 serge 12264
			/* eDP not supported on port D, so don't check VBT */
5060 serge 12265
			if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
12266
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
12267
		}
12268
 
4560 Serge 12269
		intel_dsi_init(dev);
2330 Serge 12270
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
12271
		bool found = false;
2327 Serge 12272
 
3746 Serge 12273
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
2330 Serge 12274
			DRM_DEBUG_KMS("probing SDVOB\n");
3746 Serge 12275
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
2330 Serge 12276
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
12277
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
3746 Serge 12278
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
2330 Serge 12279
			}
2327 Serge 12280
 
4104 Serge 12281
			if (!found && SUPPORTS_INTEGRATED_DP(dev))
3031 serge 12282
				intel_dp_init(dev, DP_B, PORT_B);
2330 Serge 12283
		}
2327 Serge 12284
 
2330 Serge 12285
		/* Before G4X SDVOC doesn't have its own detect register */
2327 Serge 12286
 
3746 Serge 12287
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
2330 Serge 12288
			DRM_DEBUG_KMS("probing SDVOC\n");
3746 Serge 12289
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
2330 Serge 12290
		}
2327 Serge 12291
 
3746 Serge 12292
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
2327 Serge 12293
 
2330 Serge 12294
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
12295
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
3746 Serge 12296
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
2330 Serge 12297
			}
4104 Serge 12298
			if (SUPPORTS_INTEGRATED_DP(dev))
3031 serge 12299
				intel_dp_init(dev, DP_C, PORT_C);
2330 Serge 12300
		}
2327 Serge 12301
 
2330 Serge 12302
		if (SUPPORTS_INTEGRATED_DP(dev) &&
4104 Serge 12303
		    (I915_READ(DP_D) & DP_DETECTED))
3031 serge 12304
			intel_dp_init(dev, DP_D, PORT_D);
2330 Serge 12305
	} else if (IS_GEN2(dev))
12306
		intel_dvo_init(dev);
2327 Serge 12307
 
12308
 
5354 serge 12309
	intel_psr_init(dev);
5060 serge 12310
 
5354 serge 12311
	for_each_intel_encoder(dev, encoder) {
2330 Serge 12312
		encoder->base.possible_crtcs = encoder->crtc_mask;
12313
		encoder->base.possible_clones =
3031 serge 12314
			intel_encoder_clones(encoder);
2330 Serge 12315
	}
2327 Serge 12316
 
3243 Serge 12317
	intel_init_pch_refclk(dev);
12318
 
12319
	drm_helper_move_panel_connectors_to_head(dev);
2330 Serge 12320
}
12321
 
12322
 
12323
 
2335 Serge 12324
static const struct drm_framebuffer_funcs intel_fb_funcs = {
12325
//	.destroy = intel_user_framebuffer_destroy,
12326
//	.create_handle = intel_user_framebuffer_create_handle,
12327
};
2327 Serge 12328
 
5060 serge 12329
static int intel_framebuffer_init(struct drm_device *dev,
2335 Serge 12330
			   struct intel_framebuffer *intel_fb,
2342 Serge 12331
			   struct drm_mode_fb_cmd2 *mode_cmd,
2335 Serge 12332
			   struct drm_i915_gem_object *obj)
12333
{
5060 serge 12334
	int aligned_height;
4104 Serge 12335
	int pitch_limit;
2335 Serge 12336
	int ret;
2327 Serge 12337
 
4560 Serge 12338
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
12339
 
3243 Serge 12340
	if (obj->tiling_mode == I915_TILING_Y) {
12341
		DRM_DEBUG("hardware does not support tiling Y\n");
2335 Serge 12342
		return -EINVAL;
3243 Serge 12343
	}
2327 Serge 12344
 
3243 Serge 12345
	if (mode_cmd->pitches[0] & 63) {
12346
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
12347
			  mode_cmd->pitches[0]);
12348
		return -EINVAL;
12349
	}
12350
 
4104 Serge 12351
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
12352
		pitch_limit = 32*1024;
12353
	} else if (INTEL_INFO(dev)->gen >= 4) {
12354
		if (obj->tiling_mode)
12355
			pitch_limit = 16*1024;
12356
		else
12357
			pitch_limit = 32*1024;
12358
	} else if (INTEL_INFO(dev)->gen >= 3) {
12359
		if (obj->tiling_mode)
12360
			pitch_limit = 8*1024;
12361
		else
12362
			pitch_limit = 16*1024;
12363
	} else
12364
		/* XXX DSPC is limited to 4k tiled */
12365
		pitch_limit = 8*1024;
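	/*
	 * Worked example: a 2560-pixel-wide linear XRGB8888 scanout needs a
	 * pitch of 2560 * 4 = 10240 bytes, well under the 32K gen4+ limit,
	 * while an 8K limit (e.g. gen3 tiled) caps 32bpp width at 2048 pixels.
	 */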
12366
 
12367
	if (mode_cmd->pitches[0] > pitch_limit) {
12368
		DRM_DEBUG("%s pitch (%d) must be at most %d\n",
12369
			  obj->tiling_mode ? "tiled" : "linear",
12370
			  mode_cmd->pitches[0], pitch_limit);
3243 Serge 12371
		return -EINVAL;
12372
	}
12373
 
12374
	if (obj->tiling_mode != I915_TILING_NONE &&
12375
	    mode_cmd->pitches[0] != obj->stride) {
12376
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
12377
			  mode_cmd->pitches[0], obj->stride);
2335 Serge 12378
		return -EINVAL;
3243 Serge 12379
	}
2327 Serge 12380
 
3243 Serge 12381
	/* Reject formats not supported by any plane early. */
2342 Serge 12382
	switch (mode_cmd->pixel_format) {
3243 Serge 12383
	case DRM_FORMAT_C8:
2342 Serge 12384
	case DRM_FORMAT_RGB565:
12385
	case DRM_FORMAT_XRGB8888:
3243 Serge 12386
	case DRM_FORMAT_ARGB8888:
12387
		break;
12388
	case DRM_FORMAT_XRGB1555:
12389
	case DRM_FORMAT_ARGB1555:
12390
		if (INTEL_INFO(dev)->gen > 3) {
4104 Serge 12391
			DRM_DEBUG("unsupported pixel format: %s\n",
12392
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 12393
			return -EINVAL;
12394
		}
12395
		break;
3031 serge 12396
	case DRM_FORMAT_XBGR8888:
3243 Serge 12397
	case DRM_FORMAT_ABGR8888:
2342 Serge 12398
	case DRM_FORMAT_XRGB2101010:
12399
	case DRM_FORMAT_ARGB2101010:
3243 Serge 12400
	case DRM_FORMAT_XBGR2101010:
12401
	case DRM_FORMAT_ABGR2101010:
12402
		if (INTEL_INFO(dev)->gen < 4) {
4104 Serge 12403
			DRM_DEBUG("unsupported pixel format: %s\n",
12404
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 12405
			return -EINVAL;
12406
		}
2335 Serge 12407
		break;
2342 Serge 12408
	case DRM_FORMAT_YUYV:
12409
	case DRM_FORMAT_UYVY:
12410
	case DRM_FORMAT_YVYU:
12411
	case DRM_FORMAT_VYUY:
3243 Serge 12412
		if (INTEL_INFO(dev)->gen < 5) {
4104 Serge 12413
			DRM_DEBUG("unsupported pixel format: %s\n",
12414
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 12415
			return -EINVAL;
12416
		}
2342 Serge 12417
		break;
2335 Serge 12418
	default:
4104 Serge 12419
		DRM_DEBUG("unsupported pixel format: %s\n",
12420
			  drm_get_format_name(mode_cmd->pixel_format));
2335 Serge 12421
		return -EINVAL;
12422
	}
2327 Serge 12423
 
3243 Serge 12424
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
12425
	if (mode_cmd->offsets[0] != 0)
12426
		return -EINVAL;
12427
 
5060 serge 12428
	aligned_height = intel_align_height(dev, mode_cmd->height,
12429
					    obj->tiling_mode);
4560 Serge 12430
	/* FIXME drm helper for size checks (especially planar formats)? */
12431
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
12432
		return -EINVAL;
12433
 
3480 Serge 12434
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
12435
	intel_fb->obj = obj;
4560 Serge 12436
	intel_fb->obj->framebuffer_references++;
3480 Serge 12437
 
2335 Serge 12438
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
12439
	if (ret) {
12440
		DRM_ERROR("framebuffer init failed %d\n", ret);
12441
		return ret;
12442
	}
2327 Serge 12443
 
2335 Serge 12444
	return 0;
12445
}
2327 Serge 12446
 
4560 Serge 12447
#ifndef CONFIG_DRM_I915_FBDEV
12448
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
12449
{
12450
}
12451
#endif
2327 Serge 12452
 
2360 Serge 12453
static const struct drm_mode_config_funcs intel_mode_funcs = {
4560 Serge 12454
	.fb_create = NULL,
12455
	.output_poll_changed = intel_fbdev_output_poll_changed,
2360 Serge 12456
};
2327 Serge 12457
 
3031 serge 12458
/* Set up chip specific display functions */
12459
static void intel_init_display(struct drm_device *dev)
12460
{
12461
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 12462
 
4104 Serge 12463
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
12464
		dev_priv->display.find_dpll = g4x_find_best_dpll;
5060 serge 12465
	else if (IS_CHERRYVIEW(dev))
12466
		dev_priv->display.find_dpll = chv_find_best_dpll;
4104 Serge 12467
	else if (IS_VALLEYVIEW(dev))
12468
		dev_priv->display.find_dpll = vlv_find_best_dpll;
12469
	else if (IS_PINEVIEW(dev))
12470
		dev_priv->display.find_dpll = pnv_find_best_dpll;
12471
	else
12472
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
12473
 
3480 Serge 12474
	if (HAS_DDI(dev)) {
3746 Serge 12475
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
5060 serge 12476
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
5354 serge 12477
		dev_priv->display.crtc_compute_clock =
12478
			haswell_crtc_compute_clock;
3243 Serge 12479
		dev_priv->display.crtc_enable = haswell_crtc_enable;
12480
		dev_priv->display.crtc_disable = haswell_crtc_disable;
5060 serge 12481
		dev_priv->display.off = ironlake_crtc_off;
5354 serge 12482
		if (INTEL_INFO(dev)->gen >= 9)
12483
			dev_priv->display.update_primary_plane =
12484
				skylake_update_primary_plane;
12485
		else
5060 serge 12486
			dev_priv->display.update_primary_plane =
12487
				ironlake_update_primary_plane;
3243 Serge 12488
	} else if (HAS_PCH_SPLIT(dev)) {
3746 Serge 12489
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
5060 serge 12490
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
5354 serge 12491
		dev_priv->display.crtc_compute_clock =
12492
			ironlake_crtc_compute_clock;
3031 serge 12493
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
12494
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
12495
		dev_priv->display.off = ironlake_crtc_off;
5060 serge 12496
		dev_priv->display.update_primary_plane =
12497
			ironlake_update_primary_plane;
4104 Serge 12498
	} else if (IS_VALLEYVIEW(dev)) {
12499
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5060 serge 12500
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
5354 serge 12501
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
4104 Serge 12502
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
12503
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12504
		dev_priv->display.off = i9xx_crtc_off;
5060 serge 12505
		dev_priv->display.update_primary_plane =
12506
			i9xx_update_primary_plane;
3031 serge 12507
	} else {
3746 Serge 12508
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5060 serge 12509
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
5354 serge 12510
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
3031 serge 12511
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
12512
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12513
		dev_priv->display.off = i9xx_crtc_off;
5060 serge 12514
		dev_priv->display.update_primary_plane =
12515
			i9xx_update_primary_plane;
3031 serge 12516
	}
2327 Serge 12517
 
3031 serge 12518
	/* Returns the core display clock speed */
12519
	if (IS_VALLEYVIEW(dev))
12520
		dev_priv->display.get_display_clock_speed =
12521
			valleyview_get_display_clock_speed;
12522
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
12523
		dev_priv->display.get_display_clock_speed =
12524
			i945_get_display_clock_speed;
12525
	else if (IS_I915G(dev))
12526
		dev_priv->display.get_display_clock_speed =
12527
			i915_get_display_clock_speed;
4104 Serge 12528
	else if (IS_I945GM(dev) || IS_845G(dev))
3031 serge 12529
		dev_priv->display.get_display_clock_speed =
12530
			i9xx_misc_get_display_clock_speed;
4104 Serge 12531
	else if (IS_PINEVIEW(dev))
12532
		dev_priv->display.get_display_clock_speed =
12533
			pnv_get_display_clock_speed;
3031 serge 12534
	else if (IS_I915GM(dev))
12535
		dev_priv->display.get_display_clock_speed =
12536
			i915gm_get_display_clock_speed;
12537
	else if (IS_I865G(dev))
12538
		dev_priv->display.get_display_clock_speed =
12539
			i865_get_display_clock_speed;
12540
	else if (IS_I85X(dev))
12541
		dev_priv->display.get_display_clock_speed =
12542
			i855_get_display_clock_speed;
12543
	else /* 852, 830 */
12544
		dev_priv->display.get_display_clock_speed =
12545
			i830_get_display_clock_speed;
2327 Serge 12546
 
3031 serge 12547
	if (IS_GEN5(dev)) {
12548
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12549
	} else if (IS_GEN6(dev)) {
12550
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12551
	} else if (IS_IVYBRIDGE(dev)) {
12552
		/* FIXME: detect B0+ stepping and use auto training */
12553
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
3243 Serge 12554
		dev_priv->display.modeset_global_resources =
12555
			ivb_modeset_global_resources;
5354 serge 12556
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3031 serge 12557
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
4560 Serge 12558
	} else if (IS_VALLEYVIEW(dev)) {
12559
		dev_priv->display.modeset_global_resources =
12560
			valleyview_modeset_global_resources;
3031 serge 12561
	}
2327 Serge 12562
 
3031 serge 12563
	/* Default just returns -ENODEV to indicate unsupported */
12564
//	dev_priv->display.queue_flip = intel_default_queue_flip;
2327 Serge 12565
 
12566
 
12567
 
12568
 
4560 Serge 12569
	intel_panel_init_backlight_funcs(dev);
5354 serge 12570
 
12571
	mutex_init(&dev_priv->pps_mutex);
3031 serge 12572
}
12573
 
12574
/*
12575
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
12576
 * resume, or other times.  This quirk makes sure that's the case for
12577
 * affected systems.
12578
 */
12579
static void quirk_pipea_force(struct drm_device *dev)
2330 Serge 12580
{
12581
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 12582
 
3031 serge 12583
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
12584
	DRM_INFO("applying pipe a force quirk\n");
12585
}
2327 Serge 12586
 
5354 serge 12587
static void quirk_pipeb_force(struct drm_device *dev)
12588
{
12589
	struct drm_i915_private *dev_priv = dev->dev_private;
12590
 
12591
	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
12592
	DRM_INFO("applying pipe b force quirk\n");
12593
}
12594
 
3031 serge 12595
/*
12596
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12597
 */
12598
static void quirk_ssc_force_disable(struct drm_device *dev)
12599
{
12600
	struct drm_i915_private *dev_priv = dev->dev_private;
12601
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
12602
	DRM_INFO("applying lvds SSC disable quirk\n");
2330 Serge 12603
}
2327 Serge 12604
 
3031 serge 12605
/*
12606
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
12607
 * brightness value
12608
 */
12609
static void quirk_invert_brightness(struct drm_device *dev)
2330 Serge 12610
{
12611
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12612
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
12613
	DRM_INFO("applying inverted panel brightness quirk\n");
12614
}
2327 Serge 12615
 
5060 serge 12616
/* Some VBTs incorrectly indicate no backlight is present */
12617
static void quirk_backlight_present(struct drm_device *dev)
12618
{
12619
	struct drm_i915_private *dev_priv = dev->dev_private;
12620
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
12621
	DRM_INFO("applying backlight present quirk\n");
12622
}
12623
 
3031 serge 12624
struct intel_quirk {
12625
	int device;
12626
	int subsystem_vendor;
12627
	int subsystem_device;
12628
	void (*hook)(struct drm_device *dev);
12629
};
2327 Serge 12630
 
3031 serge 12631
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
12632
struct intel_dmi_quirk {
12633
	void (*hook)(struct drm_device *dev);
12634
	const struct dmi_system_id (*dmi_id_list)[];
12635
};
2327 Serge 12636
 
3031 serge 12637
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
12638
{
12639
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
12640
	return 1;
2330 Serge 12641
}
2327 Serge 12642
 
3031 serge 12643
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
12644
	{
12645
		.dmi_id_list = &(const struct dmi_system_id[]) {
12646
			{
12647
				.callback = intel_dmi_reverse_brightness,
12648
				.ident = "NCR Corporation",
12649
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
12650
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
12651
				},
12652
			},
12653
			{ }  /* terminating entry */
12654
		},
12655
		.hook = quirk_invert_brightness,
12656
	},
12657
};
2327 Serge 12658
 
3031 serge 12659
static struct intel_quirk intel_quirks[] = {
12660
	/* HP Mini needs pipe A force quirk (LP: #322104) */
12661
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
2327 Serge 12662
 
3031 serge 12663
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
12664
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
2327 Serge 12665
 
3031 serge 12666
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12667
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
2327 Serge 12668
 
3031 serge 12669
	/* Lenovo U160 cannot use SSC on LVDS */
12670
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
2327 Serge 12671
 
3031 serge 12672
	/* Sony Vaio Y cannot use SSC on LVDS */
12673
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
2327 Serge 12674
 
3031 serge 12675
	/* Acer Aspire 5734Z must invert backlight brightness */
12676
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
3480 Serge 12677
 
12678
	/* Acer/eMachines G725 */
12679
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
12680
 
12681
	/* Acer/eMachines e725 */
12682
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
12683
 
12684
	/* Acer/Packard Bell NCL20 */
12685
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
12686
 
12687
	/* Acer Aspire 4736Z */
12688
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
5060 serge 12689
 
12690
	/* Acer Aspire 5336 */
12691
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
12692
 
12693
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
12694
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
12695
 
5097 serge 12696
	/* Acer C720 Chromebook (Core i3 4005U) */
12697
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
12698
 
5354 serge 12699
	/* Apple Macbook 2,1 (Core 2 T7400) */
12700
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
12701
 
5060 serge 12702
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
12703
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
12704
 
12705
	/* HP Chromebook 14 (Celeron 2955U) */
12706
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
3031 serge 12707
};
2327 Serge 12708
 
3031 serge 12709
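/* Match this device against the PCI and DMI quirk tables and run any hooks that apply. */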
static void intel_init_quirks(struct drm_device *dev)
2330 Serge 12710
{
3031 serge 12711
	struct pci_dev *d = dev->pdev;
12712
	int i;
2327 Serge 12713
 
3031 serge 12714
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
12715
		struct intel_quirk *q = &intel_quirks[i];
2327 Serge 12716
 
3031 serge 12717
		if (d->device == q->device &&
12718
		    (d->subsystem_vendor == q->subsystem_vendor ||
12719
		     q->subsystem_vendor == PCI_ANY_ID) &&
12720
		    (d->subsystem_device == q->subsystem_device ||
12721
		     q->subsystem_device == PCI_ANY_ID))
12722
			q->hook(dev);
12723
	}
5097 serge 12724
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
12725
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
12726
			intel_dmi_quirks[i].hook(dev);
12727
	}
2330 Serge 12728
}
2327 Serge 12729
 
3031 serge 12730
/* Disable the VGA plane that we never use */
12731
static void i915_disable_vga(struct drm_device *dev)
2330 Serge 12732
{
12733
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12734
	u8 sr1;
3480 Serge 12735
	u32 vga_reg = i915_vgacntrl_reg(dev);
2327 Serge 12736
 
4560 Serge 12737
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
12738
	outb(SR01, VGA_SR_INDEX);
12739
	sr1 = inb(VGA_SR_DATA);
12740
	outb(sr1 | 1<<5, VGA_SR_DATA);
12741
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
3031 serge 12742
	udelay(300);
2327 Serge 12743
 
5354 serge 12744
	/*
12745
	 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
12746
	 * from S3 without preserving (some of?) the other bits.
12747
	 */
12748
	I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
3031 serge 12749
	POSTING_READ(vga_reg);
2330 Serge 12750
}
12751
 
3031 serge 12752
void intel_modeset_init_hw(struct drm_device *dev)
2342 Serge 12753
{
3031 serge 12754
	intel_prepare_ddi(dev);
2342 Serge 12755
 
5060 serge 12756
	if (IS_VALLEYVIEW(dev))
12757
		vlv_update_cdclk(dev);
12758
 
3031 serge 12759
	intel_init_clock_gating(dev);
12760
 
3482 Serge 12761
	intel_enable_gt_powersave(dev);
2342 Serge 12762
}
12763
 
3031 serge 12764
void intel_modeset_init(struct drm_device *dev)
2330 Serge 12765
{
3031 serge 12766
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 12767
	int sprite, ret;
12768
	enum pipe pipe;
12769
	struct intel_crtc *crtc;
2330 Serge 12770
 
3031 serge 12771
	drm_mode_config_init(dev);
2330 Serge 12772
 
3031 serge 12773
	dev->mode_config.min_width = 0;
12774
	dev->mode_config.min_height = 0;
2330 Serge 12775
 
3031 serge 12776
	dev->mode_config.preferred_depth = 24;
12777
	dev->mode_config.prefer_shadow = 1;
2330 Serge 12778
 
3031 serge 12779
	dev->mode_config.funcs = &intel_mode_funcs;
2330 Serge 12780
 
3031 serge 12781
	intel_init_quirks(dev);
2330 Serge 12782
 
3031 serge 12783
	intel_init_pm(dev);
2330 Serge 12784
 
3746 Serge 12785
	if (INTEL_INFO(dev)->num_pipes == 0)
12786
		return;
12787
 
3031 serge 12788
	intel_init_display(dev);
2330 Serge 12789
 
3031 serge 12790
	if (IS_GEN2(dev)) {
12791
		dev->mode_config.max_width = 2048;
12792
		dev->mode_config.max_height = 2048;
12793
	} else if (IS_GEN3(dev)) {
12794
		dev->mode_config.max_width = 4096;
12795
		dev->mode_config.max_height = 4096;
12796
	} else {
12797
		dev->mode_config.max_width = 8192;
12798
		dev->mode_config.max_height = 8192;
12799
	}
5060 serge 12800
 
12801
	if (IS_GEN2(dev)) {
12802
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12803
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12804
	} else {
12805
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
12806
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
12807
	}
12808
 
3480 Serge 12809
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
2330 Serge 12810
 
3031 serge 12811
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
3746 Serge 12812
		      INTEL_INFO(dev)->num_pipes,
12813
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
2330 Serge 12814
 
5354 serge 12815
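	/* Create a crtc for each pipe and a sprite plane for every sprite on that pipe. */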
	for_each_pipe(dev_priv, pipe) {
5060 serge 12816
		intel_crtc_init(dev, pipe);
12817
		for_each_sprite(pipe, sprite) {
12818
			ret = intel_plane_init(dev, pipe, sprite);
3031 serge 12819
			if (ret)
4104 Serge 12820
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
5060 serge 12821
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
3746 Serge 12822
		}
2330 Serge 12823
	}
12824
 
4560 Serge 12825
	intel_init_dpio(dev);
12826
 
4104 Serge 12827
	intel_shared_dpll_init(dev);
2330 Serge 12828
 
5354 serge 12829
	/* save the BIOS value before clobbering it */
12830
	dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
3031 serge 12831
	/* Just disable it once at startup */
12832
	i915_disable_vga(dev);
12833
	intel_setup_outputs(dev);
3480 Serge 12834
 
12835
	/* Just in case the BIOS is doing something questionable. */
12836
	intel_disable_fbc(dev);
2330 Serge 12837
 
5060 serge 12838
	drm_modeset_lock_all(dev);
12839
	intel_modeset_setup_hw_state(dev, false);
12840
	drm_modeset_unlock_all(dev);
12841
 
12842
	for_each_intel_crtc(dev, crtc) {
12843
		if (!crtc->active)
12844
			continue;
12845
 
12846
		/*
12847
		 * Note that reserving the BIOS fb up front prevents us
12848
		 * from stuffing other stolen allocations like the ring
12849
		 * on top.  This prevents some ugliness at boot time, and
12850
		 * can even allow for smooth boot transitions if the BIOS
12851
		 * fb is large enough for the active pipe configuration.
12852
		 */
12853
		if (dev_priv->display.get_plane_config) {
12854
			dev_priv->display.get_plane_config(crtc,
12855
							   &crtc->plane_config);
12856
			/*
12857
			 * If the fb is shared between multiple heads, we'll
12858
			 * just get the first one.
12859
			 */
12860
			intel_find_plane_obj(crtc, &crtc->plane_config);
12861
		}
12862
	}
2330 Serge 12863
}
12864
 
3031 serge 12865
static void intel_enable_pipe_a(struct drm_device *dev)
2330 Serge 12866
{
3031 serge 12867
	struct intel_connector *connector;
12868
	struct drm_connector *crt = NULL;
12869
	struct intel_load_detect_pipe load_detect_temp;
5060 serge 12870
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
2330 Serge 12871
 
3031 serge 12872
	/* We can't just switch on the pipe A, we need to set things up with a
12873
	 * proper mode and output configuration. As a gross hack, enable pipe A
12874
	 * by enabling the load detect pipe once. */
12875
	list_for_each_entry(connector,
12876
			    &dev->mode_config.connector_list,
12877
			    base.head) {
12878
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
12879
			crt = &connector->base;
12880
			break;
2330 Serge 12881
		}
12882
	}
12883
 
3031 serge 12884
	if (!crt)
12885
		return;
2330 Serge 12886
 
5060 serge 12887
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
3031 serge 12888
		intel_release_load_detect_pipe(crt, &load_detect_temp);
2327 Serge 12889
}
12890
 
3031 serge 12891
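/*
 * On pre-gen4 hardware the plane -> pipe mapping is configurable; return
 * false if the other display plane is enabled and currently selects this
 * crtc's pipe, i.e. the BIOS left a crossed mapping behind.
 */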
static bool
12892
intel_check_plane_mapping(struct intel_crtc *crtc)
2327 Serge 12893
{
3746 Serge 12894
	struct drm_device *dev = crtc->base.dev;
12895
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12896
	u32 reg, val;
2327 Serge 12897
 
3746 Serge 12898
	if (INTEL_INFO(dev)->num_pipes == 1)
3031 serge 12899
		return true;
2327 Serge 12900
 
3031 serge 12901
	reg = DSPCNTR(!crtc->plane);
12902
	val = I915_READ(reg);
2327 Serge 12903
 
3031 serge 12904
	if ((val & DISPLAY_PLANE_ENABLE) &&
12905
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
12906
		return false;
2327 Serge 12907
 
3031 serge 12908
	return true;
2327 Serge 12909
}
12910
 
3031 serge 12911
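/*
 * Fix up crtc state inherited from the BIOS: clear frame start delays,
 * repair a wrong plane -> pipe mapping on pre-gen4, and bring the software
 * enabled/active tracking in line with what the hardware reports.
 */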
static void intel_sanitize_crtc(struct intel_crtc *crtc)
2327 Serge 12912
{
3031 serge 12913
	struct drm_device *dev = crtc->base.dev;
2327 Serge 12914
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12915
	u32 reg;
2327 Serge 12916
 
3031 serge 12917
	/* Clear any frame start delays used for debugging left by the BIOS */
3746 Serge 12918
	reg = PIPECONF(crtc->config.cpu_transcoder);
3031 serge 12919
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
2327 Serge 12920
 
5060 serge 12921
	/* restore vblank interrupts to correct state */
5354 serge 12922
	if (crtc->active) {
12923
		update_scanline_offset(crtc);
5060 serge 12924
		drm_vblank_on(dev, crtc->pipe);
5354 serge 12925
	} else
5060 serge 12926
		drm_vblank_off(dev, crtc->pipe);
12927
 
3031 serge 12928
	/* We need to sanitize the plane -> pipe mapping first because this will
12929
	 * disable the crtc (and hence change the state) if it is wrong. Note
12930
	 * that gen4+ has a fixed plane -> pipe mapping.  */
12931
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
12932
		struct intel_connector *connector;
12933
		bool plane;
2327 Serge 12934
 
3031 serge 12935
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
12936
			      crtc->base.base.id);
2327 Serge 12937
 
3031 serge 12938
		/* Pipe has the wrong plane attached and the plane is active.
12939
		 * Temporarily change the plane mapping and disable everything
12940
		 * ...  */
12941
		plane = crtc->plane;
12942
		crtc->plane = !plane;
5060 serge 12943
		crtc->primary_enabled = true;
3031 serge 12944
		dev_priv->display.crtc_disable(&crtc->base);
12945
		crtc->plane = plane;
2342 Serge 12946
 
3031 serge 12947
		/* ... and break all links. */
12948
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12949
				    base.head) {
12950
			if (connector->encoder->base.crtc != &crtc->base)
12951
				continue;
2327 Serge 12952
 
5060 serge 12953
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12954
			connector->base.encoder = NULL;
3031 serge 12955
		}
5060 serge 12956
		/* multiple connectors may have the same encoder:
12957
		 *  handle them and break crtc link separately */
12958
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12959
				    base.head)
12960
			if (connector->encoder->base.crtc == &crtc->base) {
12961
				connector->encoder->base.crtc = NULL;
12962
				connector->encoder->connectors_active = false;
12963
		}
2327 Serge 12964
 
3031 serge 12965
		WARN_ON(crtc->active);
12966
		crtc->base.enabled = false;
12967
	}
2327 Serge 12968
 
3031 serge 12969
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
12970
	    crtc->pipe == PIPE_A && !crtc->active) {
12971
		/* BIOS forgot to enable pipe A, this mostly happens after
12972
		 * resume. Force-enable the pipe to fix this; in the update_dpms
12974
		 * call below we restore the pipe to the right state, but leave
12974
		 * the required bits on. */
12975
		intel_enable_pipe_a(dev);
12976
	}
2327 Serge 12977
 
3031 serge 12978
	/* Adjust the state of the output pipe according to whether we
12979
	 * have active connectors/encoders. */
12980
	intel_crtc_update_dpms(&crtc->base);
2327 Serge 12981
 
3031 serge 12982
	if (crtc->active != crtc->base.enabled) {
12983
		struct intel_encoder *encoder;
2327 Serge 12984
 
3031 serge 12985
		/* This can happen either due to bugs in the get_hw_state
12986
		 * functions or because the pipe is force-enabled due to the
12987
		 * pipe A quirk. */
12988
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
12989
			      crtc->base.base.id,
12990
			      crtc->base.enabled ? "enabled" : "disabled",
12991
			      crtc->active ? "enabled" : "disabled");
2327 Serge 12992
 
3031 serge 12993
		crtc->base.enabled = crtc->active;
2327 Serge 12994
 
3031 serge 12995
		/* Because we only establish the connector -> encoder ->
12996
		 * crtc links if something is active, this means the
12997
		 * crtc is now deactivated. Break the links. connector
12998
		 * -> encoder links are only established when things are
12999
		 *  actually up, hence no need to break them. */
13000
		WARN_ON(crtc->active);
2327 Serge 13001
 
3031 serge 13002
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
13003
			WARN_ON(encoder->connectors_active);
13004
			encoder->base.crtc = NULL;
13005
		}
13006
	}
5060 serge 13007
 
5354 serge 13008
	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
5060 serge 13009
		/*
13010
		 * We start out with underrun reporting disabled to avoid races.
13011
		 * For correct bookkeeping mark this on active crtcs.
13012
		 *
13013
		 * Also on gmch platforms we dont have any hardware bits to
13014
		 * disable the underrun reporting. Which means we need to start
13015
		 * out with underrun reporting disabled also on inactive pipes,
13016
		 * since otherwise we'll complain about the garbage we read when
13017
		 * e.g. coming up after runtime pm.
13018
		 *
13019
		 * No protection against concurrent access is required - at
13020
		 * worst a fifo underrun happens which also sets this to false.
13021
		 */
13022
		crtc->cpu_fifo_underrun_disabled = true;
13023
		crtc->pch_fifo_underrun_disabled = true;
13024
	}
2327 Serge 13025
}
13026
 
3031 serge 13027
static void intel_sanitize_encoder(struct intel_encoder *encoder)
2327 Serge 13028
{
3031 serge 13029
	struct intel_connector *connector;
13030
	struct drm_device *dev = encoder->base.dev;
2327 Serge 13031
 
3031 serge 13032
	/* We need to check both for a crtc link (meaning that the
13033
	 * encoder is active and trying to read from a pipe) and the
13034
	 * pipe itself being active. */
13035
	bool has_active_crtc = encoder->base.crtc &&
13036
		to_intel_crtc(encoder->base.crtc)->active;
2327 Serge 13037
 
3031 serge 13038
	if (encoder->connectors_active && !has_active_crtc) {
13039
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
13040
			      encoder->base.base.id,
5060 serge 13041
			      encoder->base.name);
2327 Serge 13042
 
3031 serge 13043
		/* Connector is active, but has no active pipe. This is
13044
		 * fallout from our resume register restoring. Disable
13045
		 * the encoder manually again. */
13046
		if (encoder->base.crtc) {
13047
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
13048
				      encoder->base.base.id,
5060 serge 13049
				      encoder->base.name);
3031 serge 13050
			encoder->disable(encoder);
5060 serge 13051
			if (encoder->post_disable)
13052
				encoder->post_disable(encoder);
3031 serge 13053
		}
5060 serge 13054
		encoder->base.crtc = NULL;
13055
		encoder->connectors_active = false;
2327 Serge 13056
 
3031 serge 13057
		/* Inconsistent output/port/pipe state happens presumably due to
13058
		 * a bug in one of the get_hw_state functions. Or someplace else
13059
		 * in our code, like the register restore mess on resume. Clamp
13060
		 * things to off as a safer default. */
13061
		list_for_each_entry(connector,
13062
				    &dev->mode_config.connector_list,
13063
				    base.head) {
13064
			if (connector->encoder != encoder)
13065
				continue;
5060 serge 13066
			connector->base.dpms = DRM_MODE_DPMS_OFF;
13067
			connector->base.encoder = NULL;
3031 serge 13068
		}
13069
	}
13070
	/* Enabled encoders without active connectors will be fixed in
13071
	 * the crtc fixup. */
2327 Serge 13072
}
13073
 
5060 serge 13074
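/* Turn the VGA plane back off if something has re-enabled it behind our back. */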
void i915_redisable_vga_power_on(struct drm_device *dev)
3746 Serge 13075
{
13076
	struct drm_i915_private *dev_priv = dev->dev_private;
13077
	u32 vga_reg = i915_vgacntrl_reg(dev);
13078
 
5060 serge 13079
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
13080
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
13081
		i915_disable_vga(dev);
13082
	}
13083
}
13084
 
13085
void i915_redisable_vga(struct drm_device *dev)
13086
{
13087
	struct drm_i915_private *dev_priv = dev->dev_private;
13088
 
4104 Serge 13089
	/* This function can be called both from intel_modeset_setup_hw_state or
13090
	 * at a very early point in our resume sequence, where the power well
13091
	 * structures are not yet restored. Since this function is at a very
13092
	 * paranoid "someone might have enabled VGA while we were not looking"
13093
	 * level, just check if the power well is enabled instead of trying to
13094
	 * follow the "don't touch the power well if we don't need it" policy
13095
	 * the rest of the driver uses. */
5354 serge 13096
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
4104 Serge 13097
		return;
13098
 
5060 serge 13099
	i915_redisable_vga_power_on(dev);
3746 Serge 13100
}
13101
 
5060 serge 13102
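/* Read back from DSPCNTR whether the crtc's primary plane is enabled in hardware. */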
static bool primary_get_hw_state(struct intel_crtc *crtc)
13103
{
13104
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
13105
 
13106
	if (!crtc->active)
13107
		return false;
13108
 
13109
	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
13110
}
13111
 
4104 Serge 13112
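/*
 * Read the current hardware state of all crtcs, shared DPLLs, encoders and
 * connectors into the corresponding software tracking structures.
 */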
static void intel_modeset_readout_hw_state(struct drm_device *dev)
2332 Serge 13113
{
13114
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 13115
	enum pipe pipe;
13116
	struct intel_crtc *crtc;
13117
	struct intel_encoder *encoder;
13118
	struct intel_connector *connector;
4104 Serge 13119
	int i;
2327 Serge 13120
 
5060 serge 13121
	for_each_intel_crtc(dev, crtc) {
3746 Serge 13122
		memset(&crtc->config, 0, sizeof(crtc->config));
2327 Serge 13123
 
5060 serge 13124
		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
13125
 
3746 Serge 13126
		crtc->active = dev_priv->display.get_pipe_config(crtc,
13127
								 &crtc->config);
2327 Serge 13128
 
3031 serge 13129
		crtc->base.enabled = crtc->active;
5060 serge 13130
		crtc->primary_enabled = primary_get_hw_state(crtc);
2330 Serge 13131
 
3031 serge 13132
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
13133
			      crtc->base.base.id,
13134
			      crtc->active ? "enabled" : "disabled");
2339 Serge 13135
	}
2332 Serge 13136
 
4104 Serge 13137
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13138
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13139
 
5354 serge 13140
		pll->on = pll->get_hw_state(dev_priv, pll,
13141
					    &pll->config.hw_state);
4104 Serge 13142
		pll->active = 0;
5354 serge 13143
		pll->config.crtc_mask = 0;
5060 serge 13144
		for_each_intel_crtc(dev, crtc) {
5354 serge 13145
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
4104 Serge 13146
				pll->active++;
5354 serge 13147
				pll->config.crtc_mask |= 1 << crtc->pipe;
13148
			}
4104 Serge 13149
		}
13150
 
5354 serge 13151
		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
13152
			      pll->name, pll->config.crtc_mask, pll->on);
5060 serge 13153
 
5354 serge 13154
		if (pll->config.crtc_mask)
5060 serge 13155
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
4104 Serge 13156
	}
13157
 
5354 serge 13158
	for_each_intel_encoder(dev, encoder) {
3031 serge 13159
		pipe = 0;
2332 Serge 13160
 
3031 serge 13161
		if (encoder->get_hw_state(encoder, &pipe)) {
4104 Serge 13162
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
13163
			encoder->base.crtc = &crtc->base;
13164
			encoder->get_config(encoder, &crtc->config);
3031 serge 13165
		} else {
13166
			encoder->base.crtc = NULL;
13167
		}
2332 Serge 13168
 
3031 serge 13169
		encoder->connectors_active = false;
4560 Serge 13170
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
3031 serge 13171
			      encoder->base.base.id,
5060 serge 13172
			      encoder->base.name,
3031 serge 13173
			      encoder->base.crtc ? "enabled" : "disabled",
4560 Serge 13174
			      pipe_name(pipe));
3031 serge 13175
	}
2332 Serge 13176
 
3031 serge 13177
	list_for_each_entry(connector, &dev->mode_config.connector_list,
13178
			    base.head) {
13179
		if (connector->get_hw_state(connector)) {
13180
			connector->base.dpms = DRM_MODE_DPMS_ON;
13181
			connector->encoder->connectors_active = true;
13182
			connector->base.encoder = &connector->encoder->base;
13183
		} else {
13184
			connector->base.dpms = DRM_MODE_DPMS_OFF;
13185
			connector->base.encoder = NULL;
13186
		}
13187
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
13188
			      connector->base.base.id,
5060 serge 13189
			      connector->base.name,
3031 serge 13190
			      connector->base.encoder ? "enabled" : "disabled");
2332 Serge 13191
	}
4104 Serge 13192
}
2332 Serge 13193
 
4104 Serge 13194
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
13195
 * and i915 state tracking structures. */
13196
void intel_modeset_setup_hw_state(struct drm_device *dev,
13197
				  bool force_restore)
13198
{
13199
	struct drm_i915_private *dev_priv = dev->dev_private;
13200
	enum pipe pipe;
13201
	struct intel_crtc *crtc;
13202
	struct intel_encoder *encoder;
13203
	int i;
13204
 
13205
	intel_modeset_readout_hw_state(dev);
13206
 
13207
	/*
13208
	 * Now that we have the config, copy it to each CRTC struct
13209
	 * Note that this could go away if we move to using crtc_config
13210
	 * checking everywhere.
13211
	 */
5060 serge 13212
	for_each_intel_crtc(dev, crtc) {
13213
		if (crtc->active && i915.fastboot) {
13214
			intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
4104 Serge 13215
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
13216
				      crtc->base.base.id);
13217
			drm_mode_debug_printmodeline(&crtc->base.mode);
13218
		}
13219
	}
13220
 
3031 serge 13221
	/* HW state is read out, now we need to sanitize this mess. */
5354 serge 13222
	for_each_intel_encoder(dev, encoder) {
3031 serge 13223
		intel_sanitize_encoder(encoder);
2332 Serge 13224
	}
13225
 
5354 serge 13226
	for_each_pipe(dev_priv, pipe) {
3031 serge 13227
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
13228
		intel_sanitize_crtc(crtc);
4104 Serge 13229
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
2332 Serge 13230
	}
13231
 
4104 Serge 13232
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13233
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13234
 
13235
		if (!pll->on || pll->active)
13236
			continue;
13237
 
13238
		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
13239
 
13240
		pll->disable(dev_priv, pll);
13241
		pll->on = false;
13242
	}
13243
 
5354 serge 13244
	if (IS_GEN9(dev))
13245
		skl_wm_get_hw_state(dev);
13246
	else if (HAS_PCH_SPLIT(dev))
4560 Serge 13247
		ilk_wm_get_hw_state(dev);
13248
 
3243 Serge 13249
	if (force_restore) {
4560 Serge 13250
		i915_redisable_vga(dev);
13251
 
3746 Serge 13252
		/*
13253
		 * We need to use raw interfaces for restoring state to avoid
13254
		 * checking (bogus) intermediate states.
13255
		 */
5354 serge 13256
		for_each_pipe(dev_priv, pipe) {
3746 Serge 13257
			struct drm_crtc *crtc =
13258
				dev_priv->pipe_to_crtc_mapping[pipe];
13259
 
5354 serge 13260
			intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
5060 serge 13261
					 crtc->primary->fb);
3243 Serge 13262
		}
13263
	} else {
3031 serge 13264
		intel_modeset_update_staged_output_state(dev);
3243 Serge 13265
	}
2332 Serge 13266
 
3031 serge 13267
	intel_modeset_check_state(dev);
2332 Serge 13268
}
13269
 
3031 serge 13270
void intel_modeset_gem_init(struct drm_device *dev)
2330 Serge 13271
{
5354 serge 13272
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 13273
	struct drm_crtc *c;
13274
	struct drm_i915_gem_object *obj;
13275
 
13276
	mutex_lock(&dev->struct_mutex);
13277
	intel_init_gt_powersave(dev);
13278
	mutex_unlock(&dev->struct_mutex);
13279
 
5354 serge 13280
	/*
13281
	 * There may be no VBT; and if the BIOS enabled SSC we can
13282
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
13283
	 * BIOS isn't using it, don't assume it will work even if the VBT
13284
	 * indicates as much.
13285
	 */
13286
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
13287
		dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
13288
						DREF_SSC1_ENABLE);
13289
 
3031 serge 13290
	intel_modeset_init_hw(dev);
2330 Serge 13291
 
3031 serge 13292
//   intel_setup_overlay(dev);
2330 Serge 13293
 
5060 serge 13294
	/*
13295
	 * Make sure any fbs we allocated at startup are properly
13296
	 * pinned & fenced.  When we do the allocation it's too early
13297
	 * for this.
13298
	 */
13299
	mutex_lock(&dev->struct_mutex);
13300
	for_each_crtc(dev, c) {
13301
		obj = intel_fb_obj(c->primary->fb);
13302
		if (obj == NULL)
13303
			continue;
13304
 
5354 serge 13305
		if (intel_pin_and_fence_fb_obj(c->primary,
13306
					       c->primary->fb,
13307
					       NULL)) {
5060 serge 13308
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
13309
				  to_intel_crtc(c)->pipe);
13310
			drm_framebuffer_unreference(c->primary->fb);
13311
			c->primary->fb = NULL;
13312
		}
13313
	}
13314
	mutex_unlock(&dev->struct_mutex);
2330 Serge 13315
}
13316
 
5060 serge 13317
void intel_connector_unregister(struct intel_connector *intel_connector)
13318
{
13319
	struct drm_connector *connector = &intel_connector->base;
13320
 
13321
	intel_panel_destroy_backlight(connector);
13322
	drm_connector_unregister(connector);
13323
}
13324
 
3031 serge 13325
void intel_modeset_cleanup(struct drm_device *dev)
2327 Serge 13326
{
3031 serge 13327
#if 0
13328
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 13329
	struct drm_connector *connector;
2327 Serge 13330
 
5354 serge 13331
	intel_disable_gt_powersave(dev);
13332
 
13333
	intel_backlight_unregister(dev);
13334
 
4104 Serge 13335
	/*
13336
	 * Interrupts and polling as the first thing to avoid creating havoc.
5354 serge 13337
	 * Too much stuff here (turning off connectors, ...) would
4104 Serge 13338
	 * experience fancy races otherwise.
13339
	 */
5354 serge 13340
	intel_irq_uninstall(dev_priv);
5060 serge 13341
 
4104 Serge 13342
	/*
13343
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
13344
	 * poll handlers. Hence disable polling after hpd handling is shut down.
13345
	 */
4560 Serge 13346
	drm_kms_helper_poll_fini(dev);
4104 Serge 13347
 
3031 serge 13348
	mutex_lock(&dev->struct_mutex);
2327 Serge 13349
 
4560 Serge 13350
	intel_unregister_dsm_handler();
2327 Serge 13351
 
3031 serge 13352
	intel_disable_fbc(dev);
2342 Serge 13353
 
3031 serge 13354
	ironlake_teardown_rc6(dev);
2327 Serge 13355
 
3031 serge 13356
	mutex_unlock(&dev->struct_mutex);
2327 Serge 13357
 
4104 Serge 13358
	/* flush any delayed tasks or pending work */
13359
	flush_scheduled_work();
2327 Serge 13360
 
4560 Serge 13361
	/* destroy the backlight and sysfs files before encoders/connectors */
13362
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
5060 serge 13363
		struct intel_connector *intel_connector;
13364
 
13365
		intel_connector = to_intel_connector(connector);
13366
		intel_connector->unregister(intel_connector);
4560 Serge 13367
	}
2327 Serge 13368
 
3031 serge 13369
	drm_mode_config_cleanup(dev);
5060 serge 13370
 
13371
	intel_cleanup_overlay(dev);
13372
 
13373
	mutex_lock(&dev->struct_mutex);
13374
	intel_cleanup_gt_powersave(dev);
13375
	mutex_unlock(&dev->struct_mutex);
2327 Serge 13376
#endif
13377
}
13378
 
13379
/*
3031 serge 13380
 * Return which encoder is currently attached to the connector.
2327 Serge 13381
 */
3031 serge 13382
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
2327 Serge 13383
{
3031 serge 13384
	return &intel_attached_encoder(connector)->base;
13385
}
2327 Serge 13386
 
3031 serge 13387
void intel_connector_attach_encoder(struct intel_connector *connector,
13388
				    struct intel_encoder *encoder)
13389
{
13390
	connector->encoder = encoder;
13391
	drm_mode_connector_attach_encoder(&connector->base,
13392
					  &encoder->base);
2327 Serge 13393
}
13394
 
13395
/*
3031 serge 13396
 * set vga decode state - true == enable VGA decode
2327 Serge 13397
 */
3031 serge 13398
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
2327 Serge 13399
{
2330 Serge 13400
	struct drm_i915_private *dev_priv = dev->dev_private;
4539 Serge 13401
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
3031 serge 13402
	u16 gmch_ctrl;
2327 Serge 13403
 
5060 serge 13404
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
13405
		DRM_ERROR("failed to read control word\n");
13406
		return -EIO;
13407
	}
13408
 
13409
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
13410
		return 0;
13411
 
3031 serge 13412
	if (state)
13413
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
2330 Serge 13414
	else
3031 serge 13415
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
5060 serge 13416
 
13417
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
13418
		DRM_ERROR("failed to write control word\n");
13419
		return -EIO;
13420
	}
13421
 
3031 serge 13422
	return 0;
2330 Serge 13423
}
13424
 
3031 serge 13425
#ifdef CONFIG_DEBUG_FS
2327 Serge 13426
 
3031 serge 13427
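/*
 * Snapshot of per-pipe cursor, plane, pipe and transcoder registers,
 * captured alongside the rest of the GPU error state.
 */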
struct intel_display_error_state {
4104 Serge 13428
 
13429
	u32 power_well_driver;
13430
 
13431
	int num_transcoders;
13432
 
3031 serge 13433
	struct intel_cursor_error_state {
13434
		u32 control;
13435
		u32 position;
13436
		u32 base;
13437
		u32 size;
13438
	} cursor[I915_MAX_PIPES];
2327 Serge 13439
 
3031 serge 13440
	struct intel_pipe_error_state {
4560 Serge 13441
		bool power_domain_on;
3031 serge 13442
		u32 source;
5060 serge 13443
		u32 stat;
3031 serge 13444
	} pipe[I915_MAX_PIPES];
2327 Serge 13445
 
3031 serge 13446
	struct intel_plane_error_state {
13447
		u32 control;
13448
		u32 stride;
13449
		u32 size;
13450
		u32 pos;
13451
		u32 addr;
13452
		u32 surface;
13453
		u32 tile_offset;
13454
	} plane[I915_MAX_PIPES];
4104 Serge 13455
 
13456
	struct intel_transcoder_error_state {
4560 Serge 13457
		bool power_domain_on;
4104 Serge 13458
		enum transcoder cpu_transcoder;
13459
 
13460
		u32 conf;
13461
 
13462
		u32 htotal;
13463
		u32 hblank;
13464
		u32 hsync;
13465
		u32 vtotal;
13466
		u32 vblank;
13467
		u32 vsync;
13468
	} transcoder[4];
3031 serge 13469
};
2327 Serge 13470
 
3031 serge 13471
struct intel_display_error_state *
13472
intel_display_capture_error_state(struct drm_device *dev)
13473
{
5060 serge 13474
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 13475
	struct intel_display_error_state *error;
4104 Serge 13476
	int transcoders[] = {
13477
		TRANSCODER_A,
13478
		TRANSCODER_B,
13479
		TRANSCODER_C,
13480
		TRANSCODER_EDP,
13481
	};
3031 serge 13482
	int i;
2327 Serge 13483
 
4104 Serge 13484
	if (INTEL_INFO(dev)->num_pipes == 0)
13485
		return NULL;
13486
 
4560 Serge 13487
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
3031 serge 13488
	if (error == NULL)
13489
		return NULL;
2327 Serge 13490
 
4560 Serge 13491
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4104 Serge 13492
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
13493
 
5354 serge 13494
	for_each_pipe(dev_priv, i) {
4560 Serge 13495
		error->pipe[i].power_domain_on =
5354 serge 13496
			__intel_display_power_is_enabled(dev_priv,
5060 serge 13497
						       POWER_DOMAIN_PIPE(i));
4560 Serge 13498
		if (!error->pipe[i].power_domain_on)
13499
			continue;
13500
 
3031 serge 13501
		error->cursor[i].control = I915_READ(CURCNTR(i));
13502
		error->cursor[i].position = I915_READ(CURPOS(i));
13503
		error->cursor[i].base = I915_READ(CURBASE(i));
2327 Serge 13504
 
3031 serge 13505
		error->plane[i].control = I915_READ(DSPCNTR(i));
13506
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
3746 Serge 13507
		if (INTEL_INFO(dev)->gen <= 3) {
3031 serge 13508
			error->plane[i].size = I915_READ(DSPSIZE(i));
13509
			error->plane[i].pos = I915_READ(DSPPOS(i));
3746 Serge 13510
		}
13511
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3031 serge 13512
			error->plane[i].addr = I915_READ(DSPADDR(i));
13513
		if (INTEL_INFO(dev)->gen >= 4) {
13514
			error->plane[i].surface = I915_READ(DSPSURF(i));
13515
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
13516
		}
2327 Serge 13517
 
3031 serge 13518
		error->pipe[i].source = I915_READ(PIPESRC(i));
5060 serge 13519
 
13520
		if (HAS_GMCH_DISPLAY(dev))
13521
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
3031 serge 13522
	}
2327 Serge 13523
 
4104 Serge 13524
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
13525
	if (HAS_DDI(dev_priv->dev))
13526
		error->num_transcoders++; /* Account for eDP. */
13527
 
13528
	for (i = 0; i < error->num_transcoders; i++) {
13529
		enum transcoder cpu_transcoder = transcoders[i];
13530
 
4560 Serge 13531
		error->transcoder[i].power_domain_on =
5354 serge 13532
			__intel_display_power_is_enabled(dev_priv,
4560 Serge 13533
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13534
		if (!error->transcoder[i].power_domain_on)
13535
			continue;
13536
 
4104 Serge 13537
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
13538
 
13539
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
13540
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
13541
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
13542
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
13543
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
13544
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
13545
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
13546
	}
13547
 
3031 serge 13548
	return error;
2330 Serge 13549
}
2327 Serge 13550
 
4104 Serge 13551
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13552
 
3031 serge 13553
void
4104 Serge 13554
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
3031 serge 13555
				struct drm_device *dev,
13556
				struct intel_display_error_state *error)
2332 Serge 13557
{
5354 serge 13558
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 13559
	int i;
2330 Serge 13560
 
4104 Serge 13561
	if (!error)
13562
		return;
13563
 
13564
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
4560 Serge 13565
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4104 Serge 13566
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
13567
			   error->power_well_driver);
5354 serge 13568
	for_each_pipe(dev_priv, i) {
4104 Serge 13569
		err_printf(m, "Pipe [%d]:\n", i);
4560 Serge 13570
		err_printf(m, "  Power: %s\n",
13571
			   error->pipe[i].power_domain_on ? "on" : "off");
4104 Serge 13572
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
5060 serge 13573
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
2332 Serge 13574
 
4104 Serge 13575
		err_printf(m, "Plane [%d]:\n", i);
13576
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
13577
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
3746 Serge 13578
		if (INTEL_INFO(dev)->gen <= 3) {
4104 Serge 13579
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
13580
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
3746 Serge 13581
		}
13582
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
4104 Serge 13583
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
3031 serge 13584
		if (INTEL_INFO(dev)->gen >= 4) {
4104 Serge 13585
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
13586
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
3031 serge 13587
		}
2332 Serge 13588
 
4104 Serge 13589
		err_printf(m, "Cursor [%d]:\n", i);
13590
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
13591
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
13592
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
3031 serge 13593
	}
4104 Serge 13594
 
13595
	for (i = 0; i < error->num_transcoders; i++) {
4560 Serge 13596
		err_printf(m, "CPU transcoder: %c\n",
4104 Serge 13597
			   transcoder_name(error->transcoder[i].cpu_transcoder));
4560 Serge 13598
		err_printf(m, "  Power: %s\n",
13599
			   error->transcoder[i].power_domain_on ? "on" : "off");
4104 Serge 13600
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
13601
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
13602
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
13603
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
13604
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
13605
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
13606
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
13607
	}
2327 Serge 13608
}
3031 serge 13609
#endif
5354 serge 13610
 
13611
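/*
 * Drop any pending page flip completion event that belongs to the file being
 * closed, so it is never delivered to a stale file_priv.
 */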
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
13612
{
13613
	struct intel_crtc *crtc;
13614
 
13615
	for_each_intel_crtc(dev, crtc) {
13616
		struct intel_unpin_work *work;
13617
 
13618
		spin_lock_irq(&dev->event_lock);
13619
 
13620
		work = crtc->unpin_work;
13621
 
13622
		if (work && work->event &&
13623
		    work->event->base.file_priv == file) {
13624
			kfree(work->event);
13625
			work->event = NULL;
13626
		}
13627
 
13628
		spin_unlock_irq(&dev->event_lock);
13629
	}
13630
}